1972adf0fSEmmanuel Vadot /*-
2972adf0fSEmmanuel Vadot * Copyright (c) 2014 Ruslan Bukin <br@bsdpad.com>
3972adf0fSEmmanuel Vadot *
4972adf0fSEmmanuel Vadot * This software was developed by SRI International and the University of
5972adf0fSEmmanuel Vadot * Cambridge Computer Laboratory under DARPA/AFRL contract (FA8750-10-C-0237)
6972adf0fSEmmanuel Vadot * ("CTSRD"), as part of the DARPA CRASH research programme.
7972adf0fSEmmanuel Vadot *
8972adf0fSEmmanuel Vadot * Redistribution and use in source and binary forms, with or without
9972adf0fSEmmanuel Vadot * modification, are permitted provided that the following conditions
10972adf0fSEmmanuel Vadot * are met:
11972adf0fSEmmanuel Vadot * 1. Redistributions of source code must retain the above copyright
12972adf0fSEmmanuel Vadot * notice, this list of conditions and the following disclaimer.
13972adf0fSEmmanuel Vadot * 2. Redistributions in binary form must reproduce the above copyright
14972adf0fSEmmanuel Vadot * notice, this list of conditions and the following disclaimer in the
15972adf0fSEmmanuel Vadot * documentation and/or other materials provided with the distribution.
16972adf0fSEmmanuel Vadot *
17972adf0fSEmmanuel Vadot * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18972adf0fSEmmanuel Vadot * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19972adf0fSEmmanuel Vadot * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20972adf0fSEmmanuel Vadot * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21972adf0fSEmmanuel Vadot * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22972adf0fSEmmanuel Vadot * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23972adf0fSEmmanuel Vadot * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24972adf0fSEmmanuel Vadot * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25972adf0fSEmmanuel Vadot * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26972adf0fSEmmanuel Vadot * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27972adf0fSEmmanuel Vadot * SUCH DAMAGE.
28972adf0fSEmmanuel Vadot */
29972adf0fSEmmanuel Vadot
30972adf0fSEmmanuel Vadot #include <sys/param.h>
31972adf0fSEmmanuel Vadot #include <sys/systm.h>
32972adf0fSEmmanuel Vadot #include <sys/bus.h>
33972adf0fSEmmanuel Vadot #include <sys/kernel.h>
34972adf0fSEmmanuel Vadot #include <sys/lock.h>
35972adf0fSEmmanuel Vadot #include <sys/malloc.h>
36972adf0fSEmmanuel Vadot #include <sys/mbuf.h>
37972adf0fSEmmanuel Vadot #include <sys/module.h>
38972adf0fSEmmanuel Vadot #include <sys/mutex.h>
39972adf0fSEmmanuel Vadot #include <sys/rman.h>
40972adf0fSEmmanuel Vadot #include <sys/socket.h>
41972adf0fSEmmanuel Vadot
42972adf0fSEmmanuel Vadot #include <net/bpf.h>
43972adf0fSEmmanuel Vadot #include <net/if.h>
44972adf0fSEmmanuel Vadot #include <net/ethernet.h>
45972adf0fSEmmanuel Vadot #include <net/if_dl.h>
46972adf0fSEmmanuel Vadot #include <net/if_media.h>
47972adf0fSEmmanuel Vadot #include <net/if_types.h>
48972adf0fSEmmanuel Vadot #include <net/if_var.h>
49972adf0fSEmmanuel Vadot
50972adf0fSEmmanuel Vadot #include <machine/bus.h>
51972adf0fSEmmanuel Vadot
52be82b3a0SEmmanuel Vadot #include <dev/clk/clk.h>
531f469a9fSEmmanuel Vadot #include <dev/hwreset/hwreset.h>
54972adf0fSEmmanuel Vadot
55972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus.h>
56972adf0fSEmmanuel Vadot #include <dev/ofw/ofw_bus_subr.h>
57972adf0fSEmmanuel Vadot
58972adf0fSEmmanuel Vadot #include <dev/dwc/if_dwcvar.h>
59972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_reg.h>
60972adf0fSEmmanuel Vadot #include <dev/dwc/dwc1000_dma.h>
61972adf0fSEmmanuel Vadot
#define	WATCHDOG_TIMEOUT_SECS	5
#define	DMA_RESET_TIMEOUT	100	/* polls of 10us each in dma1000_reset() */

/* TX descriptors - TDESC0 is almost unified */
#define	TDESC0_OWN		(1U << 31)	/* Descriptor owned by DMA engine */
#define	TDESC0_IHE		(1U << 16)	/* IP Header Error */
#define	TDESC0_ES		(1U << 15)	/* Error Summary */
#define	TDESC0_JT		(1U << 14)	/* Jabber Timeout */
#define	TDESC0_FF		(1U << 13)	/* Frame Flushed */
#define	TDESC0_PCE		(1U << 12)	/* Payload Checksum Error */
#define	TDESC0_LOC		(1U << 11)	/* Loss of Carrier */
#define	TDESC0_NC		(1U << 10)	/* No Carrier */
#define	TDESC0_LC		(1U <<  9)	/* Late Collision */
#define	TDESC0_EC		(1U <<  8)	/* Excessive Collision */
#define	TDESC0_VF		(1U <<  7)	/* VLAN Frame */
#define	TDESC0_CC_MASK	0xf
#define	TDESC0_CC_SHIFT	3		/* Collision Count */
#define	TDESC0_ED		(1U <<  2)	/* Excessive Deferral */
#define	TDESC0_UF		(1U <<  1)	/* Underflow Error */
#define	TDESC0_DB		(1U <<  0)	/* Deferred Bit */
/* TX descriptors - TDESC0 extended format only */
#define	ETDESC0_IC		(1U << 30)	/* Interrupt on Completion */
#define	ETDESC0_LS		(1U << 29)	/* Last Segment */
#define	ETDESC0_FS		(1U << 28)	/* First Segment */
#define	ETDESC0_DC		(1U << 27)	/* Disable CRC */
#define	ETDESC0_DP		(1U << 26)	/* Disable Padding */
#define	ETDESC0_CIC_NONE	(0U << 22)	/* Checksum Insertion Control */
#define	ETDESC0_CIC_HDR	(1U << 22)
#define	ETDESC0_CIC_SEG 	(2U << 22)
#define	ETDESC0_CIC_FULL	(3U << 22)
#define	ETDESC0_TER		(1U << 21)	/* Transmit End of Ring */
#define	ETDESC0_TCH		(1U << 20)	/* Second Address Chained */

/* TX descriptors - TDESC1 normal format */
#define	NTDESC1_IC		(1U << 31)	/* Interrupt on Completion */
#define	NTDESC1_LS		(1U << 30)	/* Last Segment */
#define	NTDESC1_FS		(1U << 29)	/* First Segment */
#define	NTDESC1_CIC_NONE	(0U << 27)	/* Checksum Insertion Control */
#define	NTDESC1_CIC_HDR	(1U << 27)
#define	NTDESC1_CIC_SEG 	(2U << 27)
#define	NTDESC1_CIC_FULL	(3U << 27)
#define	NTDESC1_DC		(1U << 26)	/* Disable CRC */
#define	NTDESC1_TER		(1U << 25)	/* Transmit End of Ring */
#define	NTDESC1_TCH		(1U << 24)	/* Second Address Chained */
/* TX descriptors - TDESC1 extended format */
#define	ETDESC1_DP		(1U << 23)	/* Disable Padding */
#define	ETDESC1_TBS2_MASK	0x7ff
#define	ETDESC1_TBS2_SHIFT	11		/* Transmit Buffer 2 Size */
#define	ETDESC1_TBS1_MASK	0x7ff
#define	ETDESC1_TBS1_SHIFT	0		/* Transmit Buffer 1 Size */

/* RX descriptor - RDESC0 is unified */
#define	RDESC0_OWN		(1U << 31)	/* Descriptor owned by DMA engine */
#define	RDESC0_AFM		(1U << 30)	/* Dest. Address Filter Fail */
#define	RDESC0_FL_MASK	0x3fff
#define	RDESC0_FL_SHIFT	16		/* Frame Length */
#define	RDESC0_ES		(1U << 15)	/* Error Summary */
#define	RDESC0_DE		(1U << 14)	/* Descriptor Error */
#define	RDESC0_SAF		(1U << 13)	/* Source Address Filter Fail */
#define	RDESC0_LE		(1U << 12)	/* Length Error */
#define	RDESC0_OE		(1U << 11)	/* Overflow Error */
#define	RDESC0_VLAN		(1U << 10)	/* VLAN Tag */
#define	RDESC0_FS		(1U <<  9)	/* First Descriptor */
#define	RDESC0_LS		(1U <<  8)	/* Last Descriptor */
#define	RDESC0_ICE		(1U <<  7)	/* IPC Checksum Error */
#define	RDESC0_LC		(1U <<  6)	/* Late Collision */
#define	RDESC0_FT		(1U <<  5)	/* Frame Type */
#define	RDESC0_RWT		(1U <<  4)	/* Receive Watchdog Timeout */
#define	RDESC0_RE		(1U <<  3)	/* Receive Error */
#define	RDESC0_DBE		(1U <<  2)	/* Dribble Bit Error */
#define	RDESC0_CE		(1U <<  1)	/* CRC Error */
/*
 * NOTE(review): bit 0 of RDESC0 carries two meanings; presumably it is
 * Payload Checksum Error when full checksum offload is enabled and
 * Rx MAC Address otherwise -- confirm against the GMAC databook.
 */
#define	RDESC0_PCE		(1U <<  0)	/* Payload Checksum Error */
#define	RDESC0_RXMA		(1U <<  0)	/* Rx MAC Address */

/* RX descriptors - RDESC1 normal format */
#define	NRDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	NRDESC1_RER		(1U << 25)	/* Receive End of Ring */
#define	NRDESC1_RCH		(1U << 24)	/* Second Address Chained */
#define	NRDESC1_RBS2_MASK	0x7ff
#define	NRDESC1_RBS2_SHIFT	11		/* Receive Buffer 2 Size */
#define	NRDESC1_RBS1_MASK	0x7ff
#define	NRDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/* RX descriptors - RDESC1 enhanced format */
#define	ERDESC1_DIC		(1U << 31)	/* Disable Intr on Completion */
#define	ERDESC1_RBS2_MASK	0x7ffff
#define	ERDESC1_RBS2_SHIFT	16		/* Receive Buffer 2 Size */
#define	ERDESC1_RER		(1U << 15)	/* Receive End of Ring */
#define	ERDESC1_RCH		(1U << 14)	/* Second Address Chained */
#define	ERDESC1_RBS1_MASK	0x7ffff
#define	ERDESC1_RBS1_SHIFT	0		/* Receive Buffer 1 Size */

/*
 * The hardware imposes alignment restrictions on various objects involved in
 * DMA transfers. These values are expressed in bytes (not bits).
 */
#define	DWC_DESC_RING_ALIGN	2048
159fe82f82dSEmmanuel Vadot
160afa0f66eSEmmanuel Vadot static inline uint32_t
next_txidx(struct dwc_softc * sc,uint32_t curidx)161afa0f66eSEmmanuel Vadot next_txidx(struct dwc_softc *sc, uint32_t curidx)
162afa0f66eSEmmanuel Vadot {
163afa0f66eSEmmanuel Vadot
164afa0f66eSEmmanuel Vadot return ((curidx + 1) % TX_DESC_COUNT);
165afa0f66eSEmmanuel Vadot }
166afa0f66eSEmmanuel Vadot
167972adf0fSEmmanuel Vadot static inline uint32_t
next_rxidx(struct dwc_softc * sc,uint32_t curidx)168972adf0fSEmmanuel Vadot next_rxidx(struct dwc_softc *sc, uint32_t curidx)
169972adf0fSEmmanuel Vadot {
170972adf0fSEmmanuel Vadot
171972adf0fSEmmanuel Vadot return ((curidx + 1) % RX_DESC_COUNT);
172972adf0fSEmmanuel Vadot }
173972adf0fSEmmanuel Vadot
174972adf0fSEmmanuel Vadot static void
dwc_get1paddr(void * arg,bus_dma_segment_t * segs,int nsegs,int error)175972adf0fSEmmanuel Vadot dwc_get1paddr(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
176972adf0fSEmmanuel Vadot {
177972adf0fSEmmanuel Vadot
178972adf0fSEmmanuel Vadot if (error != 0)
179972adf0fSEmmanuel Vadot return;
180972adf0fSEmmanuel Vadot *(bus_addr_t *)arg = segs[0].ds_addr;
181972adf0fSEmmanuel Vadot }
182972adf0fSEmmanuel Vadot
183972adf0fSEmmanuel Vadot inline static void
txdesc_clear(struct dwc_softc * sc,int idx)1847786911fSEmmanuel Vadot txdesc_clear(struct dwc_softc *sc, int idx)
185972adf0fSEmmanuel Vadot {
1867786911fSEmmanuel Vadot
1877786911fSEmmanuel Vadot sc->tx_desccount--;
1887786911fSEmmanuel Vadot sc->txdesc_ring[idx].addr1 = (uint32_t)(0);
1897786911fSEmmanuel Vadot sc->txdesc_ring[idx].desc0 = 0;
1907786911fSEmmanuel Vadot sc->txdesc_ring[idx].desc1 = 0;
191972adf0fSEmmanuel Vadot }
192972adf0fSEmmanuel Vadot
193972adf0fSEmmanuel Vadot inline static void
txdesc_setup(struct dwc_softc * sc,int idx,bus_addr_t paddr,uint32_t len,uint32_t flags,bool first,bool last)1947786911fSEmmanuel Vadot txdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr,
195972adf0fSEmmanuel Vadot uint32_t len, uint32_t flags, bool first, bool last)
196972adf0fSEmmanuel Vadot {
197972adf0fSEmmanuel Vadot uint32_t desc0, desc1;
198972adf0fSEmmanuel Vadot
1994b7975ecSEmmanuel Vadot if (!sc->dma_ext_desc) {
200972adf0fSEmmanuel Vadot desc0 = 0;
201972adf0fSEmmanuel Vadot desc1 = NTDESC1_TCH | len | flags;
202972adf0fSEmmanuel Vadot if (first)
203972adf0fSEmmanuel Vadot desc1 |= NTDESC1_FS;
204972adf0fSEmmanuel Vadot if (last)
205972adf0fSEmmanuel Vadot desc1 |= NTDESC1_LS | NTDESC1_IC;
206972adf0fSEmmanuel Vadot } else {
207972adf0fSEmmanuel Vadot desc0 = ETDESC0_TCH | flags;
208972adf0fSEmmanuel Vadot if (first)
209972adf0fSEmmanuel Vadot desc0 |= ETDESC0_FS;
210972adf0fSEmmanuel Vadot if (last)
211972adf0fSEmmanuel Vadot desc0 |= ETDESC0_LS | ETDESC0_IC;
212972adf0fSEmmanuel Vadot desc1 = len;
213972adf0fSEmmanuel Vadot }
214972adf0fSEmmanuel Vadot ++sc->tx_desccount;
215972adf0fSEmmanuel Vadot sc->txdesc_ring[idx].addr1 = (uint32_t)(paddr);
216972adf0fSEmmanuel Vadot sc->txdesc_ring[idx].desc0 = desc0;
217972adf0fSEmmanuel Vadot sc->txdesc_ring[idx].desc1 = desc1;
2187786911fSEmmanuel Vadot wmb();
21943cd6bbbSEmmanuel Vadot sc->txdesc_ring[idx].desc0 |= TDESC0_OWN;
2200cb63dcaSEmmanuel Vadot wmb();
221972adf0fSEmmanuel Vadot }
222972adf0fSEmmanuel Vadot
223972adf0fSEmmanuel Vadot inline static uint32_t
rxdesc_setup(struct dwc_softc * sc,int idx,bus_addr_t paddr)2247786911fSEmmanuel Vadot rxdesc_setup(struct dwc_softc *sc, int idx, bus_addr_t paddr)
225972adf0fSEmmanuel Vadot {
226972adf0fSEmmanuel Vadot uint32_t nidx;
227972adf0fSEmmanuel Vadot
228972adf0fSEmmanuel Vadot sc->rxdesc_ring[idx].addr1 = (uint32_t)paddr;
229972adf0fSEmmanuel Vadot nidx = next_rxidx(sc, idx);
230972adf0fSEmmanuel Vadot sc->rxdesc_ring[idx].addr2 = sc->rxdesc_ring_paddr +
231972adf0fSEmmanuel Vadot (nidx * sizeof(struct dwc_hwdesc));
2324b7975ecSEmmanuel Vadot if (!sc->dma_ext_desc)
233972adf0fSEmmanuel Vadot sc->rxdesc_ring[idx].desc1 = NRDESC1_RCH |
234972adf0fSEmmanuel Vadot MIN(MCLBYTES, NRDESC1_RBS1_MASK);
235972adf0fSEmmanuel Vadot else
236972adf0fSEmmanuel Vadot sc->rxdesc_ring[idx].desc1 = ERDESC1_RCH |
237972adf0fSEmmanuel Vadot MIN(MCLBYTES, ERDESC1_RBS1_MASK);
238972adf0fSEmmanuel Vadot
239972adf0fSEmmanuel Vadot wmb();
24043cd6bbbSEmmanuel Vadot sc->rxdesc_ring[idx].desc0 = RDESC0_OWN;
2410cb63dcaSEmmanuel Vadot wmb();
242972adf0fSEmmanuel Vadot return (nidx);
243972adf0fSEmmanuel Vadot }
244972adf0fSEmmanuel Vadot
/*
 * Map the mbuf chain *mp for transmission starting at buffer-map slot
 * idx, and program one TX descriptor per DMA segment.  May replace *mp
 * with a defragmented chain.  Returns 0 on success or ENOMEM if the
 * chain cannot be mapped or does not fit in the descriptor ring.
 */
int
dma1000_setup_txbuf(struct dwc_softc *sc, int idx, struct mbuf **mp)
{
	struct bus_dma_segment segs[TX_MAP_MAX_SEGS];
	int error, nsegs;
	struct mbuf * m;
	uint32_t flags = 0;
	int i;
	int last;

	/* First mapping attempt; EFBIG means too many segments. */
	error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    *mp, segs, &nsegs, 0);
	if (error == EFBIG) {
		/*
		 * The map may be partially mapped from the first call.
		 * Make sure to reset it.
		 */
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		if ((m = m_defrag(*mp, M_NOWAIT)) == NULL)
			return (ENOMEM);
		*mp = m;
		/* Retry once with the defragmented chain. */
		error = bus_dmamap_load_mbuf_sg(sc->txbuf_tag, sc->txbuf_map[idx].map,
		    *mp, segs, &nsegs, 0);
	}
	if (error != 0)
		return (ENOMEM);

	/* Refuse if the ring does not have room for every segment. */
	if (sc->tx_desccount + nsegs > TX_DESC_COUNT) {
		bus_dmamap_unload(sc->txbuf_tag, sc->txbuf_map[idx].map);
		return (ENOMEM);
	}

	m = *mp;

	/*
	 * Select the checksum-insertion mode requested by the mbuf:
	 * full (header + payload) when TCP/UDP checksumming is asked
	 * for, header-only otherwise.  The flag encoding differs
	 * between normal and extended descriptor formats.
	 */
	if ((m->m_pkthdr.csum_flags & CSUM_IP) != 0) {
		if ((m->m_pkthdr.csum_flags & (CSUM_TCP|CSUM_UDP)) != 0) {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_FULL;
			else
				flags = ETDESC0_CIC_FULL;
		} else {
			if (!sc->dma_ext_desc)
				flags = NTDESC1_CIC_HDR;
			else
				flags = ETDESC0_CIC_HDR;
		}
	}

	bus_dmamap_sync(sc->txbuf_tag, sc->txbuf_map[idx].map,
	    BUS_DMASYNC_PREWRITE);

	sc->txbuf_map[idx].mbuf = m;

	/* One descriptor per segment, advancing the ring head. */
	for (i = 0; i < nsegs; i++) {
		txdesc_setup(sc, sc->tx_desc_head,
		    segs[i].ds_addr, segs[i].ds_len,
		    (i == 0) ? flags : 0, /* only first desc needs flags */
		    (i == 0),
		    (i == nsegs - 1));
		last = sc->tx_desc_head;
		sc->tx_desc_head = next_txidx(sc, sc->tx_desc_head);
	}

	/* NOTE(review): assumes nsegs > 0 here, else 'last' is uninitialized. */
	sc->txbuf_map[idx].last_desc_idx = last;

	return (0);
}
312972adf0fSEmmanuel Vadot
313972adf0fSEmmanuel Vadot static int
dma1000_setup_rxbuf(struct dwc_softc * sc,int idx,struct mbuf * m)314972adf0fSEmmanuel Vadot dma1000_setup_rxbuf(struct dwc_softc *sc, int idx, struct mbuf *m)
315972adf0fSEmmanuel Vadot {
316972adf0fSEmmanuel Vadot struct bus_dma_segment seg;
317972adf0fSEmmanuel Vadot int error, nsegs;
318972adf0fSEmmanuel Vadot
319972adf0fSEmmanuel Vadot m_adj(m, ETHER_ALIGN);
320972adf0fSEmmanuel Vadot
321972adf0fSEmmanuel Vadot error = bus_dmamap_load_mbuf_sg(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
322972adf0fSEmmanuel Vadot m, &seg, &nsegs, 0);
323972adf0fSEmmanuel Vadot if (error != 0)
324972adf0fSEmmanuel Vadot return (error);
325972adf0fSEmmanuel Vadot
326972adf0fSEmmanuel Vadot KASSERT(nsegs == 1, ("%s: %d segments returned!", __func__, nsegs));
327972adf0fSEmmanuel Vadot
328972adf0fSEmmanuel Vadot bus_dmamap_sync(sc->rxbuf_tag, sc->rxbuf_map[idx].map,
329972adf0fSEmmanuel Vadot BUS_DMASYNC_PREREAD);
330972adf0fSEmmanuel Vadot
331972adf0fSEmmanuel Vadot sc->rxbuf_map[idx].mbuf = m;
3327786911fSEmmanuel Vadot rxdesc_setup(sc, idx, seg.ds_addr);
333972adf0fSEmmanuel Vadot
334972adf0fSEmmanuel Vadot return (0);
335972adf0fSEmmanuel Vadot }
336972adf0fSEmmanuel Vadot
337972adf0fSEmmanuel Vadot static struct mbuf *
dwc_alloc_mbufcl(struct dwc_softc * sc)338972adf0fSEmmanuel Vadot dwc_alloc_mbufcl(struct dwc_softc *sc)
339972adf0fSEmmanuel Vadot {
340972adf0fSEmmanuel Vadot struct mbuf *m;
341972adf0fSEmmanuel Vadot
342972adf0fSEmmanuel Vadot m = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
343972adf0fSEmmanuel Vadot if (m != NULL)
344972adf0fSEmmanuel Vadot m->m_pkthdr.len = m->m_len = m->m_ext.ext_size;
345972adf0fSEmmanuel Vadot
346972adf0fSEmmanuel Vadot return (m);
347972adf0fSEmmanuel Vadot }
348972adf0fSEmmanuel Vadot
/*
 * Process one completed RX descriptor.  On success the received mbuf
 * is handed to the network stack and a freshly allocated replacement
 * mbuf is returned for the caller to install in the ring.  Returns
 * NULL when the old mbuf must be recycled instead (bad descriptor,
 * runt frame, or replacement allocation failure).  Called with the
 * driver lock held; the lock is dropped around if_input().
 */
static struct mbuf *
dwc_rxfinish_one(struct dwc_softc *sc, struct dwc_hwdesc *desc,
    struct dwc_bufmap *map)
{
	if_t ifp;
	struct mbuf *m, *m0;
	int len;
	uint32_t rdesc0;

	m = map->mbuf;
	ifp = sc->ifp;
	rdesc0 = desc ->desc0;

	if ((rdesc0 & (RDESC0_FS | RDESC0_LS)) !=
	    (RDESC0_FS | RDESC0_LS)) {
		/*
		 * Something very wrong happened. The whole packet should be
		 * received in one descriptor. Report problem.
		 */
		device_printf(sc->dev,
		    "%s: RX descriptor without FIRST and LAST bit set: 0x%08X",
		    __func__, rdesc0);
		return (NULL);
	}

	/* Frame length as reported by hardware in RDESC0. */
	len = (rdesc0 >> RDESC0_FL_SHIFT) & RDESC0_FL_MASK;
	if (len < 64) {
		/*
		 * Length is invalid, recycle old mbuf
		 * Probably impossible case
		 */
		return (NULL);
	}

	/* Allocate new buffer */
	m0 = dwc_alloc_mbufcl(sc);
	if (m0 == NULL) {
		/* no new mbuf available, recycle old */
		if_inc_counter(sc->ifp, IFCOUNTER_IQDROPS, 1);
		return (NULL);
	}
	/* Do dmasync for newly received packet */
	bus_dmamap_sync(sc->rxbuf_tag, map->map, BUS_DMASYNC_POSTREAD);
	bus_dmamap_unload(sc->rxbuf_tag, map->map);

	/* Received packet is valid, process it */
	m->m_pkthdr.rcvif = ifp;
	m->m_pkthdr.len = len;
	m->m_len = len;
	if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);

	/*
	 * Translate the hardware checksum status into mbuf flags when
	 * RX checksum offload is enabled and the frame is an Ethernet
	 * type frame (RDESC0_FT).
	 */
	if ((if_getcapenable(ifp) & IFCAP_RXCSUM) != 0 &&
	    (rdesc0 & RDESC0_FT) != 0) {
		m->m_pkthdr.csum_flags = CSUM_IP_CHECKED;
		if ((rdesc0 & RDESC0_ICE) == 0)
			m->m_pkthdr.csum_flags |= CSUM_IP_VALID;
		if ((rdesc0 & RDESC0_PCE) == 0) {
			m->m_pkthdr.csum_flags |=
			    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
			m->m_pkthdr.csum_data = 0xffff;
		}
	}

	/* Remove trailing FCS */
	m_adj(m, -ETHER_CRC_LEN);

	/* Drop the driver lock while the stack consumes the packet. */
	DWC_UNLOCK(sc);
	if_input(ifp, m);
	DWC_LOCK(sc);
	return (m0);
}
420972adf0fSEmmanuel Vadot
/*
 * Reclaim completed TX buffers.  Walks the buffer-map ring from tail
 * to head; a map is finished only when hardware has cleared TDESC0_OWN
 * on every descriptor belonging to it.  Finished maps are unloaded,
 * their mbufs freed, and their descriptors cleared.  Called with the
 * driver lock held.
 */
void
dma1000_txfinish_locked(struct dwc_softc *sc)
{
	struct dwc_bufmap *bmap;
	struct dwc_hwdesc *desc;
	if_t ifp;
	int idx, last_idx;
	bool map_finished;

	DWC_ASSERT_LOCKED(sc);

	ifp = sc->ifp;
	/* check if all descriptors of the map are done */
	while (sc->tx_map_tail != sc->tx_map_head) {
		map_finished = true;
		bmap = &sc->txbuf_map[sc->tx_map_tail];
		idx = sc->tx_desc_tail;
		/* One past the map's last descriptor bounds the scan. */
		last_idx = next_txidx(sc, bmap->last_desc_idx);
		while (idx != last_idx) {
			desc = &sc->txdesc_ring[idx];
			if ((desc->desc0 & TDESC0_OWN) != 0) {
				/* Hardware still owns this one; stop. */
				map_finished = false;
				break;
			}
			idx = next_txidx(sc, idx);
		}

		if (!map_finished)
			break;
		bus_dmamap_sync(sc->txbuf_tag, bmap->map,
		    BUS_DMASYNC_POSTWRITE);
		bus_dmamap_unload(sc->txbuf_tag, bmap->map);
		m_freem(bmap->mbuf);
		bmap->mbuf = NULL;
		sc->tx_mapcount--;
		/* Return every descriptor of the finished map to the ring. */
		while (sc->tx_desc_tail != last_idx) {
			txdesc_clear(sc, sc->tx_desc_tail);
			sc->tx_desc_tail = next_txidx(sc, sc->tx_desc_tail);
		}
		sc->tx_map_tail = next_txidx(sc, sc->tx_map_tail);
		if_setdrvflagbits(ifp, 0, IFF_DRV_OACTIVE);
		if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
	}

	/* If there are no buffers outstanding, muzzle the watchdog. */
	if (sc->tx_desc_tail == sc->tx_desc_head) {
		sc->tx_watchdog_count = 0;
	}
}
470972adf0fSEmmanuel Vadot
471972adf0fSEmmanuel Vadot void
dma1000_txstart(struct dwc_softc * sc)472afa0f66eSEmmanuel Vadot dma1000_txstart(struct dwc_softc *sc)
473afa0f66eSEmmanuel Vadot {
474afa0f66eSEmmanuel Vadot int enqueued;
475afa0f66eSEmmanuel Vadot struct mbuf *m;
476afa0f66eSEmmanuel Vadot
477afa0f66eSEmmanuel Vadot enqueued = 0;
478afa0f66eSEmmanuel Vadot
479afa0f66eSEmmanuel Vadot for (;;) {
480afa0f66eSEmmanuel Vadot if (sc->tx_desccount > (TX_DESC_COUNT - TX_MAP_MAX_SEGS + 1)) {
481afa0f66eSEmmanuel Vadot if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
482afa0f66eSEmmanuel Vadot break;
483afa0f66eSEmmanuel Vadot }
484afa0f66eSEmmanuel Vadot
485afa0f66eSEmmanuel Vadot if (sc->tx_mapcount == (TX_MAP_COUNT - 1)) {
486afa0f66eSEmmanuel Vadot if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
487afa0f66eSEmmanuel Vadot break;
488afa0f66eSEmmanuel Vadot }
489afa0f66eSEmmanuel Vadot
490afa0f66eSEmmanuel Vadot m = if_dequeue(sc->ifp);
491afa0f66eSEmmanuel Vadot if (m == NULL)
492afa0f66eSEmmanuel Vadot break;
493afa0f66eSEmmanuel Vadot if (dma1000_setup_txbuf(sc, sc->tx_map_head, &m) != 0) {
494afa0f66eSEmmanuel Vadot if_sendq_prepend(sc->ifp, m);
495afa0f66eSEmmanuel Vadot if_setdrvflagbits(sc->ifp, IFF_DRV_OACTIVE, 0);
496afa0f66eSEmmanuel Vadot break;
497afa0f66eSEmmanuel Vadot }
498afa0f66eSEmmanuel Vadot bpf_mtap_if(sc->ifp, m);
499afa0f66eSEmmanuel Vadot sc->tx_map_head = next_txidx(sc, sc->tx_map_head);
500afa0f66eSEmmanuel Vadot sc->tx_mapcount++;
501afa0f66eSEmmanuel Vadot ++enqueued;
502afa0f66eSEmmanuel Vadot }
503afa0f66eSEmmanuel Vadot
504afa0f66eSEmmanuel Vadot if (enqueued != 0) {
505afa0f66eSEmmanuel Vadot WRITE4(sc, TRANSMIT_POLL_DEMAND, 0x1);
506afa0f66eSEmmanuel Vadot sc->tx_watchdog_count = WATCHDOG_TIMEOUT_SECS;
507afa0f66eSEmmanuel Vadot }
508afa0f66eSEmmanuel Vadot }
509afa0f66eSEmmanuel Vadot
510afa0f66eSEmmanuel Vadot void
dma1000_rxfinish_locked(struct dwc_softc * sc)511972adf0fSEmmanuel Vadot dma1000_rxfinish_locked(struct dwc_softc *sc)
512972adf0fSEmmanuel Vadot {
513972adf0fSEmmanuel Vadot struct mbuf *m;
514972adf0fSEmmanuel Vadot int error, idx;
515972adf0fSEmmanuel Vadot struct dwc_hwdesc *desc;
516972adf0fSEmmanuel Vadot
517972adf0fSEmmanuel Vadot DWC_ASSERT_LOCKED(sc);
518972adf0fSEmmanuel Vadot for (;;) {
519972adf0fSEmmanuel Vadot idx = sc->rx_idx;
520972adf0fSEmmanuel Vadot desc = sc->rxdesc_ring + idx;
521972adf0fSEmmanuel Vadot if ((desc->desc0 & RDESC0_OWN) != 0)
522972adf0fSEmmanuel Vadot break;
523972adf0fSEmmanuel Vadot
524972adf0fSEmmanuel Vadot m = dwc_rxfinish_one(sc, desc, sc->rxbuf_map + idx);
525972adf0fSEmmanuel Vadot if (m == NULL) {
526972adf0fSEmmanuel Vadot wmb();
52743cd6bbbSEmmanuel Vadot desc->desc0 = RDESC0_OWN;
5280cb63dcaSEmmanuel Vadot wmb();
529972adf0fSEmmanuel Vadot } else {
530972adf0fSEmmanuel Vadot /* We cannot create hole in RX ring */
531972adf0fSEmmanuel Vadot error = dma1000_setup_rxbuf(sc, idx, m);
532972adf0fSEmmanuel Vadot if (error != 0)
533972adf0fSEmmanuel Vadot panic("dma1000_setup_rxbuf failed: error %d\n",
534972adf0fSEmmanuel Vadot error);
535972adf0fSEmmanuel Vadot
536972adf0fSEmmanuel Vadot }
537972adf0fSEmmanuel Vadot sc->rx_idx = next_rxidx(sc, sc->rx_idx);
538972adf0fSEmmanuel Vadot }
539972adf0fSEmmanuel Vadot }
540972adf0fSEmmanuel Vadot
/*
 * Start the DMA controller
 */
void
dma1000_start(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Initialize DMA and enable transmitters */
	reg = READ4(sc, OPERATION_MODE);
	/* Store-and-forward TX, operate-on-second-frame, forward undersized. */
	reg |= (MODE_TSF | MODE_OSF | MODE_FUF);
	/* Cut-through RX: start forwarding at the RTC threshold. */
	reg &= ~(MODE_RSF);
	reg |= (MODE_RTC_LEV32 << MODE_RTC_SHIFT);
	WRITE4(sc, OPERATION_MODE, reg);

	WRITE4(sc, INTERRUPT_ENABLE, INT_EN_DEFAULT);

	/* Start DMA */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_ST | MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}
565972adf0fSEmmanuel Vadot
/*
 * Stop the DMA controller
 */
void
dma1000_stop(struct dwc_softc *sc)
{
	uint32_t reg;

	DWC_ASSERT_LOCKED(sc);

	/* Stop DMA TX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_ST);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Flush TX so no stale data remains in the FIFO */
	reg = READ4(sc, OPERATION_MODE);
	reg |= (MODE_FTF);
	WRITE4(sc, OPERATION_MODE, reg);

	/* Stop DMA RX */
	reg = READ4(sc, OPERATION_MODE);
	reg &= ~(MODE_SR);
	WRITE4(sc, OPERATION_MODE, reg);
}
591972adf0fSEmmanuel Vadot
592363b7c39SEmmanuel Vadot int
dma1000_reset(struct dwc_softc * sc)593363b7c39SEmmanuel Vadot dma1000_reset(struct dwc_softc *sc)
594363b7c39SEmmanuel Vadot {
595363b7c39SEmmanuel Vadot uint32_t reg;
596363b7c39SEmmanuel Vadot int i;
597363b7c39SEmmanuel Vadot
598363b7c39SEmmanuel Vadot reg = READ4(sc, BUS_MODE);
599363b7c39SEmmanuel Vadot reg |= (BUS_MODE_SWR);
600363b7c39SEmmanuel Vadot WRITE4(sc, BUS_MODE, reg);
601363b7c39SEmmanuel Vadot
602363b7c39SEmmanuel Vadot for (i = 0; i < DMA_RESET_TIMEOUT; i++) {
603363b7c39SEmmanuel Vadot if ((READ4(sc, BUS_MODE) & BUS_MODE_SWR) == 0)
604363b7c39SEmmanuel Vadot break;
605363b7c39SEmmanuel Vadot DELAY(10);
606363b7c39SEmmanuel Vadot }
607363b7c39SEmmanuel Vadot if (i >= DMA_RESET_TIMEOUT) {
608363b7c39SEmmanuel Vadot return (ENXIO);
609363b7c39SEmmanuel Vadot }
610363b7c39SEmmanuel Vadot
611363b7c39SEmmanuel Vadot return (0);
612363b7c39SEmmanuel Vadot }
613363b7c39SEmmanuel Vadot
614972adf0fSEmmanuel Vadot /*
615972adf0fSEmmanuel Vadot * Create the bus_dma resources
616972adf0fSEmmanuel Vadot */
617972adf0fSEmmanuel Vadot int
dma1000_init(struct dwc_softc * sc)618972adf0fSEmmanuel Vadot dma1000_init(struct dwc_softc *sc)
619972adf0fSEmmanuel Vadot {
620972adf0fSEmmanuel Vadot struct mbuf *m;
621f0a7dd77SEmmanuel Vadot uint32_t reg;
622972adf0fSEmmanuel Vadot int error;
623972adf0fSEmmanuel Vadot int nidx;
624972adf0fSEmmanuel Vadot int idx;
625972adf0fSEmmanuel Vadot
6265d88a52bSEmmanuel Vadot reg = BUS_MODE_USP;
6275d88a52bSEmmanuel Vadot if (!sc->nopblx8)
6285d88a52bSEmmanuel Vadot reg |= BUS_MODE_EIGHTXPBL;
6295d88a52bSEmmanuel Vadot reg |= (sc->txpbl << BUS_MODE_PBL_SHIFT);
6305d88a52bSEmmanuel Vadot reg |= (sc->rxpbl << BUS_MODE_RPBL_SHIFT);
6315d88a52bSEmmanuel Vadot if (sc->fixed_burst)
6325d88a52bSEmmanuel Vadot reg |= BUS_MODE_FIXEDBURST;
6335d88a52bSEmmanuel Vadot if (sc->mixed_burst)
6345d88a52bSEmmanuel Vadot reg |= BUS_MODE_MIXEDBURST;
6355d88a52bSEmmanuel Vadot if (sc->aal)
6365d88a52bSEmmanuel Vadot reg |= BUS_MODE_AAL;
6375d88a52bSEmmanuel Vadot
6385d88a52bSEmmanuel Vadot WRITE4(sc, BUS_MODE, reg);
6395d88a52bSEmmanuel Vadot
6404b7975ecSEmmanuel Vadot reg = READ4(sc, HW_FEATURE);
6414b7975ecSEmmanuel Vadot if (reg & HW_FEATURE_EXT_DESCRIPTOR)
6424b7975ecSEmmanuel Vadot sc->dma_ext_desc = true;
6434b7975ecSEmmanuel Vadot
644972adf0fSEmmanuel Vadot /*
645f0a7dd77SEmmanuel Vadot * DMA must be stop while changing descriptor list addresses.
646f0a7dd77SEmmanuel Vadot */
647f0a7dd77SEmmanuel Vadot reg = READ4(sc, OPERATION_MODE);
648f0a7dd77SEmmanuel Vadot reg &= ~(MODE_ST | MODE_SR);
649f0a7dd77SEmmanuel Vadot WRITE4(sc, OPERATION_MODE, reg);
650f0a7dd77SEmmanuel Vadot
651f0a7dd77SEmmanuel Vadot /*
652972adf0fSEmmanuel Vadot * Set up TX descriptor ring, descriptors, and dma maps.
653972adf0fSEmmanuel Vadot */
654972adf0fSEmmanuel Vadot error = bus_dma_tag_create(
655972adf0fSEmmanuel Vadot bus_get_dma_tag(sc->dev), /* Parent tag. */
656972adf0fSEmmanuel Vadot DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */
657972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
658972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR, /* highaddr */
659972adf0fSEmmanuel Vadot NULL, NULL, /* filter, filterarg */
660972adf0fSEmmanuel Vadot TX_DESC_SIZE, 1, /* maxsize, nsegments */
661972adf0fSEmmanuel Vadot TX_DESC_SIZE, /* maxsegsize */
662972adf0fSEmmanuel Vadot 0, /* flags */
663972adf0fSEmmanuel Vadot NULL, NULL, /* lockfunc, lockarg */
664972adf0fSEmmanuel Vadot &sc->txdesc_tag);
665972adf0fSEmmanuel Vadot if (error != 0) {
666972adf0fSEmmanuel Vadot device_printf(sc->dev,
667972adf0fSEmmanuel Vadot "could not create TX ring DMA tag.\n");
668972adf0fSEmmanuel Vadot goto out;
669972adf0fSEmmanuel Vadot }
670972adf0fSEmmanuel Vadot
671972adf0fSEmmanuel Vadot error = bus_dmamem_alloc(sc->txdesc_tag, (void**)&sc->txdesc_ring,
672972adf0fSEmmanuel Vadot BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
673972adf0fSEmmanuel Vadot &sc->txdesc_map);
674972adf0fSEmmanuel Vadot if (error != 0) {
675972adf0fSEmmanuel Vadot device_printf(sc->dev,
676972adf0fSEmmanuel Vadot "could not allocate TX descriptor ring.\n");
677972adf0fSEmmanuel Vadot goto out;
678972adf0fSEmmanuel Vadot }
679972adf0fSEmmanuel Vadot
680972adf0fSEmmanuel Vadot error = bus_dmamap_load(sc->txdesc_tag, sc->txdesc_map,
681972adf0fSEmmanuel Vadot sc->txdesc_ring, TX_DESC_SIZE, dwc_get1paddr,
682972adf0fSEmmanuel Vadot &sc->txdesc_ring_paddr, 0);
683972adf0fSEmmanuel Vadot if (error != 0) {
684972adf0fSEmmanuel Vadot device_printf(sc->dev,
685972adf0fSEmmanuel Vadot "could not load TX descriptor ring map.\n");
686972adf0fSEmmanuel Vadot goto out;
687972adf0fSEmmanuel Vadot }
688972adf0fSEmmanuel Vadot
689972adf0fSEmmanuel Vadot for (idx = 0; idx < TX_DESC_COUNT; idx++) {
690972adf0fSEmmanuel Vadot nidx = next_txidx(sc, idx);
691972adf0fSEmmanuel Vadot sc->txdesc_ring[idx].addr2 = sc->txdesc_ring_paddr +
692972adf0fSEmmanuel Vadot (nidx * sizeof(struct dwc_hwdesc));
693972adf0fSEmmanuel Vadot }
694972adf0fSEmmanuel Vadot
695972adf0fSEmmanuel Vadot error = bus_dma_tag_create(
696972adf0fSEmmanuel Vadot bus_get_dma_tag(sc->dev), /* Parent tag. */
697972adf0fSEmmanuel Vadot 1, 0, /* alignment, boundary */
698972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
699972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR, /* highaddr */
700972adf0fSEmmanuel Vadot NULL, NULL, /* filter, filterarg */
701972adf0fSEmmanuel Vadot MCLBYTES*TX_MAP_MAX_SEGS, /* maxsize */
702972adf0fSEmmanuel Vadot TX_MAP_MAX_SEGS, /* nsegments */
703972adf0fSEmmanuel Vadot MCLBYTES, /* maxsegsize */
704972adf0fSEmmanuel Vadot 0, /* flags */
705972adf0fSEmmanuel Vadot NULL, NULL, /* lockfunc, lockarg */
706972adf0fSEmmanuel Vadot &sc->txbuf_tag);
707972adf0fSEmmanuel Vadot if (error != 0) {
708972adf0fSEmmanuel Vadot device_printf(sc->dev,
709972adf0fSEmmanuel Vadot "could not create TX ring DMA tag.\n");
710972adf0fSEmmanuel Vadot goto out;
711972adf0fSEmmanuel Vadot }
712972adf0fSEmmanuel Vadot
713972adf0fSEmmanuel Vadot for (idx = 0; idx < TX_MAP_COUNT; idx++) {
714972adf0fSEmmanuel Vadot error = bus_dmamap_create(sc->txbuf_tag, BUS_DMA_COHERENT,
715972adf0fSEmmanuel Vadot &sc->txbuf_map[idx].map);
716972adf0fSEmmanuel Vadot if (error != 0) {
717972adf0fSEmmanuel Vadot device_printf(sc->dev,
718972adf0fSEmmanuel Vadot "could not create TX buffer DMA map.\n");
719972adf0fSEmmanuel Vadot goto out;
720972adf0fSEmmanuel Vadot }
721972adf0fSEmmanuel Vadot }
722972adf0fSEmmanuel Vadot
723972adf0fSEmmanuel Vadot for (idx = 0; idx < TX_DESC_COUNT; idx++)
7247786911fSEmmanuel Vadot txdesc_clear(sc, idx);
725972adf0fSEmmanuel Vadot
726f0a7dd77SEmmanuel Vadot WRITE4(sc, TX_DESCR_LIST_ADDR, sc->txdesc_ring_paddr);
727f0a7dd77SEmmanuel Vadot
728972adf0fSEmmanuel Vadot /*
729972adf0fSEmmanuel Vadot * Set up RX descriptor ring, descriptors, dma maps, and mbufs.
730972adf0fSEmmanuel Vadot */
731972adf0fSEmmanuel Vadot error = bus_dma_tag_create(
732972adf0fSEmmanuel Vadot bus_get_dma_tag(sc->dev), /* Parent tag. */
733972adf0fSEmmanuel Vadot DWC_DESC_RING_ALIGN, 0, /* alignment, boundary */
734972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
735972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR, /* highaddr */
736972adf0fSEmmanuel Vadot NULL, NULL, /* filter, filterarg */
737972adf0fSEmmanuel Vadot RX_DESC_SIZE, 1, /* maxsize, nsegments */
738972adf0fSEmmanuel Vadot RX_DESC_SIZE, /* maxsegsize */
739972adf0fSEmmanuel Vadot 0, /* flags */
740972adf0fSEmmanuel Vadot NULL, NULL, /* lockfunc, lockarg */
741972adf0fSEmmanuel Vadot &sc->rxdesc_tag);
742972adf0fSEmmanuel Vadot if (error != 0) {
743972adf0fSEmmanuel Vadot device_printf(sc->dev,
744972adf0fSEmmanuel Vadot "could not create RX ring DMA tag.\n");
745972adf0fSEmmanuel Vadot goto out;
746972adf0fSEmmanuel Vadot }
747972adf0fSEmmanuel Vadot
748972adf0fSEmmanuel Vadot error = bus_dmamem_alloc(sc->rxdesc_tag, (void **)&sc->rxdesc_ring,
749972adf0fSEmmanuel Vadot BUS_DMA_COHERENT | BUS_DMA_WAITOK | BUS_DMA_ZERO,
750972adf0fSEmmanuel Vadot &sc->rxdesc_map);
751972adf0fSEmmanuel Vadot if (error != 0) {
752972adf0fSEmmanuel Vadot device_printf(sc->dev,
753972adf0fSEmmanuel Vadot "could not allocate RX descriptor ring.\n");
754972adf0fSEmmanuel Vadot goto out;
755972adf0fSEmmanuel Vadot }
756972adf0fSEmmanuel Vadot
757972adf0fSEmmanuel Vadot error = bus_dmamap_load(sc->rxdesc_tag, sc->rxdesc_map,
758972adf0fSEmmanuel Vadot sc->rxdesc_ring, RX_DESC_SIZE, dwc_get1paddr,
759972adf0fSEmmanuel Vadot &sc->rxdesc_ring_paddr, 0);
760972adf0fSEmmanuel Vadot if (error != 0) {
761972adf0fSEmmanuel Vadot device_printf(sc->dev,
762972adf0fSEmmanuel Vadot "could not load RX descriptor ring map.\n");
763972adf0fSEmmanuel Vadot goto out;
764972adf0fSEmmanuel Vadot }
765972adf0fSEmmanuel Vadot
766972adf0fSEmmanuel Vadot error = bus_dma_tag_create(
767972adf0fSEmmanuel Vadot bus_get_dma_tag(sc->dev), /* Parent tag. */
768972adf0fSEmmanuel Vadot 1, 0, /* alignment, boundary */
769972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
770972adf0fSEmmanuel Vadot BUS_SPACE_MAXADDR, /* highaddr */
771972adf0fSEmmanuel Vadot NULL, NULL, /* filter, filterarg */
772972adf0fSEmmanuel Vadot MCLBYTES, 1, /* maxsize, nsegments */
773972adf0fSEmmanuel Vadot MCLBYTES, /* maxsegsize */
774972adf0fSEmmanuel Vadot 0, /* flags */
775972adf0fSEmmanuel Vadot NULL, NULL, /* lockfunc, lockarg */
776972adf0fSEmmanuel Vadot &sc->rxbuf_tag);
777972adf0fSEmmanuel Vadot if (error != 0) {
778972adf0fSEmmanuel Vadot device_printf(sc->dev,
779972adf0fSEmmanuel Vadot "could not create RX buf DMA tag.\n");
780972adf0fSEmmanuel Vadot goto out;
781972adf0fSEmmanuel Vadot }
782972adf0fSEmmanuel Vadot
783972adf0fSEmmanuel Vadot for (idx = 0; idx < RX_DESC_COUNT; idx++) {
784972adf0fSEmmanuel Vadot error = bus_dmamap_create(sc->rxbuf_tag, BUS_DMA_COHERENT,
785972adf0fSEmmanuel Vadot &sc->rxbuf_map[idx].map);
786972adf0fSEmmanuel Vadot if (error != 0) {
787972adf0fSEmmanuel Vadot device_printf(sc->dev,
788972adf0fSEmmanuel Vadot "could not create RX buffer DMA map.\n");
789972adf0fSEmmanuel Vadot goto out;
790972adf0fSEmmanuel Vadot }
791972adf0fSEmmanuel Vadot if ((m = dwc_alloc_mbufcl(sc)) == NULL) {
792972adf0fSEmmanuel Vadot device_printf(sc->dev, "Could not alloc mbuf\n");
793972adf0fSEmmanuel Vadot error = ENOMEM;
794972adf0fSEmmanuel Vadot goto out;
795972adf0fSEmmanuel Vadot }
796972adf0fSEmmanuel Vadot if ((error = dma1000_setup_rxbuf(sc, idx, m)) != 0) {
797972adf0fSEmmanuel Vadot device_printf(sc->dev,
798972adf0fSEmmanuel Vadot "could not create new RX buffer.\n");
799972adf0fSEmmanuel Vadot goto out;
800972adf0fSEmmanuel Vadot }
801972adf0fSEmmanuel Vadot }
802f0a7dd77SEmmanuel Vadot WRITE4(sc, RX_DESCR_LIST_ADDR, sc->rxdesc_ring_paddr);
803972adf0fSEmmanuel Vadot
804972adf0fSEmmanuel Vadot out:
805972adf0fSEmmanuel Vadot if (error != 0)
806972adf0fSEmmanuel Vadot return (ENXIO);
807972adf0fSEmmanuel Vadot
808972adf0fSEmmanuel Vadot return (0);
809972adf0fSEmmanuel Vadot }
810972adf0fSEmmanuel Vadot
811972adf0fSEmmanuel Vadot /*
812972adf0fSEmmanuel Vadot * Free the bus_dma resources
813972adf0fSEmmanuel Vadot */
814972adf0fSEmmanuel Vadot void
dma1000_free(struct dwc_softc * sc)815972adf0fSEmmanuel Vadot dma1000_free(struct dwc_softc *sc)
816972adf0fSEmmanuel Vadot {
817972adf0fSEmmanuel Vadot bus_dmamap_t map;
818972adf0fSEmmanuel Vadot int idx;
819972adf0fSEmmanuel Vadot
820972adf0fSEmmanuel Vadot /* Clean up RX DMA resources and free mbufs. */
821972adf0fSEmmanuel Vadot for (idx = 0; idx < RX_DESC_COUNT; ++idx) {
822972adf0fSEmmanuel Vadot if ((map = sc->rxbuf_map[idx].map) != NULL) {
823972adf0fSEmmanuel Vadot bus_dmamap_unload(sc->rxbuf_tag, map);
824972adf0fSEmmanuel Vadot bus_dmamap_destroy(sc->rxbuf_tag, map);
825972adf0fSEmmanuel Vadot m_freem(sc->rxbuf_map[idx].mbuf);
826972adf0fSEmmanuel Vadot }
827972adf0fSEmmanuel Vadot }
828972adf0fSEmmanuel Vadot if (sc->rxbuf_tag != NULL)
829972adf0fSEmmanuel Vadot bus_dma_tag_destroy(sc->rxbuf_tag);
830972adf0fSEmmanuel Vadot if (sc->rxdesc_map != NULL) {
831972adf0fSEmmanuel Vadot bus_dmamap_unload(sc->rxdesc_tag, sc->rxdesc_map);
832972adf0fSEmmanuel Vadot bus_dmamem_free(sc->rxdesc_tag, sc->rxdesc_ring,
833972adf0fSEmmanuel Vadot sc->rxdesc_map);
834972adf0fSEmmanuel Vadot }
835972adf0fSEmmanuel Vadot if (sc->rxdesc_tag != NULL)
836972adf0fSEmmanuel Vadot bus_dma_tag_destroy(sc->rxdesc_tag);
837972adf0fSEmmanuel Vadot
838972adf0fSEmmanuel Vadot /* Clean up TX DMA resources. */
839972adf0fSEmmanuel Vadot for (idx = 0; idx < TX_DESC_COUNT; ++idx) {
840972adf0fSEmmanuel Vadot if ((map = sc->txbuf_map[idx].map) != NULL) {
841972adf0fSEmmanuel Vadot /* TX maps are already unloaded. */
842972adf0fSEmmanuel Vadot bus_dmamap_destroy(sc->txbuf_tag, map);
843972adf0fSEmmanuel Vadot }
844972adf0fSEmmanuel Vadot }
845972adf0fSEmmanuel Vadot if (sc->txbuf_tag != NULL)
846972adf0fSEmmanuel Vadot bus_dma_tag_destroy(sc->txbuf_tag);
847972adf0fSEmmanuel Vadot if (sc->txdesc_map != NULL) {
848972adf0fSEmmanuel Vadot bus_dmamap_unload(sc->txdesc_tag, sc->txdesc_map);
849972adf0fSEmmanuel Vadot bus_dmamem_free(sc->txdesc_tag, sc->txdesc_ring,
850972adf0fSEmmanuel Vadot sc->txdesc_map);
851972adf0fSEmmanuel Vadot }
852972adf0fSEmmanuel Vadot if (sc->txdesc_tag != NULL)
853972adf0fSEmmanuel Vadot bus_dma_tag_destroy(sc->txdesc_tag);
854972adf0fSEmmanuel Vadot }
85529776aa4SEmmanuel Vadot
85629776aa4SEmmanuel Vadot /*
85729776aa4SEmmanuel Vadot * Interrupt function
85829776aa4SEmmanuel Vadot */
85929776aa4SEmmanuel Vadot
86029776aa4SEmmanuel Vadot int
dma1000_intr(struct dwc_softc * sc)86129776aa4SEmmanuel Vadot dma1000_intr(struct dwc_softc *sc)
86229776aa4SEmmanuel Vadot {
86329776aa4SEmmanuel Vadot uint32_t reg;
86429776aa4SEmmanuel Vadot int rv;
86529776aa4SEmmanuel Vadot
86629776aa4SEmmanuel Vadot DWC_ASSERT_LOCKED(sc);
86729776aa4SEmmanuel Vadot
86829776aa4SEmmanuel Vadot rv = 0;
86929776aa4SEmmanuel Vadot reg = READ4(sc, DMA_STATUS);
87029776aa4SEmmanuel Vadot if (reg & DMA_STATUS_NIS) {
87129776aa4SEmmanuel Vadot if (reg & DMA_STATUS_RI)
87229776aa4SEmmanuel Vadot dma1000_rxfinish_locked(sc);
87329776aa4SEmmanuel Vadot
87429776aa4SEmmanuel Vadot if (reg & DMA_STATUS_TI) {
87529776aa4SEmmanuel Vadot dma1000_txfinish_locked(sc);
87629776aa4SEmmanuel Vadot dma1000_txstart(sc);
87729776aa4SEmmanuel Vadot }
87829776aa4SEmmanuel Vadot }
87929776aa4SEmmanuel Vadot
88029776aa4SEmmanuel Vadot if (reg & DMA_STATUS_AIS) {
88129776aa4SEmmanuel Vadot if (reg & DMA_STATUS_FBI) {
88229776aa4SEmmanuel Vadot /* Fatal bus error */
88329776aa4SEmmanuel Vadot rv = EIO;
88429776aa4SEmmanuel Vadot }
88529776aa4SEmmanuel Vadot }
88629776aa4SEmmanuel Vadot
88729776aa4SEmmanuel Vadot WRITE4(sc, DMA_STATUS, reg & DMA_STATUS_INTR_MASK);
88829776aa4SEmmanuel Vadot return (rv);
88929776aa4SEmmanuel Vadot }
890