xref: /linux/drivers/net/wireless/broadcom/b43legacy/dma.c (revision 6e1d8d1470b2c9335715f7d52e864f0bd91a5f59)
1423e3ce3SKalle Valo /*
2423e3ce3SKalle Valo 
3423e3ce3SKalle Valo   Broadcom B43legacy wireless driver
4423e3ce3SKalle Valo 
5423e3ce3SKalle Valo   DMA ringbuffer and descriptor allocation/management
6423e3ce3SKalle Valo 
7423e3ce3SKalle Valo   Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>
8423e3ce3SKalle Valo 
9423e3ce3SKalle Valo   Some code in this file is derived from the b44.c driver
10423e3ce3SKalle Valo   Copyright (C) 2002 David S. Miller
11423e3ce3SKalle Valo   Copyright (C) Pekka Pietikainen
12423e3ce3SKalle Valo 
13423e3ce3SKalle Valo   This program is free software; you can redistribute it and/or modify
14423e3ce3SKalle Valo   it under the terms of the GNU General Public License as published by
15423e3ce3SKalle Valo   the Free Software Foundation; either version 2 of the License, or
16423e3ce3SKalle Valo   (at your option) any later version.
17423e3ce3SKalle Valo 
18423e3ce3SKalle Valo   This program is distributed in the hope that it will be useful,
19423e3ce3SKalle Valo   but WITHOUT ANY WARRANTY; without even the implied warranty of
20423e3ce3SKalle Valo   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
21423e3ce3SKalle Valo   GNU General Public License for more details.
22423e3ce3SKalle Valo 
23423e3ce3SKalle Valo   You should have received a copy of the GNU General Public License
24423e3ce3SKalle Valo   along with this program; see the file COPYING.  If not, write to
25423e3ce3SKalle Valo   the Free Software Foundation, Inc., 51 Franklin Steet, Fifth Floor,
26423e3ce3SKalle Valo   Boston, MA 02110-1301, USA.
27423e3ce3SKalle Valo 
28423e3ce3SKalle Valo */
29423e3ce3SKalle Valo 
30423e3ce3SKalle Valo #include "b43legacy.h"
31423e3ce3SKalle Valo #include "dma.h"
32423e3ce3SKalle Valo #include "main.h"
33423e3ce3SKalle Valo #include "debugfs.h"
34423e3ce3SKalle Valo #include "xmit.h"
35423e3ce3SKalle Valo 
36423e3ce3SKalle Valo #include <linux/dma-mapping.h>
37423e3ce3SKalle Valo #include <linux/pci.h>
38423e3ce3SKalle Valo #include <linux/delay.h>
39423e3ce3SKalle Valo #include <linux/skbuff.h>
40423e3ce3SKalle Valo #include <linux/slab.h>
41423e3ce3SKalle Valo #include <net/dst.h>
42423e3ce3SKalle Valo 
43423e3ce3SKalle Valo /* 32bit DMA ops. */
44423e3ce3SKalle Valo static
45423e3ce3SKalle Valo struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
46423e3ce3SKalle Valo 					  int slot,
47423e3ce3SKalle Valo 					  struct b43legacy_dmadesc_meta **meta)
48423e3ce3SKalle Valo {
49423e3ce3SKalle Valo 	struct b43legacy_dmadesc32 *desc;
50423e3ce3SKalle Valo 
51423e3ce3SKalle Valo 	*meta = &(ring->meta[slot]);
52423e3ce3SKalle Valo 	desc = ring->descbase;
53423e3ce3SKalle Valo 	desc = &(desc[slot]);
54423e3ce3SKalle Valo 
55423e3ce3SKalle Valo 	return desc;
56423e3ce3SKalle Valo }
57423e3ce3SKalle Valo 
/* Program one 32-bit DMA descriptor with a buffer address and control
 * flags.  The descriptor must belong to this ring's descriptor table. */
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	/* Derive the slot index from the descriptor's position in the
	 * table; it must lie within the ring. */
	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	/* Split the DMA address into the low address bits and the SSB
	 * address-extension bits, and fold in the core's translation. */
	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	/* The byte count excludes the per-frame header offset. */
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	/* The last descriptor of the table carries the table-end flag. */
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	/* Descriptor fields are little-endian as seen by the hardware. */
	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
92423e3ce3SKalle Valo 
93423e3ce3SKalle Valo static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
94423e3ce3SKalle Valo {
95423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
96423e3ce3SKalle Valo 			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
97423e3ce3SKalle Valo }
98423e3ce3SKalle Valo 
99423e3ce3SKalle Valo static void op32_tx_suspend(struct b43legacy_dmaring *ring)
100423e3ce3SKalle Valo {
101423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
102423e3ce3SKalle Valo 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
103423e3ce3SKalle Valo 			    | B43legacy_DMA32_TXSUSPEND);
104423e3ce3SKalle Valo }
105423e3ce3SKalle Valo 
106423e3ce3SKalle Valo static void op32_tx_resume(struct b43legacy_dmaring *ring)
107423e3ce3SKalle Valo {
108423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
109423e3ce3SKalle Valo 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
110423e3ce3SKalle Valo 			    & ~B43legacy_DMA32_TXSUSPEND);
111423e3ce3SKalle Valo }
112423e3ce3SKalle Valo 
113423e3ce3SKalle Valo static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
114423e3ce3SKalle Valo {
115423e3ce3SKalle Valo 	u32 val;
116423e3ce3SKalle Valo 
117423e3ce3SKalle Valo 	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
118423e3ce3SKalle Valo 	val &= B43legacy_DMA32_RXDPTR;
119423e3ce3SKalle Valo 
120423e3ce3SKalle Valo 	return (val / sizeof(struct b43legacy_dmadesc32));
121423e3ce3SKalle Valo }
122423e3ce3SKalle Valo 
123423e3ce3SKalle Valo static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
124423e3ce3SKalle Valo 				    int slot)
125423e3ce3SKalle Valo {
126423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
127423e3ce3SKalle Valo 			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
128423e3ce3SKalle Valo }
129423e3ce3SKalle Valo 
130423e3ce3SKalle Valo static inline int free_slots(struct b43legacy_dmaring *ring)
131423e3ce3SKalle Valo {
132423e3ce3SKalle Valo 	return (ring->nr_slots - ring->used_slots);
133423e3ce3SKalle Valo }
134423e3ce3SKalle Valo 
135423e3ce3SKalle Valo static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
136423e3ce3SKalle Valo {
137423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
138423e3ce3SKalle Valo 	if (slot == ring->nr_slots - 1)
139423e3ce3SKalle Valo 		return 0;
140423e3ce3SKalle Valo 	return slot + 1;
141423e3ce3SKalle Valo }
142423e3ce3SKalle Valo 
143423e3ce3SKalle Valo static inline int prev_slot(struct b43legacy_dmaring *ring, int slot)
144423e3ce3SKalle Valo {
145423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
146423e3ce3SKalle Valo 	if (slot == 0)
147423e3ce3SKalle Valo 		return ring->nr_slots - 1;
148423e3ce3SKalle Valo 	return slot - 1;
149423e3ce3SKalle Valo }
150423e3ce3SKalle Valo 
#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	/* Track the high-water mark of used slots for debugging. */
	if (current_used_slots > ring->max_used_slots) {
		ring->max_used_slots = current_used_slots;
		if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(ring->dev->wl,
			       "max_used_slots increased to %d on %s ring %d\n",
			       ring->max_used_slots,
			       ring->tx ? "TX" : "RX",
			       ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */
171423e3ce3SKalle Valo 
172423e3ce3SKalle Valo /* Request a slot for usage. */
173423e3ce3SKalle Valo static inline
174423e3ce3SKalle Valo int request_slot(struct b43legacy_dmaring *ring)
175423e3ce3SKalle Valo {
176423e3ce3SKalle Valo 	int slot;
177423e3ce3SKalle Valo 
178423e3ce3SKalle Valo 	B43legacy_WARN_ON(!ring->tx);
179423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->stopped);
180423e3ce3SKalle Valo 	B43legacy_WARN_ON(free_slots(ring) == 0);
181423e3ce3SKalle Valo 
182423e3ce3SKalle Valo 	slot = next_slot(ring, ring->current_slot);
183423e3ce3SKalle Valo 	ring->current_slot = slot;
184423e3ce3SKalle Valo 	ring->used_slots++;
185423e3ce3SKalle Valo 
186423e3ce3SKalle Valo 	update_max_used_slots(ring, ring->used_slots);
187423e3ce3SKalle Valo 
188423e3ce3SKalle Valo 	return slot;
189423e3ce3SKalle Valo }
190423e3ce3SKalle Valo 
/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

/*FIXME: For now we always run on TX-ring-1, so everything below the
 * early return is intentionally dead code, kept for when multi-queue
 * operation is enabled. */
return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		/* fallthrough */
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}
228423e3ce3SKalle Valo 
/* Bcm4301-ring to mac80211-queue mapping */
static inline int txring_to_priority(struct b43legacy_dmaring *ring)
{
	/* Inverse of priority_to_txring(): ring index -> mac80211 queue. */
	static const u8 idx_to_prio[] =
		{ 3, 2, 1, 0, 4, 5, };

/*FIXME: have only one queue, for now -- the table lookup below is
 * intentionally unreachable until multi-queue operation is enabled. */
return 0;

	return idx_to_prio[ring->index];
}
240423e3ce3SKalle Valo 
241423e3ce3SKalle Valo 
242423e3ce3SKalle Valo static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
243423e3ce3SKalle Valo 					int controller_idx)
244423e3ce3SKalle Valo {
245423e3ce3SKalle Valo 	static const u16 map32[] = {
246423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE0,
247423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE1,
248423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE2,
249423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE3,
250423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE4,
251423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE5,
252423e3ce3SKalle Valo 	};
253423e3ce3SKalle Valo 
254423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(controller_idx >= 0 &&
255423e3ce3SKalle Valo 			  controller_idx < ARRAY_SIZE(map32)));
256423e3ce3SKalle Valo 	return map32[controller_idx];
257423e3ce3SKalle Valo }
258423e3ce3SKalle Valo 
259423e3ce3SKalle Valo static inline
260423e3ce3SKalle Valo dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
261423e3ce3SKalle Valo 			  unsigned char *buf,
262423e3ce3SKalle Valo 			  size_t len,
263423e3ce3SKalle Valo 			  int tx)
264423e3ce3SKalle Valo {
265423e3ce3SKalle Valo 	dma_addr_t dmaaddr;
266423e3ce3SKalle Valo 
267423e3ce3SKalle Valo 	if (tx)
268423e3ce3SKalle Valo 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
269423e3ce3SKalle Valo 					     buf, len,
270423e3ce3SKalle Valo 					     DMA_TO_DEVICE);
271423e3ce3SKalle Valo 	else
272423e3ce3SKalle Valo 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
273423e3ce3SKalle Valo 					     buf, len,
274423e3ce3SKalle Valo 					     DMA_FROM_DEVICE);
275423e3ce3SKalle Valo 
276423e3ce3SKalle Valo 	return dmaaddr;
277423e3ce3SKalle Valo }
278423e3ce3SKalle Valo 
279423e3ce3SKalle Valo static inline
280423e3ce3SKalle Valo void unmap_descbuffer(struct b43legacy_dmaring *ring,
281423e3ce3SKalle Valo 		      dma_addr_t addr,
282423e3ce3SKalle Valo 		      size_t len,
283423e3ce3SKalle Valo 		      int tx)
284423e3ce3SKalle Valo {
285423e3ce3SKalle Valo 	if (tx)
286423e3ce3SKalle Valo 		dma_unmap_single(ring->dev->dev->dma_dev,
287423e3ce3SKalle Valo 				     addr, len,
288423e3ce3SKalle Valo 				     DMA_TO_DEVICE);
289423e3ce3SKalle Valo 	else
290423e3ce3SKalle Valo 		dma_unmap_single(ring->dev->dev->dma_dev,
291423e3ce3SKalle Valo 				     addr, len,
292423e3ce3SKalle Valo 				     DMA_FROM_DEVICE);
293423e3ce3SKalle Valo }
294423e3ce3SKalle Valo 
295423e3ce3SKalle Valo static inline
296423e3ce3SKalle Valo void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
297423e3ce3SKalle Valo 			     dma_addr_t addr,
298423e3ce3SKalle Valo 			     size_t len)
299423e3ce3SKalle Valo {
300423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
301423e3ce3SKalle Valo 
302423e3ce3SKalle Valo 	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
303423e3ce3SKalle Valo 				addr, len, DMA_FROM_DEVICE);
304423e3ce3SKalle Valo }
305423e3ce3SKalle Valo 
306423e3ce3SKalle Valo static inline
307423e3ce3SKalle Valo void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
308423e3ce3SKalle Valo 				dma_addr_t addr,
309423e3ce3SKalle Valo 				size_t len)
310423e3ce3SKalle Valo {
311423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
312423e3ce3SKalle Valo 
313423e3ce3SKalle Valo 	dma_sync_single_for_device(ring->dev->dev->dma_dev,
314423e3ce3SKalle Valo 				   addr, len, DMA_FROM_DEVICE);
315423e3ce3SKalle Valo }
316423e3ce3SKalle Valo 
317423e3ce3SKalle Valo static inline
318423e3ce3SKalle Valo void free_descriptor_buffer(struct b43legacy_dmaring *ring,
319423e3ce3SKalle Valo 			    struct b43legacy_dmadesc_meta *meta,
320423e3ce3SKalle Valo 			    int irq_context)
321423e3ce3SKalle Valo {
322423e3ce3SKalle Valo 	if (meta->skb) {
323423e3ce3SKalle Valo 		if (irq_context)
324423e3ce3SKalle Valo 			dev_kfree_skb_irq(meta->skb);
325423e3ce3SKalle Valo 		else
326423e3ce3SKalle Valo 			dev_kfree_skb(meta->skb);
327423e3ce3SKalle Valo 		meta->skb = NULL;
328423e3ce3SKalle Valo 	}
329423e3ce3SKalle Valo }
330423e3ce3SKalle Valo 
331423e3ce3SKalle Valo static int alloc_ringmemory(struct b43legacy_dmaring *ring)
332423e3ce3SKalle Valo {
333423e3ce3SKalle Valo 	/* GFP flags must match the flags in free_ringmemory()! */
334423e3ce3SKalle Valo 	ring->descbase = dma_zalloc_coherent(ring->dev->dev->dma_dev,
335423e3ce3SKalle Valo 					     B43legacy_DMA_RINGMEMSIZE,
336423e3ce3SKalle Valo 					     &(ring->dmabase), GFP_KERNEL);
337423e3ce3SKalle Valo 	if (!ring->descbase)
338423e3ce3SKalle Valo 		return -ENOMEM;
339423e3ce3SKalle Valo 
340423e3ce3SKalle Valo 	return 0;
341423e3ce3SKalle Valo }
342423e3ce3SKalle Valo 
343423e3ce3SKalle Valo static void free_ringmemory(struct b43legacy_dmaring *ring)
344423e3ce3SKalle Valo {
345423e3ce3SKalle Valo 	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
346423e3ce3SKalle Valo 			  ring->descbase, ring->dmabase);
347423e3ce3SKalle Valo }
348423e3ce3SKalle Valo 
349423e3ce3SKalle Valo /* Reset the RX DMA channel */
350423e3ce3SKalle Valo static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
351423e3ce3SKalle Valo 					    u16 mmio_base,
352423e3ce3SKalle Valo 					    enum b43legacy_dmatype type)
353423e3ce3SKalle Valo {
354423e3ce3SKalle Valo 	int i;
355423e3ce3SKalle Valo 	u32 value;
356423e3ce3SKalle Valo 	u16 offset;
357423e3ce3SKalle Valo 
358423e3ce3SKalle Valo 	might_sleep();
359423e3ce3SKalle Valo 
360423e3ce3SKalle Valo 	offset = B43legacy_DMA32_RXCTL;
361423e3ce3SKalle Valo 	b43legacy_write32(dev, mmio_base + offset, 0);
362423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
363423e3ce3SKalle Valo 		offset = B43legacy_DMA32_RXSTATUS;
364423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
365423e3ce3SKalle Valo 		value &= B43legacy_DMA32_RXSTATE;
366423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
367423e3ce3SKalle Valo 			i = -1;
368423e3ce3SKalle Valo 			break;
369423e3ce3SKalle Valo 		}
370423e3ce3SKalle Valo 		msleep(1);
371423e3ce3SKalle Valo 	}
372423e3ce3SKalle Valo 	if (i != -1) {
373423e3ce3SKalle Valo 		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
374423e3ce3SKalle Valo 		return -ENODEV;
375423e3ce3SKalle Valo 	}
376423e3ce3SKalle Valo 
377423e3ce3SKalle Valo 	return 0;
378423e3ce3SKalle Valo }
379423e3ce3SKalle Valo 
380423e3ce3SKalle Valo /* Reset the RX DMA channel */
381423e3ce3SKalle Valo static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
382423e3ce3SKalle Valo 					    u16 mmio_base,
383423e3ce3SKalle Valo 					    enum b43legacy_dmatype type)
384423e3ce3SKalle Valo {
385423e3ce3SKalle Valo 	int i;
386423e3ce3SKalle Valo 	u32 value;
387423e3ce3SKalle Valo 	u16 offset;
388423e3ce3SKalle Valo 
389423e3ce3SKalle Valo 	might_sleep();
390423e3ce3SKalle Valo 
391423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
392423e3ce3SKalle Valo 		offset = B43legacy_DMA32_TXSTATUS;
393423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
394423e3ce3SKalle Valo 		value &= B43legacy_DMA32_TXSTATE;
395423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
396423e3ce3SKalle Valo 		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
397423e3ce3SKalle Valo 		    value == B43legacy_DMA32_TXSTAT_STOPPED)
398423e3ce3SKalle Valo 			break;
399423e3ce3SKalle Valo 		msleep(1);
400423e3ce3SKalle Valo 	}
401423e3ce3SKalle Valo 	offset = B43legacy_DMA32_TXCTL;
402423e3ce3SKalle Valo 	b43legacy_write32(dev, mmio_base + offset, 0);
403423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
404423e3ce3SKalle Valo 		offset = B43legacy_DMA32_TXSTATUS;
405423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
406423e3ce3SKalle Valo 		value &= B43legacy_DMA32_TXSTATE;
407423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
408423e3ce3SKalle Valo 			i = -1;
409423e3ce3SKalle Valo 			break;
410423e3ce3SKalle Valo 		}
411423e3ce3SKalle Valo 		msleep(1);
412423e3ce3SKalle Valo 	}
413423e3ce3SKalle Valo 	if (i != -1) {
414423e3ce3SKalle Valo 		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
415423e3ce3SKalle Valo 		return -ENODEV;
416423e3ce3SKalle Valo 	}
417423e3ce3SKalle Valo 	/* ensure the reset is completed. */
418423e3ce3SKalle Valo 	msleep(1);
419423e3ce3SKalle Valo 
420423e3ce3SKalle Valo 	return 0;
421423e3ce3SKalle Valo }
422423e3ce3SKalle Valo 
423423e3ce3SKalle Valo /* Check if a DMA mapping address is invalid. */
424423e3ce3SKalle Valo static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
425423e3ce3SKalle Valo 					 dma_addr_t addr,
426423e3ce3SKalle Valo 					 size_t buffersize,
427423e3ce3SKalle Valo 					 bool dma_to_device)
428423e3ce3SKalle Valo {
429423e3ce3SKalle Valo 	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
430423e3ce3SKalle Valo 		return true;
431423e3ce3SKalle Valo 
432423e3ce3SKalle Valo 	switch (ring->type) {
433423e3ce3SKalle Valo 	case B43legacy_DMA_30BIT:
434423e3ce3SKalle Valo 		if ((u64)addr + buffersize > (1ULL << 30))
435423e3ce3SKalle Valo 			goto address_error;
436423e3ce3SKalle Valo 		break;
437423e3ce3SKalle Valo 	case B43legacy_DMA_32BIT:
438423e3ce3SKalle Valo 		if ((u64)addr + buffersize > (1ULL << 32))
439423e3ce3SKalle Valo 			goto address_error;
440423e3ce3SKalle Valo 		break;
441423e3ce3SKalle Valo 	}
442423e3ce3SKalle Valo 
443423e3ce3SKalle Valo 	/* The address is OK. */
444423e3ce3SKalle Valo 	return false;
445423e3ce3SKalle Valo 
446423e3ce3SKalle Valo address_error:
447423e3ce3SKalle Valo 	/* We can't support this address. Unmap it again. */
448423e3ce3SKalle Valo 	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
449423e3ce3SKalle Valo 
450423e3ce3SKalle Valo 	return true;
451423e3ce3SKalle Valo }
452423e3ce3SKalle Valo 
/* Allocate and DMA-map an skb for one RX slot and program its
 * descriptor.  On a mapping failure the buffer is reallocated once
 * with GFP_DMA.  Returns 0 on success or a negative error code. */
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	/* Still not mappable even from ZONE_DMA -- give up. */
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	/* Zero the length/cookie fields at the head of the fresh buffer
	 * (NOTE(review): presumably so stale data is not mistaken for a
	 * received frame or TX-status report -- confirm in the RX path). */
	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
499423e3ce3SKalle Valo 
/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	/* Give every slot a mapped skb; unwind on the first failure. */
	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	/* An RX ring keeps all of its slots permanently "used". */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	/* Unmap and free the buffers of every slot set up so far. */
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}
535423e3ce3SKalle Valo 
/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		/* The address-extension bits of the ring base go into the
		 * control register; the remaining bits, with the SSB
		 * translation folded in, go into the ring register. */
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			& B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		/* RX rings need their buffers in place before enabling. */
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		/* Initial RX index as a byte offset into the descriptor
		 * table (NOTE(review): 200 is a magic value here --
		 * presumably past the pre-filled descriptors; confirm). */
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}
580423e3ce3SKalle Valo 
581423e3ce3SKalle Valo /* Shutdown the DMA controller. */
582423e3ce3SKalle Valo static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
583423e3ce3SKalle Valo {
584423e3ce3SKalle Valo 	if (ring->tx) {
585423e3ce3SKalle Valo 		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
586423e3ce3SKalle Valo 						 ring->type);
587423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
588423e3ce3SKalle Valo 	} else {
589423e3ce3SKalle Valo 		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
590423e3ce3SKalle Valo 						 ring->type);
591423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
592423e3ce3SKalle Valo 	}
593423e3ce3SKalle Valo }
594423e3ce3SKalle Valo 
595423e3ce3SKalle Valo static void free_all_descbuffers(struct b43legacy_dmaring *ring)
596423e3ce3SKalle Valo {
597423e3ce3SKalle Valo 	struct b43legacy_dmadesc_meta *meta;
598423e3ce3SKalle Valo 	int i;
599423e3ce3SKalle Valo 
600423e3ce3SKalle Valo 	if (!ring->used_slots)
601423e3ce3SKalle Valo 		return;
602423e3ce3SKalle Valo 	for (i = 0; i < ring->nr_slots; i++) {
603423e3ce3SKalle Valo 		op32_idx2desc(ring, i, &meta);
604423e3ce3SKalle Valo 
605423e3ce3SKalle Valo 		if (!meta->skb) {
606423e3ce3SKalle Valo 			B43legacy_WARN_ON(!ring->tx);
607423e3ce3SKalle Valo 			continue;
608423e3ce3SKalle Valo 		}
609423e3ce3SKalle Valo 		if (ring->tx)
610423e3ce3SKalle Valo 			unmap_descbuffer(ring, meta->dmaaddr,
611423e3ce3SKalle Valo 					 meta->skb->len, 1);
612423e3ce3SKalle Valo 		else
613423e3ce3SKalle Valo 			unmap_descbuffer(ring, meta->dmaaddr,
614423e3ce3SKalle Valo 					 ring->rx_buffersize, 0);
615423e3ce3SKalle Valo 		free_descriptor_buffer(ring, meta, 0);
616423e3ce3SKalle Valo 	}
617423e3ce3SKalle Valo }
618423e3ce3SKalle Valo 
619423e3ce3SKalle Valo static u64 supported_dma_mask(struct b43legacy_wldev *dev)
620423e3ce3SKalle Valo {
621423e3ce3SKalle Valo 	u32 tmp;
622423e3ce3SKalle Valo 	u16 mmio_base;
623423e3ce3SKalle Valo 
624423e3ce3SKalle Valo 	mmio_base = b43legacy_dmacontroller_base(0, 0);
625423e3ce3SKalle Valo 	b43legacy_write32(dev,
626423e3ce3SKalle Valo 			mmio_base + B43legacy_DMA32_TXCTL,
627423e3ce3SKalle Valo 			B43legacy_DMA32_TXADDREXT_MASK);
628423e3ce3SKalle Valo 	tmp = b43legacy_read32(dev, mmio_base +
629423e3ce3SKalle Valo 			       B43legacy_DMA32_TXCTL);
630423e3ce3SKalle Valo 	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
631423e3ce3SKalle Valo 		return DMA_BIT_MASK(32);
632423e3ce3SKalle Valo 
633423e3ce3SKalle Valo 	return DMA_BIT_MASK(30);
634423e3ce3SKalle Valo }
635423e3ce3SKalle Valo 
636423e3ce3SKalle Valo static enum b43legacy_dmatype dma_mask_to_engine_type(u64 dmamask)
637423e3ce3SKalle Valo {
638423e3ce3SKalle Valo 	if (dmamask == DMA_BIT_MASK(30))
639423e3ce3SKalle Valo 		return B43legacy_DMA_30BIT;
640423e3ce3SKalle Valo 	if (dmamask == DMA_BIT_MASK(32))
641423e3ce3SKalle Valo 		return B43legacy_DMA_32BIT;
642423e3ce3SKalle Valo 	B43legacy_WARN_ON(1);
643423e3ce3SKalle Valo 	return B43legacy_DMA_30BIT;
644423e3ce3SKalle Valo }
645423e3ce3SKalle Valo 
/* Main initialization function.
 *
 * Allocate and initialize one DMA ring (TX or RX) for the given
 * controller index.  Returns the new ring on success, or NULL on any
 * allocation or controller-setup failure.
 */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	/* Per-slot metadata (skb pointer, DMA address, flags). */
	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		/* Pre-allocate one firmware TX header per slot. */
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc: the first cache was not reachable by
			 * the device's DMA mask; retry from ZONE_DMA. */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		/* The test mapping is no longer needed. */
		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		/* No slot handed out yet. */
		ring->current_slot = -1;
	} else {
		/* Only RX controllers 0 and 3 are used by this driver;
		 * each has its own buffer size and frame offset. */
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

	/* Error unwind: free in reverse order of allocation.  For RX rings
	 * ring->txhdr_cache is NULL and kfree(NULL) is a no-op. */
err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
750423e3ce3SKalle Valo 
751423e3ce3SKalle Valo /* Main cleanup function. */
752423e3ce3SKalle Valo static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
753423e3ce3SKalle Valo {
754423e3ce3SKalle Valo 	if (!ring)
755423e3ce3SKalle Valo 		return;
756423e3ce3SKalle Valo 
757423e3ce3SKalle Valo 	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
758423e3ce3SKalle Valo 		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
759423e3ce3SKalle Valo 		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
760423e3ce3SKalle Valo 		     ring->nr_slots);
761423e3ce3SKalle Valo 	/* Device IRQs are disabled prior entering this function,
762423e3ce3SKalle Valo 	 * so no need to take care of concurrency with rx handler stuff.
763423e3ce3SKalle Valo 	 */
764423e3ce3SKalle Valo 	dmacontroller_cleanup(ring);
765423e3ce3SKalle Valo 	free_all_descbuffers(ring);
766423e3ce3SKalle Valo 	free_ringmemory(ring);
767423e3ce3SKalle Valo 
768423e3ce3SKalle Valo 	kfree(ring->txhdr_cache);
769423e3ce3SKalle Valo 	kfree(ring->meta);
770423e3ce3SKalle Valo 	kfree(ring);
771423e3ce3SKalle Valo }
772423e3ce3SKalle Valo 
773423e3ce3SKalle Valo void b43legacy_dma_free(struct b43legacy_wldev *dev)
774423e3ce3SKalle Valo {
775423e3ce3SKalle Valo 	struct b43legacy_dma *dma;
776423e3ce3SKalle Valo 
777423e3ce3SKalle Valo 	if (b43legacy_using_pio(dev))
778423e3ce3SKalle Valo 		return;
779423e3ce3SKalle Valo 	dma = &dev->dma;
780423e3ce3SKalle Valo 
781423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->rx_ring3);
782423e3ce3SKalle Valo 	dma->rx_ring3 = NULL;
783423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->rx_ring0);
784423e3ce3SKalle Valo 	dma->rx_ring0 = NULL;
785423e3ce3SKalle Valo 
786423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring5);
787423e3ce3SKalle Valo 	dma->tx_ring5 = NULL;
788423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring4);
789423e3ce3SKalle Valo 	dma->tx_ring4 = NULL;
790423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring3);
791423e3ce3SKalle Valo 	dma->tx_ring3 = NULL;
792423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring2);
793423e3ce3SKalle Valo 	dma->tx_ring2 = NULL;
794423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring1);
795423e3ce3SKalle Valo 	dma->tx_ring1 = NULL;
796423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring0);
797423e3ce3SKalle Valo 	dma->tx_ring0 = NULL;
798423e3ce3SKalle Valo }
799423e3ce3SKalle Valo 
800423e3ce3SKalle Valo static int b43legacy_dma_set_mask(struct b43legacy_wldev *dev, u64 mask)
801423e3ce3SKalle Valo {
802423e3ce3SKalle Valo 	u64 orig_mask = mask;
803423e3ce3SKalle Valo 	bool fallback = false;
804423e3ce3SKalle Valo 	int err;
805423e3ce3SKalle Valo 
806423e3ce3SKalle Valo 	/* Try to set the DMA mask. If it fails, try falling back to a
807423e3ce3SKalle Valo 	 * lower mask, as we can always also support a lower one. */
808423e3ce3SKalle Valo 	while (1) {
809423e3ce3SKalle Valo 		err = dma_set_mask_and_coherent(dev->dev->dma_dev, mask);
810423e3ce3SKalle Valo 		if (!err)
811423e3ce3SKalle Valo 			break;
812423e3ce3SKalle Valo 		if (mask == DMA_BIT_MASK(64)) {
813423e3ce3SKalle Valo 			mask = DMA_BIT_MASK(32);
814423e3ce3SKalle Valo 			fallback = true;
815423e3ce3SKalle Valo 			continue;
816423e3ce3SKalle Valo 		}
817423e3ce3SKalle Valo 		if (mask == DMA_BIT_MASK(32)) {
818423e3ce3SKalle Valo 			mask = DMA_BIT_MASK(30);
819423e3ce3SKalle Valo 			fallback = true;
820423e3ce3SKalle Valo 			continue;
821423e3ce3SKalle Valo 		}
822423e3ce3SKalle Valo 		b43legacyerr(dev->wl, "The machine/kernel does not support "
823423e3ce3SKalle Valo 		       "the required %u-bit DMA mask\n",
824423e3ce3SKalle Valo 		       (unsigned int)dma_mask_to_engine_type(orig_mask));
825423e3ce3SKalle Valo 		return -EOPNOTSUPP;
826423e3ce3SKalle Valo 	}
827423e3ce3SKalle Valo 	if (fallback) {
828423e3ce3SKalle Valo 		b43legacyinfo(dev->wl, "DMA mask fallback from %u-bit to %u-"
829423e3ce3SKalle Valo 			"bit\n",
830423e3ce3SKalle Valo 			(unsigned int)dma_mask_to_engine_type(orig_mask),
831423e3ce3SKalle Valo 			(unsigned int)dma_mask_to_engine_type(mask));
832423e3ce3SKalle Valo 	}
833423e3ce3SKalle Valo 
834423e3ce3SKalle Valo 	return 0;
835423e3ce3SKalle Valo }
836423e3ce3SKalle Valo 
/* Initialize DMA for the device: set the DMA mask and create all six
 * TX rings plus the RX ring(s).
 * Returns 0 on success, -EAGAIN if the caller should retry in PIO mode
 * (only when PIO support is compiled in), or a negative error code.
 */
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	int err;
	u64 dmamask;
	enum b43legacy_dmatype type;

	dmamask = supported_dma_mask(dev);
	type = dma_mask_to_engine_type(dmamask);
	err = b43legacy_dma_set_mask(dev, dmamask);
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		/* -EAGAIN tells the caller to re-run initialization,
		 * which will now take the PIO path. */
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	/* Any setup_dmaring failure below is reported as -ENOMEM. */
	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	/* Old core revisions (< 5) deliver TX status reports over a
	 * second RX ring on controller 3. */
	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

	/* Error unwind: destroy already-created rings in reverse order. */
err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
935423e3ce3SKalle Valo 
936423e3ce3SKalle Valo /* Generate a cookie for the TX header. */
937423e3ce3SKalle Valo static u16 generate_cookie(struct b43legacy_dmaring *ring,
938423e3ce3SKalle Valo 			   int slot)
939423e3ce3SKalle Valo {
940423e3ce3SKalle Valo 	u16 cookie = 0x1000;
941423e3ce3SKalle Valo 
942423e3ce3SKalle Valo 	/* Use the upper 4 bits of the cookie as
943423e3ce3SKalle Valo 	 * DMA controller ID and store the slot number
944423e3ce3SKalle Valo 	 * in the lower 12 bits.
945423e3ce3SKalle Valo 	 * Note that the cookie must never be 0, as this
946423e3ce3SKalle Valo 	 * is a special value used in RX path.
947423e3ce3SKalle Valo 	 */
948423e3ce3SKalle Valo 	switch (ring->index) {
949423e3ce3SKalle Valo 	case 0:
950423e3ce3SKalle Valo 		cookie = 0xA000;
951423e3ce3SKalle Valo 		break;
952423e3ce3SKalle Valo 	case 1:
953423e3ce3SKalle Valo 		cookie = 0xB000;
954423e3ce3SKalle Valo 		break;
955423e3ce3SKalle Valo 	case 2:
956423e3ce3SKalle Valo 		cookie = 0xC000;
957423e3ce3SKalle Valo 		break;
958423e3ce3SKalle Valo 	case 3:
959423e3ce3SKalle Valo 		cookie = 0xD000;
960423e3ce3SKalle Valo 		break;
961423e3ce3SKalle Valo 	case 4:
962423e3ce3SKalle Valo 		cookie = 0xE000;
963423e3ce3SKalle Valo 		break;
964423e3ce3SKalle Valo 	case 5:
965423e3ce3SKalle Valo 		cookie = 0xF000;
966423e3ce3SKalle Valo 		break;
967423e3ce3SKalle Valo 	}
968423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
969423e3ce3SKalle Valo 	cookie |= (u16)slot;
970423e3ce3SKalle Valo 
971423e3ce3SKalle Valo 	return cookie;
972423e3ce3SKalle Valo }
973423e3ce3SKalle Valo 
974423e3ce3SKalle Valo /* Inspect a cookie and find out to which controller/slot it belongs. */
975423e3ce3SKalle Valo static
976423e3ce3SKalle Valo struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
977423e3ce3SKalle Valo 				      u16 cookie, int *slot)
978423e3ce3SKalle Valo {
979423e3ce3SKalle Valo 	struct b43legacy_dma *dma = &dev->dma;
980423e3ce3SKalle Valo 	struct b43legacy_dmaring *ring = NULL;
981423e3ce3SKalle Valo 
982423e3ce3SKalle Valo 	switch (cookie & 0xF000) {
983423e3ce3SKalle Valo 	case 0xA000:
984423e3ce3SKalle Valo 		ring = dma->tx_ring0;
985423e3ce3SKalle Valo 		break;
986423e3ce3SKalle Valo 	case 0xB000:
987423e3ce3SKalle Valo 		ring = dma->tx_ring1;
988423e3ce3SKalle Valo 		break;
989423e3ce3SKalle Valo 	case 0xC000:
990423e3ce3SKalle Valo 		ring = dma->tx_ring2;
991423e3ce3SKalle Valo 		break;
992423e3ce3SKalle Valo 	case 0xD000:
993423e3ce3SKalle Valo 		ring = dma->tx_ring3;
994423e3ce3SKalle Valo 		break;
995423e3ce3SKalle Valo 	case 0xE000:
996423e3ce3SKalle Valo 		ring = dma->tx_ring4;
997423e3ce3SKalle Valo 		break;
998423e3ce3SKalle Valo 	case 0xF000:
999423e3ce3SKalle Valo 		ring = dma->tx_ring5;
1000423e3ce3SKalle Valo 		break;
1001423e3ce3SKalle Valo 	default:
1002423e3ce3SKalle Valo 		B43legacy_WARN_ON(1);
1003423e3ce3SKalle Valo 	}
1004423e3ce3SKalle Valo 	*slot = (cookie & 0x0FFF);
1005423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
1006423e3ce3SKalle Valo 
1007423e3ce3SKalle Valo 	return ring;
1008423e3ce3SKalle Valo }
1009423e3ce3SKalle Valo 
/* Queue one frame on a TX ring: one slot for the firmware TX header,
 * one slot for the frame payload.
 * May replace *in_skb with a bounce buffer, so the caller must not keep
 * pointers into the old skb's data or cb after this returns.
 * Returns 0 on success or a negative error code; on error the ring's
 * slot accounting is rolled back to its previous state.
 */
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	/* Remember the ring state so we can roll back on failure. */
	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	/* The TX header lives in the per-slot txhdr_cache. */
	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		/* Copy data and control block, then retire the original
		 * skb in favour of the bounce buffer. */
		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}
1109423e3ce3SKalle Valo 
/* Debug helper: decide whether to fake a TX ring overflow to exercise
 * the stack's overflow handling.  Rate-limited to one injection per HZ
 * jiffies.  Always returns 0 when CONFIG_B43LEGACY_DEBUG is not set. */
static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		unsigned long next_overflow =
				ring->last_injected_overflow + HZ;

		if (!time_after(jiffies, next_overflow))
			return 0;
		ring->last_injected_overflow = jiffies;
		b43legacydbg(ring->dev->wl,
		       "Injecting TX ring overflow on "
		       "DMA controller %d\n", ring->index);
		return 1;
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}
1132423e3ce3SKalle Valo 
/* Transmit one frame via DMA.
 * Picks the TX ring for the skb's queue mapping and hands the frame to
 * dma_tx_fragment().  Stops the mac80211 queue when the ring becomes
 * full.  Returns 0 on success (including the dropped -ENOKEY case) or a
 * negative error code.
 */
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (unlikely(WARN_ON(free_slots(ring) < SLOTS_PER_PACKET))) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}
1185423e3ce3SKalle Valo 
/* Process a TX status report from the device.
 * Looks up the ring/slot from the cookie, unmaps and frees the header
 * and payload slots of the completed frame, reports the TX status to
 * mac80211 and wakes the queue if the ring was stopped.
 */
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	/* Walk the frame's slots (header slot first, payload slot last). */
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		/* Payload slots carry an skb; the header slot maps the
		 * per-slot txhdr_cache entry instead. */
		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* preserve the configured retry limit before clearing the status
			 * The xmit function has overwritten the rc's value with the actual
			 * retry limit done by the hardware */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					/* Split attempts between the selected
					 * rate and the fallback rate. */
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		/* The driver-level queue is still stopped; just clear the
		 * flag, the tx_work below will drain it. */
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
1304423e3ce3SKalle Valo 
1305423e3ce3SKalle Valo static void dma_rx(struct b43legacy_dmaring *ring,
1306423e3ce3SKalle Valo 		   int *slot)
1307423e3ce3SKalle Valo {
1308423e3ce3SKalle Valo 	struct b43legacy_dmadesc32 *desc;
1309423e3ce3SKalle Valo 	struct b43legacy_dmadesc_meta *meta;
1310423e3ce3SKalle Valo 	struct b43legacy_rxhdr_fw3 *rxhdr;
1311423e3ce3SKalle Valo 	struct sk_buff *skb;
1312423e3ce3SKalle Valo 	u16 len;
1313423e3ce3SKalle Valo 	int err;
1314423e3ce3SKalle Valo 	dma_addr_t dmaaddr;
1315423e3ce3SKalle Valo 
1316423e3ce3SKalle Valo 	desc = op32_idx2desc(ring, *slot, &meta);
1317423e3ce3SKalle Valo 
1318423e3ce3SKalle Valo 	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
1319423e3ce3SKalle Valo 	skb = meta->skb;
1320423e3ce3SKalle Valo 
1321423e3ce3SKalle Valo 	if (ring->index == 3) {
1322423e3ce3SKalle Valo 		/* We received an xmit status. */
1323423e3ce3SKalle Valo 		struct b43legacy_hwtxstatus *hw =
1324423e3ce3SKalle Valo 				(struct b43legacy_hwtxstatus *)skb->data;
1325423e3ce3SKalle Valo 		int i = 0;
1326423e3ce3SKalle Valo 
1327423e3ce3SKalle Valo 		while (hw->cookie == 0) {
1328423e3ce3SKalle Valo 			if (i > 100)
1329423e3ce3SKalle Valo 				break;
1330423e3ce3SKalle Valo 			i++;
1331423e3ce3SKalle Valo 			udelay(2);
1332423e3ce3SKalle Valo 			barrier();
1333423e3ce3SKalle Valo 		}
1334423e3ce3SKalle Valo 		b43legacy_handle_hwtxstatus(ring->dev, hw);
1335423e3ce3SKalle Valo 		/* recycle the descriptor buffer. */
1336423e3ce3SKalle Valo 		sync_descbuffer_for_device(ring, meta->dmaaddr,
1337423e3ce3SKalle Valo 					   ring->rx_buffersize);
1338423e3ce3SKalle Valo 
1339423e3ce3SKalle Valo 		return;
1340423e3ce3SKalle Valo 	}
1341423e3ce3SKalle Valo 	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
1342423e3ce3SKalle Valo 	len = le16_to_cpu(rxhdr->frame_len);
1343423e3ce3SKalle Valo 	if (len == 0) {
1344423e3ce3SKalle Valo 		int i = 0;
1345423e3ce3SKalle Valo 
1346423e3ce3SKalle Valo 		do {
1347423e3ce3SKalle Valo 			udelay(2);
1348423e3ce3SKalle Valo 			barrier();
1349423e3ce3SKalle Valo 			len = le16_to_cpu(rxhdr->frame_len);
1350423e3ce3SKalle Valo 		} while (len == 0 && i++ < 5);
1351423e3ce3SKalle Valo 		if (unlikely(len == 0)) {
1352423e3ce3SKalle Valo 			/* recycle the descriptor buffer. */
1353423e3ce3SKalle Valo 			sync_descbuffer_for_device(ring, meta->dmaaddr,
1354423e3ce3SKalle Valo 						   ring->rx_buffersize);
1355423e3ce3SKalle Valo 			goto drop;
1356423e3ce3SKalle Valo 		}
1357423e3ce3SKalle Valo 	}
1358423e3ce3SKalle Valo 	if (unlikely(len > ring->rx_buffersize)) {
1359423e3ce3SKalle Valo 		/* The data did not fit into one descriptor buffer
1360423e3ce3SKalle Valo 		 * and is split over multiple buffers.
1361423e3ce3SKalle Valo 		 * This should never happen, as we try to allocate buffers
1362423e3ce3SKalle Valo 		 * big enough. So simply ignore this packet.
1363423e3ce3SKalle Valo 		 */
1364423e3ce3SKalle Valo 		int cnt = 0;
1365423e3ce3SKalle Valo 		s32 tmp = len;
1366423e3ce3SKalle Valo 
1367423e3ce3SKalle Valo 		while (1) {
1368423e3ce3SKalle Valo 			desc = op32_idx2desc(ring, *slot, &meta);
1369423e3ce3SKalle Valo 			/* recycle the descriptor buffer. */
1370423e3ce3SKalle Valo 			sync_descbuffer_for_device(ring, meta->dmaaddr,
1371423e3ce3SKalle Valo 						   ring->rx_buffersize);
1372423e3ce3SKalle Valo 			*slot = next_slot(ring, *slot);
1373423e3ce3SKalle Valo 			cnt++;
1374423e3ce3SKalle Valo 			tmp -= ring->rx_buffersize;
1375423e3ce3SKalle Valo 			if (tmp <= 0)
1376423e3ce3SKalle Valo 				break;
1377423e3ce3SKalle Valo 		}
1378423e3ce3SKalle Valo 		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
1379423e3ce3SKalle Valo 		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
1380423e3ce3SKalle Valo 		       len, ring->rx_buffersize, cnt);
1381423e3ce3SKalle Valo 		goto drop;
1382423e3ce3SKalle Valo 	}
1383423e3ce3SKalle Valo 
1384423e3ce3SKalle Valo 	dmaaddr = meta->dmaaddr;
1385423e3ce3SKalle Valo 	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
1386423e3ce3SKalle Valo 	if (unlikely(err)) {
1387423e3ce3SKalle Valo 		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
1388423e3ce3SKalle Valo 			     " failed\n");
1389423e3ce3SKalle Valo 		sync_descbuffer_for_device(ring, dmaaddr,
1390423e3ce3SKalle Valo 					   ring->rx_buffersize);
1391423e3ce3SKalle Valo 		goto drop;
1392423e3ce3SKalle Valo 	}
1393423e3ce3SKalle Valo 
1394423e3ce3SKalle Valo 	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
1395423e3ce3SKalle Valo 	skb_put(skb, len + ring->frameoffset);
1396423e3ce3SKalle Valo 	skb_pull(skb, ring->frameoffset);
1397423e3ce3SKalle Valo 
1398423e3ce3SKalle Valo 	b43legacy_rx(ring->dev, skb, rxhdr);
1399423e3ce3SKalle Valo drop:
1400423e3ce3SKalle Valo 	return;
1401423e3ce3SKalle Valo }
1402423e3ce3SKalle Valo 
1403423e3ce3SKalle Valo void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1404423e3ce3SKalle Valo {
1405423e3ce3SKalle Valo 	int slot;
1406423e3ce3SKalle Valo 	int current_slot;
1407423e3ce3SKalle Valo 	int used_slots = 0;
1408423e3ce3SKalle Valo 
1409423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
1410423e3ce3SKalle Valo 	current_slot = op32_get_current_rxslot(ring);
1411423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
1412423e3ce3SKalle Valo 			   ring->nr_slots));
1413423e3ce3SKalle Valo 
1414423e3ce3SKalle Valo 	slot = ring->current_slot;
1415423e3ce3SKalle Valo 	for (; slot != current_slot; slot = next_slot(ring, slot)) {
1416423e3ce3SKalle Valo 		dma_rx(ring, &slot);
1417423e3ce3SKalle Valo 		update_max_used_slots(ring, ++used_slots);
1418423e3ce3SKalle Valo 	}
1419423e3ce3SKalle Valo 	op32_set_current_rxslot(ring, slot);
1420423e3ce3SKalle Valo 	ring->current_slot = slot;
1421423e3ce3SKalle Valo }
1422423e3ce3SKalle Valo 
1423423e3ce3SKalle Valo static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
1424423e3ce3SKalle Valo {
1425423e3ce3SKalle Valo 	B43legacy_WARN_ON(!ring->tx);
1426423e3ce3SKalle Valo 	op32_tx_suspend(ring);
1427423e3ce3SKalle Valo }
1428423e3ce3SKalle Valo 
1429423e3ce3SKalle Valo static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
1430423e3ce3SKalle Valo {
1431423e3ce3SKalle Valo 	B43legacy_WARN_ON(!ring->tx);
1432423e3ce3SKalle Valo 	op32_tx_resume(ring);
1433423e3ce3SKalle Valo }
1434423e3ce3SKalle Valo 
1435423e3ce3SKalle Valo void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1436423e3ce3SKalle Valo {
1437423e3ce3SKalle Valo 	b43legacy_power_saving_ctl_bits(dev, -1, 1);
1438423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1439423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1440423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1441423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1442423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1443423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1444423e3ce3SKalle Valo }
1445423e3ce3SKalle Valo 
1446423e3ce3SKalle Valo void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1447423e3ce3SKalle Valo {
1448423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1449423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1450423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1451423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1452423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1453423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1454423e3ce3SKalle Valo 	b43legacy_power_saving_ctl_bits(dev, -1, -1);
1455423e3ce3SKalle Valo }
1456