// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/
16423e3ce3SKalle Valo 
17423e3ce3SKalle Valo #include "b43legacy.h"
18423e3ce3SKalle Valo #include "dma.h"
19423e3ce3SKalle Valo #include "main.h"
20423e3ce3SKalle Valo #include "debugfs.h"
21423e3ce3SKalle Valo #include "xmit.h"
22423e3ce3SKalle Valo 
23423e3ce3SKalle Valo #include <linux/dma-mapping.h>
24423e3ce3SKalle Valo #include <linux/pci.h>
25423e3ce3SKalle Valo #include <linux/delay.h>
26423e3ce3SKalle Valo #include <linux/skbuff.h>
27423e3ce3SKalle Valo #include <linux/slab.h>
28423e3ce3SKalle Valo #include <net/dst.h>
29423e3ce3SKalle Valo 
30423e3ce3SKalle Valo /* 32bit DMA ops. */
31423e3ce3SKalle Valo static
op32_idx2desc(struct b43legacy_dmaring * ring,int slot,struct b43legacy_dmadesc_meta ** meta)32423e3ce3SKalle Valo struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
33423e3ce3SKalle Valo 					  int slot,
34423e3ce3SKalle Valo 					  struct b43legacy_dmadesc_meta **meta)
35423e3ce3SKalle Valo {
36423e3ce3SKalle Valo 	struct b43legacy_dmadesc32 *desc;
37423e3ce3SKalle Valo 
38423e3ce3SKalle Valo 	*meta = &(ring->meta[slot]);
39423e3ce3SKalle Valo 	desc = ring->descbase;
40423e3ce3SKalle Valo 	desc = &(desc[slot]);
41423e3ce3SKalle Valo 
42423e3ce3SKalle Valo 	return desc;
43423e3ce3SKalle Valo }
44423e3ce3SKalle Valo 
/* Program one 32bit DMA descriptor: buffer bus address, byte count and
 * the DTABLEEND/FRAMESTART/FRAMEEND/IRQ control flags. */
static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot = (int)(desc - descbase);
	u32 addr;
	u32 addrext;
	u32 ctl;

	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	/* Split the bus address into its low part and the address
	 * extension bits, then apply the SSB routing (translation) value. */
	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;

	ctl = (bufsize - ring->frameoffset) & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
79423e3ce3SKalle Valo 
op32_poke_tx(struct b43legacy_dmaring * ring,int slot)80423e3ce3SKalle Valo static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
81423e3ce3SKalle Valo {
82423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
83423e3ce3SKalle Valo 			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
84423e3ce3SKalle Valo }
85423e3ce3SKalle Valo 
op32_tx_suspend(struct b43legacy_dmaring * ring)86423e3ce3SKalle Valo static void op32_tx_suspend(struct b43legacy_dmaring *ring)
87423e3ce3SKalle Valo {
88423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
89423e3ce3SKalle Valo 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
90423e3ce3SKalle Valo 			    | B43legacy_DMA32_TXSUSPEND);
91423e3ce3SKalle Valo }
92423e3ce3SKalle Valo 
op32_tx_resume(struct b43legacy_dmaring * ring)93423e3ce3SKalle Valo static void op32_tx_resume(struct b43legacy_dmaring *ring)
94423e3ce3SKalle Valo {
95423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
96423e3ce3SKalle Valo 			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
97423e3ce3SKalle Valo 			    & ~B43legacy_DMA32_TXSUSPEND);
98423e3ce3SKalle Valo }
99423e3ce3SKalle Valo 
op32_get_current_rxslot(struct b43legacy_dmaring * ring)100423e3ce3SKalle Valo static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
101423e3ce3SKalle Valo {
102423e3ce3SKalle Valo 	u32 val;
103423e3ce3SKalle Valo 
104423e3ce3SKalle Valo 	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
105423e3ce3SKalle Valo 	val &= B43legacy_DMA32_RXDPTR;
106423e3ce3SKalle Valo 
107423e3ce3SKalle Valo 	return (val / sizeof(struct b43legacy_dmadesc32));
108423e3ce3SKalle Valo }
109423e3ce3SKalle Valo 
op32_set_current_rxslot(struct b43legacy_dmaring * ring,int slot)110423e3ce3SKalle Valo static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
111423e3ce3SKalle Valo 				    int slot)
112423e3ce3SKalle Valo {
113423e3ce3SKalle Valo 	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
114423e3ce3SKalle Valo 			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
115423e3ce3SKalle Valo }
116423e3ce3SKalle Valo 
free_slots(struct b43legacy_dmaring * ring)117423e3ce3SKalle Valo static inline int free_slots(struct b43legacy_dmaring *ring)
118423e3ce3SKalle Valo {
119423e3ce3SKalle Valo 	return (ring->nr_slots - ring->used_slots);
120423e3ce3SKalle Valo }
121423e3ce3SKalle Valo 
next_slot(struct b43legacy_dmaring * ring,int slot)122423e3ce3SKalle Valo static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
123423e3ce3SKalle Valo {
124423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
125423e3ce3SKalle Valo 	if (slot == ring->nr_slots - 1)
126423e3ce3SKalle Valo 		return 0;
127423e3ce3SKalle Valo 	return slot + 1;
128423e3ce3SKalle Valo }
129423e3ce3SKalle Valo 
130423e3ce3SKalle Valo #ifdef CONFIG_B43LEGACY_DEBUG
update_max_used_slots(struct b43legacy_dmaring * ring,int current_used_slots)131423e3ce3SKalle Valo static void update_max_used_slots(struct b43legacy_dmaring *ring,
132423e3ce3SKalle Valo 				  int current_used_slots)
133423e3ce3SKalle Valo {
134423e3ce3SKalle Valo 	if (current_used_slots <= ring->max_used_slots)
135423e3ce3SKalle Valo 		return;
136423e3ce3SKalle Valo 	ring->max_used_slots = current_used_slots;
137423e3ce3SKalle Valo 	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
138423e3ce3SKalle Valo 		b43legacydbg(ring->dev->wl,
139423e3ce3SKalle Valo 		       "max_used_slots increased to %d on %s ring %d\n",
140423e3ce3SKalle Valo 		       ring->max_used_slots,
141423e3ce3SKalle Valo 		       ring->tx ? "TX" : "RX",
142423e3ce3SKalle Valo 		       ring->index);
143423e3ce3SKalle Valo }
144423e3ce3SKalle Valo #else
145423e3ce3SKalle Valo static inline
update_max_used_slots(struct b43legacy_dmaring * ring,int current_used_slots)146423e3ce3SKalle Valo void update_max_used_slots(struct b43legacy_dmaring *ring,
147423e3ce3SKalle Valo 			   int current_used_slots)
148423e3ce3SKalle Valo { }
149423e3ce3SKalle Valo #endif /* DEBUG */
150423e3ce3SKalle Valo 
151423e3ce3SKalle Valo /* Request a slot for usage. */
152423e3ce3SKalle Valo static inline
request_slot(struct b43legacy_dmaring * ring)153423e3ce3SKalle Valo int request_slot(struct b43legacy_dmaring *ring)
154423e3ce3SKalle Valo {
155423e3ce3SKalle Valo 	int slot;
156423e3ce3SKalle Valo 
157423e3ce3SKalle Valo 	B43legacy_WARN_ON(!ring->tx);
158423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->stopped);
159423e3ce3SKalle Valo 	B43legacy_WARN_ON(free_slots(ring) == 0);
160423e3ce3SKalle Valo 
161423e3ce3SKalle Valo 	slot = next_slot(ring, ring->current_slot);
162423e3ce3SKalle Valo 	ring->current_slot = slot;
163423e3ce3SKalle Valo 	ring->used_slots++;
164423e3ce3SKalle Valo 
165423e3ce3SKalle Valo 	update_max_used_slots(ring, ring->used_slots);
166423e3ce3SKalle Valo 
167423e3ce3SKalle Valo 	return slot;
168423e3ce3SKalle Valo }
169423e3ce3SKalle Valo 
170423e3ce3SKalle Valo /* Mac80211-queue to b43legacy-ring mapping */
priority_to_txring(struct b43legacy_wldev * dev,int queue_priority)171423e3ce3SKalle Valo static struct b43legacy_dmaring *priority_to_txring(
172423e3ce3SKalle Valo 						struct b43legacy_wldev *dev,
173423e3ce3SKalle Valo 						int queue_priority)
174423e3ce3SKalle Valo {
175423e3ce3SKalle Valo 	struct b43legacy_dmaring *ring;
176423e3ce3SKalle Valo 
177423e3ce3SKalle Valo /*FIXME: For now we always run on TX-ring-1 */
178423e3ce3SKalle Valo return dev->dma.tx_ring1;
179423e3ce3SKalle Valo 
180423e3ce3SKalle Valo 	/* 0 = highest priority */
181423e3ce3SKalle Valo 	switch (queue_priority) {
182423e3ce3SKalle Valo 	default:
183423e3ce3SKalle Valo 		B43legacy_WARN_ON(1);
184*ce3b6845SGustavo A. R. Silva 		fallthrough;
185423e3ce3SKalle Valo 	case 0:
186423e3ce3SKalle Valo 		ring = dev->dma.tx_ring3;
187423e3ce3SKalle Valo 		break;
188423e3ce3SKalle Valo 	case 1:
189423e3ce3SKalle Valo 		ring = dev->dma.tx_ring2;
190423e3ce3SKalle Valo 		break;
191423e3ce3SKalle Valo 	case 2:
192423e3ce3SKalle Valo 		ring = dev->dma.tx_ring1;
193423e3ce3SKalle Valo 		break;
194423e3ce3SKalle Valo 	case 3:
195423e3ce3SKalle Valo 		ring = dev->dma.tx_ring0;
196423e3ce3SKalle Valo 		break;
197423e3ce3SKalle Valo 	case 4:
198423e3ce3SKalle Valo 		ring = dev->dma.tx_ring4;
199423e3ce3SKalle Valo 		break;
200423e3ce3SKalle Valo 	case 5:
201423e3ce3SKalle Valo 		ring = dev->dma.tx_ring5;
202423e3ce3SKalle Valo 		break;
203423e3ce3SKalle Valo 	}
204423e3ce3SKalle Valo 
205423e3ce3SKalle Valo 	return ring;
206423e3ce3SKalle Valo }
207423e3ce3SKalle Valo 
b43legacy_dmacontroller_base(enum b43legacy_dmatype type,int controller_idx)208423e3ce3SKalle Valo static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
209423e3ce3SKalle Valo 					int controller_idx)
210423e3ce3SKalle Valo {
211423e3ce3SKalle Valo 	static const u16 map32[] = {
212423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE0,
213423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE1,
214423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE2,
215423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE3,
216423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE4,
217423e3ce3SKalle Valo 		B43legacy_MMIO_DMA32_BASE5,
218423e3ce3SKalle Valo 	};
219423e3ce3SKalle Valo 
220423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(controller_idx >= 0 &&
221423e3ce3SKalle Valo 			  controller_idx < ARRAY_SIZE(map32)));
222423e3ce3SKalle Valo 	return map32[controller_idx];
223423e3ce3SKalle Valo }
224423e3ce3SKalle Valo 
225423e3ce3SKalle Valo static inline
map_descbuffer(struct b43legacy_dmaring * ring,unsigned char * buf,size_t len,int tx)226423e3ce3SKalle Valo dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
227423e3ce3SKalle Valo 			  unsigned char *buf,
228423e3ce3SKalle Valo 			  size_t len,
229423e3ce3SKalle Valo 			  int tx)
230423e3ce3SKalle Valo {
231423e3ce3SKalle Valo 	dma_addr_t dmaaddr;
232423e3ce3SKalle Valo 
233423e3ce3SKalle Valo 	if (tx)
234423e3ce3SKalle Valo 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
235423e3ce3SKalle Valo 					     buf, len,
236423e3ce3SKalle Valo 					     DMA_TO_DEVICE);
237423e3ce3SKalle Valo 	else
238423e3ce3SKalle Valo 		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
239423e3ce3SKalle Valo 					     buf, len,
240423e3ce3SKalle Valo 					     DMA_FROM_DEVICE);
241423e3ce3SKalle Valo 
242423e3ce3SKalle Valo 	return dmaaddr;
243423e3ce3SKalle Valo }
244423e3ce3SKalle Valo 
245423e3ce3SKalle Valo static inline
unmap_descbuffer(struct b43legacy_dmaring * ring,dma_addr_t addr,size_t len,int tx)246423e3ce3SKalle Valo void unmap_descbuffer(struct b43legacy_dmaring *ring,
247423e3ce3SKalle Valo 		      dma_addr_t addr,
248423e3ce3SKalle Valo 		      size_t len,
249423e3ce3SKalle Valo 		      int tx)
250423e3ce3SKalle Valo {
251423e3ce3SKalle Valo 	if (tx)
252423e3ce3SKalle Valo 		dma_unmap_single(ring->dev->dev->dma_dev,
253423e3ce3SKalle Valo 				     addr, len,
254423e3ce3SKalle Valo 				     DMA_TO_DEVICE);
255423e3ce3SKalle Valo 	else
256423e3ce3SKalle Valo 		dma_unmap_single(ring->dev->dev->dma_dev,
257423e3ce3SKalle Valo 				     addr, len,
258423e3ce3SKalle Valo 				     DMA_FROM_DEVICE);
259423e3ce3SKalle Valo }
260423e3ce3SKalle Valo 
261423e3ce3SKalle Valo static inline
sync_descbuffer_for_cpu(struct b43legacy_dmaring * ring,dma_addr_t addr,size_t len)262423e3ce3SKalle Valo void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
263423e3ce3SKalle Valo 			     dma_addr_t addr,
264423e3ce3SKalle Valo 			     size_t len)
265423e3ce3SKalle Valo {
266423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
267423e3ce3SKalle Valo 
268423e3ce3SKalle Valo 	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
269423e3ce3SKalle Valo 				addr, len, DMA_FROM_DEVICE);
270423e3ce3SKalle Valo }
271423e3ce3SKalle Valo 
272423e3ce3SKalle Valo static inline
sync_descbuffer_for_device(struct b43legacy_dmaring * ring,dma_addr_t addr,size_t len)273423e3ce3SKalle Valo void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
274423e3ce3SKalle Valo 				dma_addr_t addr,
275423e3ce3SKalle Valo 				size_t len)
276423e3ce3SKalle Valo {
277423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
278423e3ce3SKalle Valo 
279423e3ce3SKalle Valo 	dma_sync_single_for_device(ring->dev->dev->dma_dev,
280423e3ce3SKalle Valo 				   addr, len, DMA_FROM_DEVICE);
281423e3ce3SKalle Valo }
282423e3ce3SKalle Valo 
283423e3ce3SKalle Valo static inline
free_descriptor_buffer(struct b43legacy_dmaring * ring,struct b43legacy_dmadesc_meta * meta,int irq_context)284423e3ce3SKalle Valo void free_descriptor_buffer(struct b43legacy_dmaring *ring,
285423e3ce3SKalle Valo 			    struct b43legacy_dmadesc_meta *meta,
286423e3ce3SKalle Valo 			    int irq_context)
287423e3ce3SKalle Valo {
288423e3ce3SKalle Valo 	if (meta->skb) {
289423e3ce3SKalle Valo 		if (irq_context)
290423e3ce3SKalle Valo 			dev_kfree_skb_irq(meta->skb);
291423e3ce3SKalle Valo 		else
292423e3ce3SKalle Valo 			dev_kfree_skb(meta->skb);
293423e3ce3SKalle Valo 		meta->skb = NULL;
294423e3ce3SKalle Valo 	}
295423e3ce3SKalle Valo }
296423e3ce3SKalle Valo 
alloc_ringmemory(struct b43legacy_dmaring * ring)297423e3ce3SKalle Valo static int alloc_ringmemory(struct b43legacy_dmaring *ring)
298423e3ce3SKalle Valo {
299423e3ce3SKalle Valo 	/* GFP flags must match the flags in free_ringmemory()! */
300750afb08SLuis Chamberlain 	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
301423e3ce3SKalle Valo 					    B43legacy_DMA_RINGMEMSIZE,
302423e3ce3SKalle Valo 					    &(ring->dmabase), GFP_KERNEL);
303423e3ce3SKalle Valo 	if (!ring->descbase)
304423e3ce3SKalle Valo 		return -ENOMEM;
305423e3ce3SKalle Valo 
306423e3ce3SKalle Valo 	return 0;
307423e3ce3SKalle Valo }
308423e3ce3SKalle Valo 
free_ringmemory(struct b43legacy_dmaring * ring)309423e3ce3SKalle Valo static void free_ringmemory(struct b43legacy_dmaring *ring)
310423e3ce3SKalle Valo {
311423e3ce3SKalle Valo 	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
312423e3ce3SKalle Valo 			  ring->descbase, ring->dmabase);
313423e3ce3SKalle Valo }
314423e3ce3SKalle Valo 
315423e3ce3SKalle Valo /* Reset the RX DMA channel */
b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev * dev,u16 mmio_base,enum b43legacy_dmatype type)316423e3ce3SKalle Valo static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
317423e3ce3SKalle Valo 					    u16 mmio_base,
318423e3ce3SKalle Valo 					    enum b43legacy_dmatype type)
319423e3ce3SKalle Valo {
320423e3ce3SKalle Valo 	int i;
321423e3ce3SKalle Valo 	u32 value;
322423e3ce3SKalle Valo 	u16 offset;
323423e3ce3SKalle Valo 
324423e3ce3SKalle Valo 	might_sleep();
325423e3ce3SKalle Valo 
326423e3ce3SKalle Valo 	offset = B43legacy_DMA32_RXCTL;
327423e3ce3SKalle Valo 	b43legacy_write32(dev, mmio_base + offset, 0);
328423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
329423e3ce3SKalle Valo 		offset = B43legacy_DMA32_RXSTATUS;
330423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
331423e3ce3SKalle Valo 		value &= B43legacy_DMA32_RXSTATE;
332423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
333423e3ce3SKalle Valo 			i = -1;
334423e3ce3SKalle Valo 			break;
335423e3ce3SKalle Valo 		}
336423e3ce3SKalle Valo 		msleep(1);
337423e3ce3SKalle Valo 	}
338423e3ce3SKalle Valo 	if (i != -1) {
339423e3ce3SKalle Valo 		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
340423e3ce3SKalle Valo 		return -ENODEV;
341423e3ce3SKalle Valo 	}
342423e3ce3SKalle Valo 
343423e3ce3SKalle Valo 	return 0;
344423e3ce3SKalle Valo }
345423e3ce3SKalle Valo 
346423e3ce3SKalle Valo /* Reset the RX DMA channel */
b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev * dev,u16 mmio_base,enum b43legacy_dmatype type)347423e3ce3SKalle Valo static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
348423e3ce3SKalle Valo 					    u16 mmio_base,
349423e3ce3SKalle Valo 					    enum b43legacy_dmatype type)
350423e3ce3SKalle Valo {
351423e3ce3SKalle Valo 	int i;
352423e3ce3SKalle Valo 	u32 value;
353423e3ce3SKalle Valo 	u16 offset;
354423e3ce3SKalle Valo 
355423e3ce3SKalle Valo 	might_sleep();
356423e3ce3SKalle Valo 
357423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
358423e3ce3SKalle Valo 		offset = B43legacy_DMA32_TXSTATUS;
359423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
360423e3ce3SKalle Valo 		value &= B43legacy_DMA32_TXSTATE;
361423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
362423e3ce3SKalle Valo 		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
363423e3ce3SKalle Valo 		    value == B43legacy_DMA32_TXSTAT_STOPPED)
364423e3ce3SKalle Valo 			break;
365423e3ce3SKalle Valo 		msleep(1);
366423e3ce3SKalle Valo 	}
367423e3ce3SKalle Valo 	offset = B43legacy_DMA32_TXCTL;
368423e3ce3SKalle Valo 	b43legacy_write32(dev, mmio_base + offset, 0);
369423e3ce3SKalle Valo 	for (i = 0; i < 10; i++) {
370423e3ce3SKalle Valo 		offset = B43legacy_DMA32_TXSTATUS;
371423e3ce3SKalle Valo 		value = b43legacy_read32(dev, mmio_base + offset);
372423e3ce3SKalle Valo 		value &= B43legacy_DMA32_TXSTATE;
373423e3ce3SKalle Valo 		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
374423e3ce3SKalle Valo 			i = -1;
375423e3ce3SKalle Valo 			break;
376423e3ce3SKalle Valo 		}
377423e3ce3SKalle Valo 		msleep(1);
378423e3ce3SKalle Valo 	}
379423e3ce3SKalle Valo 	if (i != -1) {
380423e3ce3SKalle Valo 		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
381423e3ce3SKalle Valo 		return -ENODEV;
382423e3ce3SKalle Valo 	}
383423e3ce3SKalle Valo 	/* ensure the reset is completed. */
384423e3ce3SKalle Valo 	msleep(1);
385423e3ce3SKalle Valo 
386423e3ce3SKalle Valo 	return 0;
387423e3ce3SKalle Valo }
388423e3ce3SKalle Valo 
389423e3ce3SKalle Valo /* Check if a DMA mapping address is invalid. */
b43legacy_dma_mapping_error(struct b43legacy_dmaring * ring,dma_addr_t addr,size_t buffersize,bool dma_to_device)390423e3ce3SKalle Valo static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
391423e3ce3SKalle Valo 					 dma_addr_t addr,
392423e3ce3SKalle Valo 					 size_t buffersize,
393423e3ce3SKalle Valo 					 bool dma_to_device)
394423e3ce3SKalle Valo {
395423e3ce3SKalle Valo 	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
396423e3ce3SKalle Valo 		return true;
397423e3ce3SKalle Valo 
398423e3ce3SKalle Valo 	switch (ring->type) {
399423e3ce3SKalle Valo 	case B43legacy_DMA_30BIT:
400423e3ce3SKalle Valo 		if ((u64)addr + buffersize > (1ULL << 30))
401423e3ce3SKalle Valo 			goto address_error;
402423e3ce3SKalle Valo 		break;
403423e3ce3SKalle Valo 	case B43legacy_DMA_32BIT:
404423e3ce3SKalle Valo 		if ((u64)addr + buffersize > (1ULL << 32))
405423e3ce3SKalle Valo 			goto address_error;
406423e3ce3SKalle Valo 		break;
407423e3ce3SKalle Valo 	}
408423e3ce3SKalle Valo 
409423e3ce3SKalle Valo 	/* The address is OK. */
410423e3ce3SKalle Valo 	return false;
411423e3ce3SKalle Valo 
412423e3ce3SKalle Valo address_error:
413423e3ce3SKalle Valo 	/* We can't support this address. Unmap it again. */
414423e3ce3SKalle Valo 	unmap_descbuffer(ring, addr, buffersize, dma_to_device);
415423e3ce3SKalle Valo 
416423e3ce3SKalle Valo 	return true;
417423e3ce3SKalle Valo }
418423e3ce3SKalle Valo 
/* Allocate and map an skb for one RX slot and program its descriptor.
 * If the first mapping lands outside the engine's addressable range,
 * the buffer is reallocated once from ZONE_DMA.
 * Returns 0, -ENOMEM or -EIO. */
static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	struct sk_buff *skb;
	dma_addr_t dmaaddr;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr,
					ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		dev_kfree_skb_any(skb);
		skb = __dev_alloc_skb(ring->rx_buffersize,
				      gfp_flags | GFP_DMA);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43legacy_dma_mapping_error(ring, dmaaddr,
						ring->rx_buffersize, 0)) {
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	/* Clear the length/cookie fields at the head of the buffer so
	 * stale memory is never mistaken for device-written data. */
	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}
465423e3ce3SKalle Valo 
466423e3ce3SKalle Valo /* Allocate the initial descbuffers.
467423e3ce3SKalle Valo  * This is used for an RX ring only.
468423e3ce3SKalle Valo  */
alloc_initial_descbuffers(struct b43legacy_dmaring * ring)469423e3ce3SKalle Valo static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
470423e3ce3SKalle Valo {
471423e3ce3SKalle Valo 	int i;
472423e3ce3SKalle Valo 	int err = -ENOMEM;
473423e3ce3SKalle Valo 	struct b43legacy_dmadesc32 *desc;
474423e3ce3SKalle Valo 	struct b43legacy_dmadesc_meta *meta;
475423e3ce3SKalle Valo 
476423e3ce3SKalle Valo 	for (i = 0; i < ring->nr_slots; i++) {
477423e3ce3SKalle Valo 		desc = op32_idx2desc(ring, i, &meta);
478423e3ce3SKalle Valo 
479423e3ce3SKalle Valo 		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
480423e3ce3SKalle Valo 		if (err) {
481423e3ce3SKalle Valo 			b43legacyerr(ring->dev->wl,
482423e3ce3SKalle Valo 			       "Failed to allocate initial descbuffers\n");
483423e3ce3SKalle Valo 			goto err_unwind;
484423e3ce3SKalle Valo 		}
485423e3ce3SKalle Valo 	}
486423e3ce3SKalle Valo 	mb(); /* all descbuffer setup before next line */
487423e3ce3SKalle Valo 	ring->used_slots = ring->nr_slots;
488423e3ce3SKalle Valo 	err = 0;
489423e3ce3SKalle Valo out:
490423e3ce3SKalle Valo 	return err;
491423e3ce3SKalle Valo 
492423e3ce3SKalle Valo err_unwind:
493423e3ce3SKalle Valo 	for (i--; i >= 0; i--) {
494423e3ce3SKalle Valo 		desc = op32_idx2desc(ring, i, &meta);
495423e3ce3SKalle Valo 
496423e3ce3SKalle Valo 		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
497423e3ce3SKalle Valo 		dev_kfree_skb(meta->skb);
498423e3ce3SKalle Valo 	}
499423e3ce3SKalle Valo 	goto out;
500423e3ce3SKalle Valo }
501423e3ce3SKalle Valo 
502423e3ce3SKalle Valo /* Do initial setup of the DMA controller.
503423e3ce3SKalle Valo  * Reset the controller, write the ring busaddress
504423e3ce3SKalle Valo  * and switch the "enable" bit on.
505423e3ce3SKalle Valo  */
dmacontroller_setup(struct b43legacy_dmaring * ring)506423e3ce3SKalle Valo static int dmacontroller_setup(struct b43legacy_dmaring *ring)
507423e3ce3SKalle Valo {
508423e3ce3SKalle Valo 	int err = 0;
509423e3ce3SKalle Valo 	u32 value;
510423e3ce3SKalle Valo 	u32 addrext;
511423e3ce3SKalle Valo 	u32 trans = ring->dev->dma.translation;
512423e3ce3SKalle Valo 	u32 ringbase = (u32)(ring->dmabase);
513423e3ce3SKalle Valo 
514423e3ce3SKalle Valo 	if (ring->tx) {
515423e3ce3SKalle Valo 		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
516423e3ce3SKalle Valo 			  >> SSB_DMA_TRANSLATION_SHIFT;
517423e3ce3SKalle Valo 		value = B43legacy_DMA32_TXENABLE;
518423e3ce3SKalle Valo 		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
519423e3ce3SKalle Valo 			& B43legacy_DMA32_TXADDREXT_MASK;
520423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
521423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
522423e3ce3SKalle Valo 				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
523423e3ce3SKalle Valo 				    | trans);
524423e3ce3SKalle Valo 	} else {
525423e3ce3SKalle Valo 		err = alloc_initial_descbuffers(ring);
526423e3ce3SKalle Valo 		if (err)
527423e3ce3SKalle Valo 			goto out;
528423e3ce3SKalle Valo 
529423e3ce3SKalle Valo 		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
530423e3ce3SKalle Valo 			  >> SSB_DMA_TRANSLATION_SHIFT;
531423e3ce3SKalle Valo 		value = (ring->frameoffset <<
532423e3ce3SKalle Valo 			 B43legacy_DMA32_RXFROFF_SHIFT);
533423e3ce3SKalle Valo 		value |= B43legacy_DMA32_RXENABLE;
534423e3ce3SKalle Valo 		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
535423e3ce3SKalle Valo 			 & B43legacy_DMA32_RXADDREXT_MASK;
536423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
537423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
538423e3ce3SKalle Valo 				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
539423e3ce3SKalle Valo 				    | trans);
540423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
541423e3ce3SKalle Valo 	}
542423e3ce3SKalle Valo 
543423e3ce3SKalle Valo out:
544423e3ce3SKalle Valo 	return err;
545423e3ce3SKalle Valo }
546423e3ce3SKalle Valo 
547423e3ce3SKalle Valo /* Shutdown the DMA controller. */
dmacontroller_cleanup(struct b43legacy_dmaring * ring)548423e3ce3SKalle Valo static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
549423e3ce3SKalle Valo {
550423e3ce3SKalle Valo 	if (ring->tx) {
551423e3ce3SKalle Valo 		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
552423e3ce3SKalle Valo 						 ring->type);
553423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
554423e3ce3SKalle Valo 	} else {
555423e3ce3SKalle Valo 		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
556423e3ce3SKalle Valo 						 ring->type);
557423e3ce3SKalle Valo 		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
558423e3ce3SKalle Valo 	}
559423e3ce3SKalle Valo }
560423e3ce3SKalle Valo 
free_all_descbuffers(struct b43legacy_dmaring * ring)561423e3ce3SKalle Valo static void free_all_descbuffers(struct b43legacy_dmaring *ring)
562423e3ce3SKalle Valo {
563423e3ce3SKalle Valo 	struct b43legacy_dmadesc_meta *meta;
564423e3ce3SKalle Valo 	int i;
565423e3ce3SKalle Valo 
566423e3ce3SKalle Valo 	if (!ring->used_slots)
567423e3ce3SKalle Valo 		return;
568423e3ce3SKalle Valo 	for (i = 0; i < ring->nr_slots; i++) {
569423e3ce3SKalle Valo 		op32_idx2desc(ring, i, &meta);
570423e3ce3SKalle Valo 
571423e3ce3SKalle Valo 		if (!meta->skb) {
572423e3ce3SKalle Valo 			B43legacy_WARN_ON(!ring->tx);
573423e3ce3SKalle Valo 			continue;
574423e3ce3SKalle Valo 		}
575423e3ce3SKalle Valo 		if (ring->tx)
576423e3ce3SKalle Valo 			unmap_descbuffer(ring, meta->dmaaddr,
577423e3ce3SKalle Valo 					 meta->skb->len, 1);
578423e3ce3SKalle Valo 		else
579423e3ce3SKalle Valo 			unmap_descbuffer(ring, meta->dmaaddr,
580423e3ce3SKalle Valo 					 ring->rx_buffersize, 0);
581423e3ce3SKalle Valo 		free_descriptor_buffer(ring, meta, 0);
582423e3ce3SKalle Valo 	}
583423e3ce3SKalle Valo }
584423e3ce3SKalle Valo 
b43legacy_engine_type(struct b43legacy_wldev * dev)58580372782SChristoph Hellwig static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
586423e3ce3SKalle Valo {
587423e3ce3SKalle Valo 	u32 tmp;
588423e3ce3SKalle Valo 	u16 mmio_base;
589423e3ce3SKalle Valo 
590423e3ce3SKalle Valo 	mmio_base = b43legacy_dmacontroller_base(0, 0);
591423e3ce3SKalle Valo 	b43legacy_write32(dev,
592423e3ce3SKalle Valo 			mmio_base + B43legacy_DMA32_TXCTL,
593423e3ce3SKalle Valo 			B43legacy_DMA32_TXADDREXT_MASK);
594423e3ce3SKalle Valo 	tmp = b43legacy_read32(dev, mmio_base +
595423e3ce3SKalle Valo 			       B43legacy_DMA32_TXCTL);
596423e3ce3SKalle Valo 	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
597423e3ce3SKalle Valo 		return B43legacy_DMA_32BIT;
598423e3ce3SKalle Valo 	return B43legacy_DMA_30BIT;
599423e3ce3SKalle Valo }
600423e3ce3SKalle Valo 
/*
 * Main initialization function.
 *
 * Allocate and set up one DMA ring for the controller with index
 * @controller_index.  @for_tx selects a TX ring (B43legacy_TXRING_SLOTS
 * slots plus a per-slot TX-header cache) versus an RX ring
 * (B43legacy_RXRING_SLOTS slots).  @type is the DMA addressing type
 * (30/32 bit) probed by the caller.
 *
 * Returns the fully set-up ring, or NULL on any failure.
 */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kcalloc(nr_slots, sizeof(struct b43legacy_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* The cache is not DMA-able at its current address:
			 * reallocate it from ZONE_DMA and test again. */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		/* The successful test mapping is not needed any further. */
		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		/* Only RX controllers 0 and 3 are used by this driver;
		 * each has its own buffer size and frame offset. */
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

	/* Error unwind: each label frees what was allocated before it.
	 * kfree(NULL) is a no-op, so RX rings (no txhdr_cache) are fine. */
err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}
705423e3ce3SKalle Valo 
706423e3ce3SKalle Valo /* Main cleanup function. */
b43legacy_destroy_dmaring(struct b43legacy_dmaring * ring)707423e3ce3SKalle Valo static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
708423e3ce3SKalle Valo {
709423e3ce3SKalle Valo 	if (!ring)
710423e3ce3SKalle Valo 		return;
711423e3ce3SKalle Valo 
712423e3ce3SKalle Valo 	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
713423e3ce3SKalle Valo 		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
714423e3ce3SKalle Valo 		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
715423e3ce3SKalle Valo 		     ring->nr_slots);
716423e3ce3SKalle Valo 	/* Device IRQs are disabled prior entering this function,
717423e3ce3SKalle Valo 	 * so no need to take care of concurrency with rx handler stuff.
718423e3ce3SKalle Valo 	 */
719423e3ce3SKalle Valo 	dmacontroller_cleanup(ring);
720423e3ce3SKalle Valo 	free_all_descbuffers(ring);
721423e3ce3SKalle Valo 	free_ringmemory(ring);
722423e3ce3SKalle Valo 
723423e3ce3SKalle Valo 	kfree(ring->txhdr_cache);
724423e3ce3SKalle Valo 	kfree(ring->meta);
725423e3ce3SKalle Valo 	kfree(ring);
726423e3ce3SKalle Valo }
727423e3ce3SKalle Valo 
b43legacy_dma_free(struct b43legacy_wldev * dev)728423e3ce3SKalle Valo void b43legacy_dma_free(struct b43legacy_wldev *dev)
729423e3ce3SKalle Valo {
730423e3ce3SKalle Valo 	struct b43legacy_dma *dma;
731423e3ce3SKalle Valo 
732423e3ce3SKalle Valo 	if (b43legacy_using_pio(dev))
733423e3ce3SKalle Valo 		return;
734423e3ce3SKalle Valo 	dma = &dev->dma;
735423e3ce3SKalle Valo 
736423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->rx_ring3);
737423e3ce3SKalle Valo 	dma->rx_ring3 = NULL;
738423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->rx_ring0);
739423e3ce3SKalle Valo 	dma->rx_ring0 = NULL;
740423e3ce3SKalle Valo 
741423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring5);
742423e3ce3SKalle Valo 	dma->tx_ring5 = NULL;
743423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring4);
744423e3ce3SKalle Valo 	dma->tx_ring4 = NULL;
745423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring3);
746423e3ce3SKalle Valo 	dma->tx_ring3 = NULL;
747423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring2);
748423e3ce3SKalle Valo 	dma->tx_ring2 = NULL;
749423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring1);
750423e3ce3SKalle Valo 	dma->tx_ring1 = NULL;
751423e3ce3SKalle Valo 	b43legacy_destroy_dmaring(dma->tx_ring0);
752423e3ce3SKalle Valo 	dma->tx_ring0 = NULL;
753423e3ce3SKalle Valo }
754423e3ce3SKalle Valo 
/* Set up all TX and RX DMA rings for @dev.
 * Returns 0 on success, -EAGAIN when falling back to PIO, or a
 * negative error code. */
int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type = b43legacy_engine_type(dev);
	int err;

	/* Program the DMA mask for the probed addressing width.
	 * If the platform cannot satisfy it, fall back to PIO when that
	 * support is compiled in, otherwise give up. */
	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	/* Older cores (revision < 5) additionally use RX controller 3;
	 * its frames carry hardware TX status reports (see dma_rx()). */
	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

	/* Error unwind: destroy the rings in reverse allocation order. */
err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}
850423e3ce3SKalle Valo 
851423e3ce3SKalle Valo /* Generate a cookie for the TX header. */
generate_cookie(struct b43legacy_dmaring * ring,int slot)852423e3ce3SKalle Valo static u16 generate_cookie(struct b43legacy_dmaring *ring,
853423e3ce3SKalle Valo 			   int slot)
854423e3ce3SKalle Valo {
855423e3ce3SKalle Valo 	u16 cookie = 0x1000;
856423e3ce3SKalle Valo 
857423e3ce3SKalle Valo 	/* Use the upper 4 bits of the cookie as
858423e3ce3SKalle Valo 	 * DMA controller ID and store the slot number
859423e3ce3SKalle Valo 	 * in the lower 12 bits.
860423e3ce3SKalle Valo 	 * Note that the cookie must never be 0, as this
861423e3ce3SKalle Valo 	 * is a special value used in RX path.
862423e3ce3SKalle Valo 	 */
863423e3ce3SKalle Valo 	switch (ring->index) {
864423e3ce3SKalle Valo 	case 0:
865423e3ce3SKalle Valo 		cookie = 0xA000;
866423e3ce3SKalle Valo 		break;
867423e3ce3SKalle Valo 	case 1:
868423e3ce3SKalle Valo 		cookie = 0xB000;
869423e3ce3SKalle Valo 		break;
870423e3ce3SKalle Valo 	case 2:
871423e3ce3SKalle Valo 		cookie = 0xC000;
872423e3ce3SKalle Valo 		break;
873423e3ce3SKalle Valo 	case 3:
874423e3ce3SKalle Valo 		cookie = 0xD000;
875423e3ce3SKalle Valo 		break;
876423e3ce3SKalle Valo 	case 4:
877423e3ce3SKalle Valo 		cookie = 0xE000;
878423e3ce3SKalle Valo 		break;
879423e3ce3SKalle Valo 	case 5:
880423e3ce3SKalle Valo 		cookie = 0xF000;
881423e3ce3SKalle Valo 		break;
882423e3ce3SKalle Valo 	}
883423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
884423e3ce3SKalle Valo 	cookie |= (u16)slot;
885423e3ce3SKalle Valo 
886423e3ce3SKalle Valo 	return cookie;
887423e3ce3SKalle Valo }
888423e3ce3SKalle Valo 
889423e3ce3SKalle Valo /* Inspect a cookie and find out to which controller/slot it belongs. */
890423e3ce3SKalle Valo static
parse_cookie(struct b43legacy_wldev * dev,u16 cookie,int * slot)891423e3ce3SKalle Valo struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
892423e3ce3SKalle Valo 				      u16 cookie, int *slot)
893423e3ce3SKalle Valo {
894423e3ce3SKalle Valo 	struct b43legacy_dma *dma = &dev->dma;
895423e3ce3SKalle Valo 	struct b43legacy_dmaring *ring = NULL;
896423e3ce3SKalle Valo 
897423e3ce3SKalle Valo 	switch (cookie & 0xF000) {
898423e3ce3SKalle Valo 	case 0xA000:
899423e3ce3SKalle Valo 		ring = dma->tx_ring0;
900423e3ce3SKalle Valo 		break;
901423e3ce3SKalle Valo 	case 0xB000:
902423e3ce3SKalle Valo 		ring = dma->tx_ring1;
903423e3ce3SKalle Valo 		break;
904423e3ce3SKalle Valo 	case 0xC000:
905423e3ce3SKalle Valo 		ring = dma->tx_ring2;
906423e3ce3SKalle Valo 		break;
907423e3ce3SKalle Valo 	case 0xD000:
908423e3ce3SKalle Valo 		ring = dma->tx_ring3;
909423e3ce3SKalle Valo 		break;
910423e3ce3SKalle Valo 	case 0xE000:
911423e3ce3SKalle Valo 		ring = dma->tx_ring4;
912423e3ce3SKalle Valo 		break;
913423e3ce3SKalle Valo 	case 0xF000:
914423e3ce3SKalle Valo 		ring = dma->tx_ring5;
915423e3ce3SKalle Valo 		break;
916423e3ce3SKalle Valo 	default:
917423e3ce3SKalle Valo 		B43legacy_WARN_ON(1);
918423e3ce3SKalle Valo 	}
919423e3ce3SKalle Valo 	*slot = (cookie & 0x0FFF);
920423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));
921423e3ce3SKalle Valo 
922423e3ce3SKalle Valo 	return ring;
923423e3ce3SKalle Valo }
924423e3ce3SKalle Valo 
/* Queue one frame for transmission on @ring.
 *
 * Each packet consumes two ring slots (SLOTS_PER_PACKET): one for the
 * device TX header and one for the payload.  If the payload cannot be
 * DMA-mapped at its current address, it is copied into a freshly
 * allocated GFP_DMA bounce skb; *in_skb is then updated and the old
 * skb freed, so the caller must not keep pointers into skb data or cb.
 *
 * Returns 0 on success or a negative error code; on failure the ring
 * top/used slot counters are rolled back to their previous values.
 */
static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	/* Remember the ring state so it can be rolled back on error. */
	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		/* Carry over payload, control block and queue mapping,
		 * then retry the mapping with the bounce buffer. */
		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}
1024423e3ce3SKalle Valo 
/* Debug helper: decide whether to simulate a TX ring overflow.
 * With the DMAOVERFLOW debug flag set, report an overflow at most once
 * per second to exercise the stack's queue-full handling.
 * Always returns 0 in non-debug builds. */
static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	unsigned long next_overflow;

	if (likely(!b43legacy_debug(ring->dev, B43legacy_DBG_DMAOVERFLOW)))
		return 0;

	/* Rate-limit the injection to one per second. */
	next_overflow = ring->last_injected_overflow + HZ;
	if (!time_after(jiffies, next_overflow))
		return 0;

	ring->last_injected_overflow = jiffies;
	b43legacydbg(ring->dev->wl,
	       "Injecting TX ring overflow on "
	       "DMA controller %d\n", ring->index);
	return 1;
#else
	return 0;
#endif /* CONFIG_B43LEGACY_DEBUG */
}
1047423e3ce3SKalle Valo 
/* Transmit @skb on the DMA ring selected by its mac80211 queue mapping.
 * Returns 0 on success (or when the packet is deliberately dropped due
 * to a missing key), -ENOSPC when the ring cannot accept it, or the
 * negative error from dma_tx_fragment(). */
int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full: stop the matching mac80211 queue
		 * and remember the stop in both driver and ring state. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}
1100423e3ce3SKalle Valo 
/* Process a hardware TX status report: unmap and free the descriptors
 * of the completed packet, report the outcome to mac80211 and wake the
 * queue again if it had been stopped. */
void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	/* Walk the slots of this packet: the header slot first, then the
	 * payload slot (marked is_last_fragment). */
	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		/* Slots without an skb hold only the TX header mapping. */
		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);

			/* preserve the configured retry limit before clearing the status
			 * The xmit function has overwritten the rc's value with the actual
			 * retry limit done by the hardware */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}
1219423e3ce3SKalle Valo 
/*
 * dma_rx - process one filled RX descriptor.
 * @ring: the RX ring the descriptor belongs to.
 * @slot: in/out slot index; advanced past extra slots only when an
 *        oversized frame was split across several descriptor buffers.
 *
 * Ring index 3 carries hardware TX status reports instead of frames;
 * those are decoded via b43legacy_handle_hwtxstatus() and the buffer is
 * recycled in place.  A regular frame is detached from the ring (a
 * replacement buffer is allocated by setup_rx_descbuffer()) and handed
 * up through b43legacy_rx().  On any error the frame is dropped and the
 * existing buffer is given back to the device.
 */
static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	/* Make the DMA'd buffer contents visible to the CPU before reading. */
	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		/* The device may still be writing the status word when we
		 * get here; poll for a nonzero cookie (bounded busy-wait,
		 * ~200us worst case before giving up and decoding anyway). */
		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		/* The frame length may not have been DMA'd yet;
		 * retry a few times before dropping the frame. */
		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		/* Give every slot the oversized frame occupied back to
		 * the device, advancing *slot past them. */
		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	/* Attach a fresh buffer to this slot.  On failure, keep the old
	 * buffer on the ring and drop the frame rather than leaving the
	 * slot without a buffer. */
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	/* The skb is ours now; unmap it and strip the firmware RX
	 * header (frameoffset bytes) before passing it up the stack. */
	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}
1317423e3ce3SKalle Valo 
b43legacy_dma_rx(struct b43legacy_dmaring * ring)1318423e3ce3SKalle Valo void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
1319423e3ce3SKalle Valo {
1320423e3ce3SKalle Valo 	int slot;
1321423e3ce3SKalle Valo 	int current_slot;
1322423e3ce3SKalle Valo 	int used_slots = 0;
1323423e3ce3SKalle Valo 
1324423e3ce3SKalle Valo 	B43legacy_WARN_ON(ring->tx);
1325423e3ce3SKalle Valo 	current_slot = op32_get_current_rxslot(ring);
1326423e3ce3SKalle Valo 	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
1327423e3ce3SKalle Valo 			   ring->nr_slots));
1328423e3ce3SKalle Valo 
1329423e3ce3SKalle Valo 	slot = ring->current_slot;
1330423e3ce3SKalle Valo 	for (; slot != current_slot; slot = next_slot(ring, slot)) {
1331423e3ce3SKalle Valo 		dma_rx(ring, &slot);
1332423e3ce3SKalle Valo 		update_max_used_slots(ring, ++used_slots);
1333423e3ce3SKalle Valo 	}
1334423e3ce3SKalle Valo 	op32_set_current_rxslot(ring, slot);
1335423e3ce3SKalle Valo 	ring->current_slot = slot;
1336423e3ce3SKalle Valo }
1337423e3ce3SKalle Valo 
/* Pause the DMA engine of a single TX ring.  Must only be called on
 * TX rings (asserted). */
static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}
1343423e3ce3SKalle Valo 
/* Un-pause the DMA engine of a single TX ring.  Must only be called on
 * TX rings (asserted). */
static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}
1349423e3ce3SKalle Valo 
b43legacy_dma_tx_suspend(struct b43legacy_wldev * dev)1350423e3ce3SKalle Valo void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
1351423e3ce3SKalle Valo {
1352423e3ce3SKalle Valo 	b43legacy_power_saving_ctl_bits(dev, -1, 1);
1353423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
1354423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
1355423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
1356423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
1357423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
1358423e3ce3SKalle Valo 	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
1359423e3ce3SKalle Valo }
1360423e3ce3SKalle Valo 
b43legacy_dma_tx_resume(struct b43legacy_wldev * dev)1361423e3ce3SKalle Valo void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
1362423e3ce3SKalle Valo {
1363423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
1364423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
1365423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
1366423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
1367423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
1368423e3ce3SKalle Valo 	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
1369423e3ce3SKalle Valo 	b43legacy_power_saving_ctl_bits(dev, -1, -1);
1370423e3ce3SKalle Valo }
1371