// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43legacy wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen

*/

#include "b43legacy.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/slab.h>
#include <net/dst.h>

/* 32bit DMA ops. */
static
struct b43legacy_dmadesc32 *op32_idx2desc(struct b43legacy_dmaring *ring,
					  int slot,
					  struct b43legacy_dmadesc_meta **meta)
{
	struct b43legacy_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return desc;
}

static void op32_fill_descriptor(struct b43legacy_dmaring *ring,
				 struct b43legacy_dmadesc32 *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43legacy_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(desc - descbase);
	B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = (u32)(dmaaddr & ~SSB_DMA_TRANSLATION_MASK);
	addrext = (u32)(dmaaddr & SSB_DMA_TRANSLATION_MASK)
		   >> SSB_DMA_TRANSLATION_SHIFT;
	addr |= ring->dev->dma.translation;
	ctl = (bufsize - ring->frameoffset)
	      & B43legacy_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43legacy_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43legacy_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43legacy_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43legacy_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43legacy_DMA32_DCTL_ADDREXT_SHIFT)
	       & B43legacy_DMA32_DCTL_ADDREXT_MASK;

	desc->control = cpu_to_le32(ctl);
	desc->address = cpu_to_le32(addr);
}
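
/*
 * Worked example (assuming the ssb definitions SSB_DMA_TRANSLATION_MASK
 * == 0xC0000000 and SSB_DMA_TRANSLATION_SHIFT == 30): a dmaaddr of
 * 0x8f123456 is split into addrext == 0x2 (the top two bits) and
 * addr == 0x0f123456 ORed with ring->dev->dma.translation. addrext is
 * folded into the control word, addr goes into desc->address.
 */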

static void op32_poke_tx(struct b43legacy_dmaring *ring, int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static void op32_tx_suspend(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    | B43legacy_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43legacy_dmaring *ring)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL,
			    b43legacy_dma_read(ring, B43legacy_DMA32_TXCTL)
			    & ~B43legacy_DMA32_TXSUSPEND);
}

static int op32_get_current_rxslot(struct b43legacy_dmaring *ring)
{
	u32 val;

	val = b43legacy_dma_read(ring, B43legacy_DMA32_RXSTATUS);
	val &= B43legacy_DMA32_RXDPTR;

	return (val / sizeof(struct b43legacy_dmadesc32));
}

static void op32_set_current_rxslot(struct b43legacy_dmaring *ring,
				    int slot)
{
	b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX,
			    (u32)(slot * sizeof(struct b43legacy_dmadesc32)));
}

static inline int free_slots(struct b43legacy_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43legacy_dmaring *ring, int slot)
{
	B43legacy_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}
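
/*
 * Example: with ring->nr_slots == 4, next_slot() walks 0, 1, 2, 3 and
 * then wraps back to 0. Passing -1 (the current_slot of an empty TX
 * ring) yields slot 0.
 */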

#ifdef CONFIG_B43LEGACY_DEBUG
static void update_max_used_slots(struct b43legacy_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43legacy_debug(ring->dev, B43legacy_DBG_DMAVERBOSE))
		b43legacydbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX",
		       ring->index);
}
#else
static inline
void update_max_used_slots(struct b43legacy_dmaring *ring,
			   int current_used_slots)
{ }
#endif /* DEBUG */

/* Request a slot for usage. */
static inline
int request_slot(struct b43legacy_dmaring *ring)
{
	int slot;

	B43legacy_WARN_ON(!ring->tx);
	B43legacy_WARN_ON(ring->stopped);
	B43legacy_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

/* Mac80211-queue to b43legacy-ring mapping */
static struct b43legacy_dmaring *priority_to_txring(
						struct b43legacy_wldev *dev,
						int queue_priority)
{
	struct b43legacy_dmaring *ring;

	/* FIXME: For now we always run on TX-ring-1 */
	return dev->dma.tx_ring1;

	/* 0 = highest priority */
	switch (queue_priority) {
	default:
		B43legacy_WARN_ON(1);
		fallthrough;
	case 0:
		ring = dev->dma.tx_ring3;
		break;
	case 1:
		ring = dev->dma.tx_ring2;
		break;
	case 2:
		ring = dev->dma.tx_ring1;
		break;
	case 3:
		ring = dev->dma.tx_ring0;
		break;
	case 4:
		ring = dev->dma.tx_ring4;
		break;
	case 5:
		ring = dev->dma.tx_ring5;
		break;
	}

	return ring;
}

static u16 b43legacy_dmacontroller_base(enum b43legacy_dmatype type,
					int controller_idx)
{
	static const u16 map32[] = {
		B43legacy_MMIO_DMA32_BASE0,
		B43legacy_MMIO_DMA32_BASE1,
		B43legacy_MMIO_DMA32_BASE2,
		B43legacy_MMIO_DMA32_BASE3,
		B43legacy_MMIO_DMA32_BASE4,
		B43legacy_MMIO_DMA32_BASE5,
	};

	B43legacy_WARN_ON(!(controller_idx >= 0 &&
			  controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43legacy_dmaring *ring,
			  unsigned char *buf,
			  size_t len,
			  int tx)
{
	dma_addr_t dmaaddr;

	if (tx)
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_TO_DEVICE);
	else
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					     buf, len,
					     DMA_FROM_DEVICE);

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43legacy_dmaring *ring,
		      dma_addr_t addr,
		      size_t len,
		      int tx)
{
	if (tx)
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_TO_DEVICE);
	else
		dma_unmap_single(ring->dev->dev->dma_dev,
				     addr, len,
				     DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_cpu(struct b43legacy_dmaring *ring,
			     dma_addr_t addr,
			     size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43legacy_dmaring *ring,
				dma_addr_t addr,
				size_t len)
{
	B43legacy_WARN_ON(ring->tx);

	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43legacy_dmaring *ring,
			    struct b43legacy_dmadesc_meta *meta,
			    int irq_context)
{
	if (meta->skb) {
		if (irq_context)
			dev_kfree_skb_irq(meta->skb);
		else
			dev_kfree_skb(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43legacy_dmaring *ring)
{
	/* GFP flags must match the flags in free_ringmemory()! */
	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    B43legacy_DMA_RINGMEMSIZE,
					    &(ring->dmabase), GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43legacy_dmaring *ring)
{
	dma_free_coherent(ring->dev->dev->dma_dev, B43legacy_DMA_RINGMEMSIZE,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43legacy_dmacontroller_rx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = B43legacy_DMA32_RXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_RXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_RXSTATE;
		if (value == B43legacy_DMA32_RXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43legacy_dmacontroller_tx_reset(struct b43legacy_wldev *dev,
					    u16 mmio_base,
					    enum b43legacy_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED ||
		    value == B43legacy_DMA32_TXSTAT_IDLEWAIT ||
		    value == B43legacy_DMA32_TXSTAT_STOPPED)
			break;
		msleep(1);
	}
	offset = B43legacy_DMA32_TXCTL;
	b43legacy_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = B43legacy_DMA32_TXSTATUS;
		value = b43legacy_read32(dev, mmio_base + offset);
		value &= B43legacy_DMA32_TXSTATE;
		if (value == B43legacy_DMA32_TXSTAT_DISABLED) {
			i = -1;
			break;
		}
		msleep(1);
	}
	if (i != -1) {
		b43legacyerr(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43legacy_dma_mapping_error(struct b43legacy_dmaring *ring,
					 dma_addr_t addr,
					 size_t buffersize,
					 bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43legacy_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43legacy_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}
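
/*
 * Example: on a 30-bit engine, a buffer mapped at 0x3ffffe00 with a
 * length of 0x400 would end past the 1 GiB boundary (1ULL << 30), so
 * the mapping is rejected and immediately unmapped again.
 */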

static int setup_rx_descbuffer(struct b43legacy_dmaring *ring,
			       struct b43legacy_dmadesc32 *desc,
			       struct b43legacy_dmadesc_meta *meta,
			       gfp_t gfp_flags)
{
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct b43legacy_hwtxstatus *txstat;
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43legacy_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	dmaaddr = map_descbuffer(ring, skb->data,
				 ring->rx_buffersize, 0);
	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
	}

	if (b43legacy_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		dev_kfree_skb_any(skb);
		return -EIO;
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	op32_fill_descriptor(ring, desc, dmaaddr, ring->rx_buffersize, 0, 0, 0);

	rxhdr = (struct b43legacy_rxhdr_fw3 *)(skb->data);
	rxhdr->frame_len = 0;
	txstat = (struct b43legacy_hwtxstatus *)(skb->data);
	txstat->cookie = 0;

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43legacy_dmaring *ring)
{
	int i;
	int err = -ENOMEM;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = op32_idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43legacyerr(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb(); /* all descbuffer setup before next line */
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = op32_idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43legacy_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	u32 trans = ring->dev->dma.translation;
	u32 ringbase = (u32)(ring->dmabase);

	if (ring->tx) {
		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = B43legacy_DMA32_TXENABLE;
		value |= (addrext << B43legacy_DMA32_TXADDREXT_SHIFT)
			& B43legacy_DMA32_TXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_TXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;

		addrext = (ringbase & SSB_DMA_TRANSLATION_MASK)
			  >> SSB_DMA_TRANSLATION_SHIFT;
		value = (ring->frameoffset <<
			 B43legacy_DMA32_RXFROFF_SHIFT);
		value |= B43legacy_DMA32_RXENABLE;
		value |= (addrext << B43legacy_DMA32_RXADDREXT_SHIFT)
			 & B43legacy_DMA32_RXADDREXT_MASK;
		b43legacy_dma_write(ring, B43legacy_DMA32_RXCTL, value);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING,
				    (ringbase & ~SSB_DMA_TRANSLATION_MASK)
				    | trans);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXINDEX, 200);
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43legacy_dmaring *ring)
{
	if (ring->tx) {
		b43legacy_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_TXRING, 0);
	} else {
		b43legacy_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
						 ring->type);
		b43legacy_dma_write(ring, B43legacy_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43legacy_dmaring *ring)
{
	struct b43legacy_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		op32_idx2desc(ring, i, &meta);

		if (!meta->skb) {
			B43legacy_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		free_descriptor_buffer(ring, meta, 0);
	}
}

static enum b43legacy_dmatype b43legacy_engine_type(struct b43legacy_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	mmio_base = b43legacy_dmacontroller_base(0, 0);
	b43legacy_write32(dev,
			mmio_base + B43legacy_DMA32_TXCTL,
			B43legacy_DMA32_TXADDREXT_MASK);
	tmp = b43legacy_read32(dev, mmio_base +
			       B43legacy_DMA32_TXCTL);
	if (tmp & B43legacy_DMA32_TXADDREXT_MASK)
		return B43legacy_DMA_32BIT;
	return B43legacy_DMA_30BIT;
}
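
/*
 * A note on the probe above (an inference from the register usage, not
 * documented here): only 32-bit engines implement the address-extension
 * bits in TXCTL, so the mask reads back non-zero on a 32-bit engine and
 * as zero on a 30-bit engine.
 */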

/* Main initialization function. */
static
struct b43legacy_dmaring *b43legacy_setup_dmaring(struct b43legacy_wldev *dev,
						  int controller_index,
						  int for_tx,
						  enum b43legacy_dmatype type)
{
	struct b43legacy_dmaring *ring;
	int err;
	int nr_slots;
	dma_addr_t dma_test;

	ring = kzalloc_obj(*ring);
	if (!ring)
		goto out;
	ring->type = type;
	ring->dev = dev;

	nr_slots = B43legacy_RXRING_SLOTS;
	if (for_tx)
		nr_slots = B43legacy_TXRING_SLOTS;

	ring->meta = kzalloc_objs(struct b43legacy_dmadesc_meta, nr_slots);
	if (!ring->meta)
		goto err_kfree_ring;
	if (for_tx) {
		ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev, ring->txhdr_cache,
					      sizeof(struct b43legacy_txhdr_fw3),
					      DMA_TO_DEVICE);

		if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(nr_slots,
					sizeof(struct b43legacy_txhdr_fw3),
					GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
					ring->txhdr_cache,
					sizeof(struct b43legacy_txhdr_fw3),
					DMA_TO_DEVICE);

			if (b43legacy_dma_mapping_error(ring, dma_test,
					sizeof(struct b43legacy_txhdr_fw3), 1))
				goto err_kfree_txhdr_cache;
		}

		dma_unmap_single(dev->dev->dma_dev, dma_test,
				 sizeof(struct b43legacy_txhdr_fw3),
				 DMA_TO_DEVICE);
	}

	ring->nr_slots = nr_slots;
	ring->mmio_base = b43legacy_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			ring->rx_buffersize = B43legacy_DMA0_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA0_RX_FRAMEOFFSET;
		} else if (ring->index == 3) {
			ring->rx_buffersize = B43legacy_DMA3_RX_BUFFERSIZE;
			ring->frameoffset = B43legacy_DMA3_RX_FRAMEOFFSET;
		} else
			B43legacy_WARN_ON(1);
	}
#ifdef CONFIG_B43LEGACY_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

/* Main cleanup function. */
static void b43legacy_destroy_dmaring(struct b43legacy_dmaring *ring)
{
	if (!ring)
		return;

	b43legacydbg(ring->dev->wl, "DMA-%u 0x%04X (%s) max used slots:"
		     " %d/%d\n", (unsigned int)(ring->type), ring->mmio_base,
		     (ring->tx) ? "TX" : "RX", ring->max_used_slots,
		     ring->nr_slots);
	/* Device IRQs are disabled prior to entering this function,
	 * so there is no need to care about concurrency with the
	 * RX handler.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

void b43legacy_dma_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma;

	if (b43legacy_using_pio(dev))
		return;
	dma = &dev->dma;

	b43legacy_destroy_dmaring(dma->rx_ring3);
	dma->rx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;

	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
}

int b43legacy_dma_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring;
	enum b43legacy_dmatype type = b43legacy_engine_type(dev);
	int err;
	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
#ifdef CONFIG_B43LEGACY_PIO
		b43legacywarn(dev->wl, "DMA for this device not supported. "
			"Falling back to PIO\n");
		dev->__using_pio = true;
		return -EAGAIN;
#else
		b43legacyerr(dev->wl, "DMA for this device not supported and "
		       "no PIO support compiled in\n");
		return -EOPNOTSUPP;
#endif
	}
	dma->translation = ssb_dma_translation(dev->dev);

	err = -ENOMEM;
	/* setup TX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 1, type);
	if (!ring)
		goto out;
	dma->tx_ring0 = ring;

	ring = b43legacy_setup_dmaring(dev, 1, 1, type);
	if (!ring)
		goto err_destroy_tx0;
	dma->tx_ring1 = ring;

	ring = b43legacy_setup_dmaring(dev, 2, 1, type);
	if (!ring)
		goto err_destroy_tx1;
	dma->tx_ring2 = ring;

	ring = b43legacy_setup_dmaring(dev, 3, 1, type);
	if (!ring)
		goto err_destroy_tx2;
	dma->tx_ring3 = ring;

	ring = b43legacy_setup_dmaring(dev, 4, 1, type);
	if (!ring)
		goto err_destroy_tx3;
	dma->tx_ring4 = ring;

	ring = b43legacy_setup_dmaring(dev, 5, 1, type);
	if (!ring)
		goto err_destroy_tx4;
	dma->tx_ring5 = ring;

	/* setup RX DMA channels. */
	ring = b43legacy_setup_dmaring(dev, 0, 0, type);
	if (!ring)
		goto err_destroy_tx5;
	dma->rx_ring0 = ring;

	if (dev->dev->id.revision < 5) {
		ring = b43legacy_setup_dmaring(dev, 3, 0, type);
		if (!ring)
			goto err_destroy_rx0;
		dma->rx_ring3 = ring;
	}

	b43legacydbg(dev->wl, "%u-bit DMA initialized\n", (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_rx0:
	b43legacy_destroy_dmaring(dma->rx_ring0);
	dma->rx_ring0 = NULL;
err_destroy_tx5:
	b43legacy_destroy_dmaring(dma->tx_ring5);
	dma->tx_ring5 = NULL;
err_destroy_tx4:
	b43legacy_destroy_dmaring(dma->tx_ring4);
	dma->tx_ring4 = NULL;
err_destroy_tx3:
	b43legacy_destroy_dmaring(dma->tx_ring3);
	dma->tx_ring3 = NULL;
err_destroy_tx2:
	b43legacy_destroy_dmaring(dma->tx_ring2);
	dma->tx_ring2 = NULL;
err_destroy_tx1:
	b43legacy_destroy_dmaring(dma->tx_ring1);
	dma->tx_ring1 = NULL;
err_destroy_tx0:
	b43legacy_destroy_dmaring(dma->tx_ring0);
	dma->tx_ring0 = NULL;
	goto out;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43legacy_dmaring *ring,
			   int slot)
{
	u16 cookie = 0x1000;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 */
	switch (ring->index) {
	case 0:
		cookie = 0xA000;
		break;
	case 1:
		cookie = 0xB000;
		break;
	case 2:
		cookie = 0xC000;
		break;
	case 3:
		cookie = 0xD000;
		break;
	case 4:
		cookie = 0xE000;
		break;
	case 5:
		cookie = 0xF000;
		break;
	}
	B43legacy_WARN_ON(!(((u16)slot & 0xF000) == 0x0000));
	cookie |= (u16)slot;

	return cookie;
}
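
/*
 * Worked example of the cookie layout: ring->index == 2 and slot == 5
 * yield the cookie 0xC005. parse_cookie() below reverses this:
 * 0xC005 & 0xF000 selects tx_ring2 and 0xC005 & 0x0FFF recovers slot 5.
 */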

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43legacy_dmaring *parse_cookie(struct b43legacy_wldev *dev,
				      u16 cookie, int *slot)
{
	struct b43legacy_dma *dma = &dev->dma;
	struct b43legacy_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0xA000:
		ring = dma->tx_ring0;
		break;
	case 0xB000:
		ring = dma->tx_ring1;
		break;
	case 0xC000:
		ring = dma->tx_ring2;
		break;
	case 0xD000:
		ring = dma->tx_ring3;
		break;
	case 0xE000:
		ring = dma->tx_ring4;
		break;
	case 0xF000:
		ring = dma->tx_ring5;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	*slot = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(ring && *slot >= 0 && *slot < ring->nr_slots));

	return ring;
}

static int dma_tx_fragment(struct b43legacy_dmaring *ring,
			    struct sk_buff **in_skb)
{
	struct sk_buff *skb = *in_skb;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_dmadesc_meta *meta_hdr;
	struct sk_buff *bounce_skb;

#define SLOTS_PER_PACKET  2
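	/* One packet occupies SLOTS_PER_PACKET (two) descriptor slots: the
	 * first for the device TX header, the second for the payload skb. */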
	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[slot * sizeof(
			       struct b43legacy_txhdr_fw3)]);
	err = b43legacy_generate_txhdr(ring->dev, header,
				 skb->data, skb->len, info,
				 generate_cookie(ring, slot));
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   sizeof(struct b43legacy_txhdr_fw3), 1);
	if (b43legacy_dma_mapping_error(ring, meta_hdr->dmaaddr,
					sizeof(struct b43legacy_txhdr_fw3), 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	op32_fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     sizeof(struct b43legacy_txhdr_fw3), 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = op32_idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		bounce_skb = alloc_skb(skb->len, GFP_KERNEL | GFP_DMA);
		if (!bounce_skb) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		skb_put_data(bounce_skb, skb->data, skb->len);
		memcpy(bounce_skb->cb, skb->cb, sizeof(skb->cb));
		bounce_skb->dev = skb->dev;
		skb_set_queue_mapping(bounce_skb, skb_get_queue_mapping(skb));
		info = IEEE80211_SKB_CB(bounce_skb);

		dev_kfree_skb_any(skb);
		skb = bounce_skb;
		*in_skb = bounce_skb;
		meta->skb = skb;
		meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
		if (b43legacy_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_free_bounce;
		}
	}

	op32_fill_descriptor(ring, desc, meta->dmaaddr,
			     skb->len, 0, 1, 1);

	wmb();	/* previous stuff MUST be done */
	/* Now transfer the whole frame. */
	op32_poke_tx(ring, next_slot(ring, slot));
	return 0;

out_free_bounce:
	dev_kfree_skb_any(skb);
out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 sizeof(struct b43legacy_txhdr_fw3), 1);
	return err;
}

static inline
int should_inject_overflow(struct b43legacy_dmaring *ring)
{
#ifdef CONFIG_B43LEGACY_DEBUG
	if (unlikely(b43legacy_debug(ring->dev,
				     B43legacy_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43legacydbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43LEGACY_DEBUG */
	return 0;
}

int b43legacy_dma_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_dmaring *ring;
	int err = 0;

	ring = priority_to_txring(dev, skb_get_queue_mapping(skb));
	B43legacy_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacyerr(dev->wl, "Packet after queue stopped\n");
		return -ENOSPC;
	}

	if (WARN_ON(free_slots(ring) < SLOTS_PER_PACKET)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43legacyerr(dev->wl, "DMA queue overflow\n");
		return -ENOSPC;
	}

	/* dma_tx_fragment might reallocate the skb, so invalidate pointers pointing
	 * into the skb data or cb now. */
	err = dma_tx_fragment(ring, &skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		dev_kfree_skb_any(skb);
		return 0;
	}
	if (unlikely(err)) {
		b43legacyerr(dev->wl, "DMA tx mapping failure\n");
		return err;
	}
	if ((free_slots(ring) < SLOTS_PER_PACKET) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		ieee80211_stop_queue(dev->wl->hw, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = 1;
		ring->stopped = true;
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Stopped TX ring %d\n",
			       ring->index);
	}
	return err;
}

void b43legacy_dma_handle_txstatus(struct b43legacy_wldev *dev,
				 const struct b43legacy_txstatus *status)
{
	struct b43legacy_dmaring *ring;
	struct b43legacy_dmadesc_meta *meta;
	int retry_limit;
	int slot;
	int firstused;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43legacy_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;
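	/*
	 * Example: with nr_slots == 256, current_slot == 3 and
	 * used_slots == 10, firstused is 3 - 10 + 1 == -6, which wraps to
	 * slot 250 at the end of the ring.
	 */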
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		b43legacydbg(dev->wl, "Out of order TX status report on DMA "
			     "ring %d. Expected %d, but got %d\n",
			     ring->index, firstused, slot);
		return;
	}

	while (1) {
		B43legacy_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));
		op32_idx2desc(ring, slot, &meta);

		if (meta->skb)
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		else
			unmap_descbuffer(ring, meta->dmaaddr,
					 sizeof(struct b43legacy_txhdr_fw3),
					 1);

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;
			BUG_ON(!meta->skb);
			info = IEEE80211_SKB_CB(meta->skb);
			/* Preserve the configured retry limit before clearing
			 * the status. The xmit function has overwritten the
			 * rc's value with the actual retry limit done by the
			 * hardware. */
			retry_limit = info->status.rates[0].count;
			ieee80211_tx_info_clear_status(info);

			if (status->acked)
				info->flags |= IEEE80211_TX_STAT_ACK;

			if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
				/*
				 * If the short retries (RTS, not data frame) have exceeded
				 * the limit, the hw will not have tried the selected rate,
				 * but will have used the fallback rate instead.
				 * Don't let the rate control count attempts for the selected
				 * rate in this case, otherwise the statistics will be off.
				 */
				info->status.rates[0].count = 0;
				info->status.rates[1].count = status->frame_count;
			} else {
				if (status->frame_count > retry_limit) {
					info->status.rates[0].count = retry_limit;
					info->status.rates[1].count = status->frame_count -
							retry_limit;

				} else {
					info->status.rates[0].count = status->frame_count;
					info->status.rates[1].idx = -1;
				}
			}

			/* Call back to inform the ieee80211 subsystem about the
			 * status of the transmission.
			 * Some fields of txstat are already filled in dma_tx().
			 */
			ieee80211_tx_status_irqsafe(dev->wl->hw, meta->skb);
			/* skb is freed by ieee80211_tx_status_irqsafe() */
			meta->skb = NULL;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			B43legacy_WARN_ON(meta->skb != NULL);
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment)
			break;
		slot = next_slot(ring, slot);
	}
	dev->stats.last_tx = jiffies;
	if (ring->stopped) {
		B43legacy_WARN_ON(free_slots(ring) < SLOTS_PER_PACKET);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = 0;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		ieee80211_wake_queue(dev->wl->hw, ring->queue_prio);
		if (b43legacy_debug(dev, B43legacy_DBG_DMAVERBOSE))
			b43legacydbg(dev->wl, "Woke up TX ring %d\n",
				     ring->index);
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43legacy_dmaring *ring,
		   int *slot)
{
	struct b43legacy_dmadesc32 *desc;
	struct b43legacy_dmadesc_meta *meta;
	struct b43legacy_rxhdr_fw3 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = op32_idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	if (ring->index == 3) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw =
				(struct b43legacy_hwtxstatus *)skb->data;
		int i = 0;

		while (hw->cookie == 0) {
			if (i > 100)
				break;
			i++;
			udelay(2);
			barrier();
		}
		b43legacy_handle_hwtxstatus(ring->dev, hw);
		/* recycle the descriptor buffer. */
		sync_descbuffer_for_device(ring, meta->dmaaddr,
					   ring->rx_buffersize);

		return;
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			goto drop;
		}
	}
	if (unlikely(len > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = op32_idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43legacyerr(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43legacydbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer()"
			     " failed\n");
		sync_descbuffer_for_device(ring, dmaaddr,
					   ring->rx_buffersize);
		goto drop;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43legacy_rx(ring->dev, skb, rxhdr);
drop:
	return;
}

void b43legacy_dma_rx(struct b43legacy_dmaring *ring)
{
	int slot;
	int current_slot;
	int used_slots = 0;

	B43legacy_WARN_ON(ring->tx);
	current_slot = op32_get_current_rxslot(ring);
	B43legacy_WARN_ON(!(current_slot >= 0 && current_slot <
			   ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
	op32_set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43legacy_dma_tx_suspend_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_suspend(ring);
}

static void b43legacy_dma_tx_resume_ring(struct b43legacy_dmaring *ring)
{
	B43legacy_WARN_ON(!ring->tx);
	op32_tx_resume(ring);
}

void b43legacy_dma_tx_suspend(struct b43legacy_wldev *dev)
{
	b43legacy_power_saving_ctl_bits(dev, -1, 1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring0);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_suspend_ring(dev->dma.tx_ring5);
}

void b43legacy_dma_tx_resume(struct b43legacy_wldev *dev)
{
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring5);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring4);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring3);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring2);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring1);
	b43legacy_dma_tx_resume_ring(dev->dma.tx_ring0);
	b43legacy_power_saving_ctl_bits(dev, -1, -1);
}