// SPDX-License-Identifier: GPL-2.0-or-later
/*

  Broadcom B43 wireless driver

  DMA ringbuffer and descriptor allocation/management

  Copyright (c) 2005, 2006 Michael Buesch <m@bues.ch>

  Some code in this file is derived from the b44.c driver
  Copyright (C) 2002 David S. Miller
  Copyright (C) Pekka Pietikainen


*/

#include "b43.h"
#include "dma.h"
#include "main.h"
#include "debugfs.h"
#include "xmit.h"

#include <linux/dma-mapping.h>
#include <linux/pci.h>
#include <linux/delay.h>
#include <linux/skbuff.h>
#include <linux/etherdevice.h>
#include <linux/slab.h>
#include <asm/div64.h>


/* Required number of TX DMA slots per TX frame.
 * This currently is 2, because we put the header and the ieee80211 frame
 * into separate slots. */
#define TX_SLOTS_PER_FRAME	2

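/* Compute the value to program into a descriptor or ring address word.
 * The SSB/BCMA backplane wants address translation (routing) bits in the
 * top bits of one address word: in the low word for 30/32-bit DMA, and
 * normally in the high word for 64-bit DMA (buggy 64-bit cores look for
 * them in the low word instead, see translation_in_low).
 * B43_DMA_ADDR_EXT extracts the routing bits for the ADDREXT fields of
 * the control registers. */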
static u32 b43_dma_address(struct b43_dma *dma, dma_addr_t dmaaddr,
			   enum b43_addrtype addrtype)
{
	u32 addr;

	switch (addrtype) {
	case B43_DMA_ADDR_LOW:
		addr = lower_32_bits(dmaaddr);
		if (dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_HIGH:
		addr = upper_32_bits(dmaaddr);
		if (!dma->translation_in_low) {
			addr &= ~SSB_DMA_TRANSLATION_MASK;
			addr |= dma->translation;
		}
		break;
	case B43_DMA_ADDR_EXT:
		if (dma->translation_in_low)
			addr = lower_32_bits(dmaaddr);
		else
			addr = upper_32_bits(dmaaddr);
		addr &= SSB_DMA_TRANSLATION_MASK;
		addr >>= SSB_DMA_TRANSLATION_SHIFT;
		break;
	}

	return addr;
}

/* 32bit DMA ops. */
static
struct b43_dmadesc_generic *op32_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc32 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

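/* Write one 32-bit hardware descriptor: the translated buffer address and
 * a control word holding the byte count, the ADDREXT routing bits and the
 * FRAMESTART/FRAMEEND/IRQ/table-end flags. */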
static void op32_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc32 *descbase = ring->descbase;
	int slot;
	u32 ctl;
	u32 addr;
	u32 addrext;

	slot = (int)(&(desc->dma32) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addr = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	ctl = bufsize & B43_DMA32_DCTL_BYTECNT;
	if (slot == ring->nr_slots - 1)
		ctl |= B43_DMA32_DCTL_DTABLEEND;
	if (start)
		ctl |= B43_DMA32_DCTL_FRAMESTART;
	if (end)
		ctl |= B43_DMA32_DCTL_FRAMEEND;
	if (irq)
		ctl |= B43_DMA32_DCTL_IRQ;
	ctl |= (addrext << B43_DMA32_DCTL_ADDREXT_SHIFT)
	    & B43_DMA32_DCTL_ADDREXT_MASK;

	desc->dma32.control = cpu_to_le32(ctl);
	desc->dma32.address = cpu_to_le32(addr);
}

static void op32_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static void op32_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      | B43_DMA32_TXSUSPEND);
}

static void op32_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA32_TXCTL, b43_dma_read(ring, B43_DMA32_TXCTL)
		      & ~B43_DMA32_TXSUSPEND);
}

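/* The hardware reports the currently processed RX descriptor as a byte
 * offset into the ring, so translate between byte offsets and slot
 * indices here. */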
static int op32_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA32_RXSTATUS);
	val &= B43_DMA32_RXDPTR;

	return (val / sizeof(struct b43_dmadesc32));
}

static void op32_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA32_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc32)));
}

static const struct b43_dma_ops dma32_ops = {
	.idx2desc = op32_idx2desc,
	.fill_descriptor = op32_fill_descriptor,
	.poke_tx = op32_poke_tx,
	.tx_suspend = op32_tx_suspend,
	.tx_resume = op32_tx_resume,
	.get_current_rxslot = op32_get_current_rxslot,
	.set_current_rxslot = op32_set_current_rxslot,
};

/* 64bit DMA ops. */
static
struct b43_dmadesc_generic *op64_idx2desc(struct b43_dmaring *ring,
					  int slot,
					  struct b43_dmadesc_meta **meta)
{
	struct b43_dmadesc64 *desc;

	*meta = &(ring->meta[slot]);
	desc = ring->descbase;
	desc = &(desc[slot]);

	return (struct b43_dmadesc_generic *)desc;
}

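/* Like op32_fill_descriptor, but for the 64-bit descriptor layout with
 * two control words and separate low/high address words. */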
static void op64_fill_descriptor(struct b43_dmaring *ring,
				 struct b43_dmadesc_generic *desc,
				 dma_addr_t dmaaddr, u16 bufsize,
				 int start, int end, int irq)
{
	struct b43_dmadesc64 *descbase = ring->descbase;
	int slot;
	u32 ctl0 = 0, ctl1 = 0;
	u32 addrlo, addrhi;
	u32 addrext;

	slot = (int)(&(desc->dma64) - descbase);
	B43_WARN_ON(!(slot >= 0 && slot < ring->nr_slots));

	addrlo = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_LOW);
	addrhi = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_HIGH);
	addrext = b43_dma_address(&ring->dev->dma, dmaaddr, B43_DMA_ADDR_EXT);

	if (slot == ring->nr_slots - 1)
		ctl0 |= B43_DMA64_DCTL0_DTABLEEND;
	if (start)
		ctl0 |= B43_DMA64_DCTL0_FRAMESTART;
	if (end)
		ctl0 |= B43_DMA64_DCTL0_FRAMEEND;
	if (irq)
		ctl0 |= B43_DMA64_DCTL0_IRQ;
	ctl1 |= bufsize & B43_DMA64_DCTL1_BYTECNT;
	ctl1 |= (addrext << B43_DMA64_DCTL1_ADDREXT_SHIFT)
	    & B43_DMA64_DCTL1_ADDREXT_MASK;

	desc->dma64.control0 = cpu_to_le32(ctl0);
	desc->dma64.control1 = cpu_to_le32(ctl1);
	desc->dma64.address_low = cpu_to_le32(addrlo);
	desc->dma64.address_high = cpu_to_le32(addrhi);
}

static void op64_poke_tx(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_TXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static void op64_tx_suspend(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      | B43_DMA64_TXSUSPEND);
}

static void op64_tx_resume(struct b43_dmaring *ring)
{
	b43_dma_write(ring, B43_DMA64_TXCTL, b43_dma_read(ring, B43_DMA64_TXCTL)
		      & ~B43_DMA64_TXSUSPEND);
}

static int op64_get_current_rxslot(struct b43_dmaring *ring)
{
	u32 val;

	val = b43_dma_read(ring, B43_DMA64_RXSTATUS);
	val &= B43_DMA64_RXSTATDPTR;

	return (val / sizeof(struct b43_dmadesc64));
}

static void op64_set_current_rxslot(struct b43_dmaring *ring, int slot)
{
	b43_dma_write(ring, B43_DMA64_RXINDEX,
		      (u32) (slot * sizeof(struct b43_dmadesc64)));
}

static const struct b43_dma_ops dma64_ops = {
	.idx2desc = op64_idx2desc,
	.fill_descriptor = op64_fill_descriptor,
	.poke_tx = op64_poke_tx,
	.tx_suspend = op64_tx_suspend,
	.tx_resume = op64_tx_resume,
	.get_current_rxslot = op64_get_current_rxslot,
	.set_current_rxslot = op64_set_current_rxslot,
};

static inline int free_slots(struct b43_dmaring *ring)
{
	return (ring->nr_slots - ring->used_slots);
}

static inline int next_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= -1 && slot <= ring->nr_slots - 1));
	if (slot == ring->nr_slots - 1)
		return 0;
	return slot + 1;
}

static inline int prev_slot(struct b43_dmaring *ring, int slot)
{
	B43_WARN_ON(!(slot >= 0 && slot <= ring->nr_slots - 1));
	if (slot == 0)
		return ring->nr_slots - 1;
	return slot - 1;
}

#ifdef CONFIG_B43_DEBUG
static void update_max_used_slots(struct b43_dmaring *ring,
				  int current_used_slots)
{
	if (current_used_slots <= ring->max_used_slots)
		return;
	ring->max_used_slots = current_used_slots;
	if (b43_debug(ring->dev, B43_DBG_DMAVERBOSE)) {
		b43dbg(ring->dev->wl,
		       "max_used_slots increased to %d on %s ring %d\n",
		       ring->max_used_slots,
		       ring->tx ? "TX" : "RX", ring->index);
	}
}
#else
static inline
void update_max_used_slots(struct b43_dmaring *ring, int current_used_slots)
{
}
#endif /* DEBUG */

/* Request a slot for usage. */
static inline int request_slot(struct b43_dmaring *ring)
{
	int slot;

	B43_WARN_ON(!ring->tx);
	B43_WARN_ON(ring->stopped);
	B43_WARN_ON(free_slots(ring) == 0);

	slot = next_slot(ring, ring->current_slot);
	ring->current_slot = slot;
	ring->used_slots++;

	update_max_used_slots(ring, ring->used_slots);

	return slot;
}

static u16 b43_dmacontroller_base(enum b43_dmatype type, int controller_idx)
{
	static const u16 map64[] = {
		B43_MMIO_DMA64_BASE0,
		B43_MMIO_DMA64_BASE1,
		B43_MMIO_DMA64_BASE2,
		B43_MMIO_DMA64_BASE3,
		B43_MMIO_DMA64_BASE4,
		B43_MMIO_DMA64_BASE5,
	};
	static const u16 map32[] = {
		B43_MMIO_DMA32_BASE0,
		B43_MMIO_DMA32_BASE1,
		B43_MMIO_DMA32_BASE2,
		B43_MMIO_DMA32_BASE3,
		B43_MMIO_DMA32_BASE4,
		B43_MMIO_DMA32_BASE5,
	};

	if (type == B43_DMA_64BIT) {
		B43_WARN_ON(!(controller_idx >= 0 &&
			      controller_idx < ARRAY_SIZE(map64)));
		return map64[controller_idx];
	}
	B43_WARN_ON(!(controller_idx >= 0 &&
		      controller_idx < ARRAY_SIZE(map32)));
	return map32[controller_idx];
}

static inline
dma_addr_t map_descbuffer(struct b43_dmaring *ring,
			  unsigned char *buf, size_t len, int tx)
{
	dma_addr_t dmaaddr;

	if (tx) {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_TO_DEVICE);
	} else {
		dmaaddr = dma_map_single(ring->dev->dev->dma_dev,
					 buf, len, DMA_FROM_DEVICE);
	}

	return dmaaddr;
}

static inline
void unmap_descbuffer(struct b43_dmaring *ring,
		      dma_addr_t addr, size_t len, int tx)
{
	if (tx) {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_TO_DEVICE);
	} else {
		dma_unmap_single(ring->dev->dev->dma_dev,
				 addr, len, DMA_FROM_DEVICE);
	}
}

static inline
void sync_descbuffer_for_cpu(struct b43_dmaring *ring,
			     dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_cpu(ring->dev->dev->dma_dev,
				addr, len, DMA_FROM_DEVICE);
}

static inline
void sync_descbuffer_for_device(struct b43_dmaring *ring,
				dma_addr_t addr, size_t len)
{
	B43_WARN_ON(ring->tx);
	dma_sync_single_for_device(ring->dev->dev->dma_dev,
				   addr, len, DMA_FROM_DEVICE);
}

static inline
void free_descriptor_buffer(struct b43_dmaring *ring,
			    struct b43_dmadesc_meta *meta)
{
	if (meta->skb) {
		if (ring->tx)
			ieee80211_free_txskb(ring->dev->wl->hw, meta->skb);
		else
			dev_kfree_skb_any(meta->skb);
		meta->skb = NULL;
	}
}

static int alloc_ringmemory(struct b43_dmaring *ring)
{
	/* The specs call for 4K buffers for 30- and 32-bit DMA with 4K
	 * alignment and 8K buffers for 64-bit DMA with 8K alignment.
	 * In practice we could use smaller buffers for the latter, but the
	 * alignment is really important because of the hardware bug. If bit
	 * 0x00001000 is used in DMA address, some hardware (like BCM4331)
	 * copies that bit into B43_DMA64_RXSTATUS and we get false values from
	 * B43_DMA64_RXSTATDPTR. Let's just use 8K buffers even if we don't use
	 * more than 256 slots for ring.
	 */
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;

	ring->descbase = dma_alloc_coherent(ring->dev->dev->dma_dev,
					    ring_mem_size, &(ring->dmabase),
					    GFP_KERNEL);
	if (!ring->descbase)
		return -ENOMEM;

	return 0;
}

static void free_ringmemory(struct b43_dmaring *ring)
{
	u16 ring_mem_size = (ring->type == B43_DMA_64BIT) ?
				B43_DMA64_RINGMEMSIZE : B43_DMA32_RINGMEMSIZE;
	dma_free_coherent(ring->dev->dev->dma_dev, ring_mem_size,
			  ring->descbase, ring->dmabase);
}

/* Reset the RX DMA channel */
static int b43_dmacontroller_rx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXCTL : B43_DMA32_RXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_RXSTATUS :
						   B43_DMA32_RXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_RXSTAT;
			if (value == B43_DMA64_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_RXSTATE;
			if (value == B43_DMA32_RXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA RX reset timed out\n");
		return -ENODEV;
	}

	return 0;
}

/* Reset the TX DMA channel */
static int b43_dmacontroller_tx_reset(struct b43_wldev *dev, u16 mmio_base,
				      enum b43_dmatype type)
{
	int i;
	u32 value;
	u16 offset;

	might_sleep();

	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED ||
			    value == B43_DMA64_TXSTAT_IDLEWAIT ||
			    value == B43_DMA64_TXSTAT_STOPPED)
				break;
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED ||
			    value == B43_DMA32_TXSTAT_IDLEWAIT ||
			    value == B43_DMA32_TXSTAT_STOPPED)
				break;
		}
		msleep(1);
	}
	offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXCTL : B43_DMA32_TXCTL;
	b43_write32(dev, mmio_base + offset, 0);
	for (i = 0; i < 10; i++) {
		offset = (type == B43_DMA_64BIT) ? B43_DMA64_TXSTATUS :
						   B43_DMA32_TXSTATUS;
		value = b43_read32(dev, mmio_base + offset);
		if (type == B43_DMA_64BIT) {
			value &= B43_DMA64_TXSTAT;
			if (value == B43_DMA64_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		} else {
			value &= B43_DMA32_TXSTATE;
			if (value == B43_DMA32_TXSTAT_DISABLED) {
				i = -1;
				break;
			}
		}
		msleep(1);
	}
	if (i != -1) {
		b43err(dev->wl, "DMA TX reset timed out\n");
		return -ENODEV;
	}
	/* ensure the reset is completed. */
	msleep(1);

	return 0;
}

/* Check if a DMA mapping address is invalid. */
static bool b43_dma_mapping_error(struct b43_dmaring *ring,
				  dma_addr_t addr,
				  size_t buffersize, bool dma_to_device)
{
	if (unlikely(dma_mapping_error(ring->dev->dev->dma_dev, addr)))
		return true;

	switch (ring->type) {
	case B43_DMA_30BIT:
		if ((u64)addr + buffersize > (1ULL << 30))
			goto address_error;
		break;
	case B43_DMA_32BIT:
		if ((u64)addr + buffersize > (1ULL << 32))
			goto address_error;
		break;
	case B43_DMA_64BIT:
		/* Currently we can't have addresses beyond
		 * 64bit in the kernel. */
		break;
	}

	/* The address is OK. */
	return false;

address_error:
	/* We can't support this address. Unmap it again. */
	unmap_descbuffer(ring, addr, buffersize, dma_to_device);

	return true;
}

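/* Check whether the poison pattern is still intact, i.e. whether the
 * device never wrote a received frame into this buffer. */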
static bool b43_rx_buffer_is_poisoned(struct b43_dmaring *ring, struct sk_buff *skb)
{
	unsigned char *f = skb->data + ring->frameoffset;

	return ((f[0] & f[1] & f[2] & f[3] & f[4] & f[5] & f[6] & f[7]) == 0xFF);
}

static void b43_poison_rx_buffer(struct b43_dmaring *ring, struct sk_buff *skb)
{
	struct b43_rxhdr_fw4 *rxhdr;
	unsigned char *frame;

	/* This poisons the RX buffer to detect DMA failures. */

	rxhdr = (struct b43_rxhdr_fw4 *)(skb->data);
	rxhdr->frame_len = 0;

	B43_WARN_ON(ring->rx_buffersize < ring->frameoffset +
		    sizeof(struct b43_plcp_hdr6) + 2);
	frame = skb->data + ring->frameoffset;
	memset(frame, 0xFF, sizeof(struct b43_plcp_hdr6) + 2 /* padding */);
}

static int setup_rx_descbuffer(struct b43_dmaring *ring,
			       struct b43_dmadesc_generic *desc,
			       struct b43_dmadesc_meta *meta, gfp_t gfp_flags)
{
	dma_addr_t dmaaddr;
	struct sk_buff *skb;

	B43_WARN_ON(ring->tx);

	skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
	if (unlikely(!skb))
		return -ENOMEM;
	b43_poison_rx_buffer(ring, skb);
	dmaaddr = map_descbuffer(ring, skb->data, ring->rx_buffersize, 0);
	if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
		/* ugh. try to realloc in zone_dma */
		gfp_flags |= GFP_DMA;

		dev_kfree_skb_any(skb);

		skb = __dev_alloc_skb(ring->rx_buffersize, gfp_flags);
		if (unlikely(!skb))
			return -ENOMEM;
		b43_poison_rx_buffer(ring, skb);
		dmaaddr = map_descbuffer(ring, skb->data,
					 ring->rx_buffersize, 0);
		if (b43_dma_mapping_error(ring, dmaaddr, ring->rx_buffersize, 0)) {
			b43err(ring->dev->wl, "RX DMA buffer allocation failed\n");
			dev_kfree_skb_any(skb);
			return -EIO;
		}
	}

	meta->skb = skb;
	meta->dmaaddr = dmaaddr;
	ring->ops->fill_descriptor(ring, desc, dmaaddr,
				   ring->rx_buffersize, 0, 0, 0);

	return 0;
}

/* Allocate the initial descbuffers.
 * This is used for an RX ring only.
 */
static int alloc_initial_descbuffers(struct b43_dmaring *ring)
{
	int i, err = -ENOMEM;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;

	for (i = 0; i < ring->nr_slots; i++) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		err = setup_rx_descbuffer(ring, desc, meta, GFP_KERNEL);
		if (err) {
			b43err(ring->dev->wl,
			       "Failed to allocate initial descbuffers\n");
			goto err_unwind;
		}
	}
	mb();
	ring->used_slots = ring->nr_slots;
	err = 0;
out:
	return err;

err_unwind:
	for (i--; i >= 0; i--) {
		desc = ring->ops->idx2desc(ring, i, &meta);

		unmap_descbuffer(ring, meta->dmaaddr, ring->rx_buffersize, 0);
		dev_kfree_skb(meta->skb);
	}
	goto out;
}

/* Do initial setup of the DMA controller.
 * Reset the controller, write the ring busaddress
 * and switch the "enable" bit on.
 */
static int dmacontroller_setup(struct b43_dmaring *ring)
{
	int err = 0;
	u32 value;
	u32 addrext;
	bool parity = ring->dev->dma.parity;
	u32 addrlo;
	u32 addrhi;

	if (ring->tx) {
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = B43_DMA64_TXENABLE;
			value |= (addrext << B43_DMA64_TXADDREXT_SHIFT)
			    & B43_DMA64_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_TXCTL, value);
			b43_dma_write(ring, B43_DMA64_TXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, addrhi);
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = B43_DMA32_TXENABLE;
			value |= (addrext << B43_DMA32_TXADDREXT_SHIFT)
			    & B43_DMA32_TXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_TXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_TXCTL, value);
			b43_dma_write(ring, B43_DMA32_TXRING, addrlo);
		}
	} else {
		err = alloc_initial_descbuffers(ring);
		if (err)
			goto out;
		if (ring->type == B43_DMA_64BIT) {
			u64 ringbase = (u64) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);
			addrhi = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_HIGH);

			value = (ring->frameoffset << B43_DMA64_RXFROFF_SHIFT);
			value |= B43_DMA64_RXENABLE;
			value |= (addrext << B43_DMA64_RXADDREXT_SHIFT)
			    & B43_DMA64_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA64_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA64_RXCTL, value);
			b43_dma_write(ring, B43_DMA64_RXRINGLO, addrlo);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, addrhi);
			b43_dma_write(ring, B43_DMA64_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc64));
		} else {
			u32 ringbase = (u32) (ring->dmabase);
			addrext = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_EXT);
			addrlo = b43_dma_address(&ring->dev->dma, ringbase, B43_DMA_ADDR_LOW);

			value = (ring->frameoffset << B43_DMA32_RXFROFF_SHIFT);
			value |= B43_DMA32_RXENABLE;
			value |= (addrext << B43_DMA32_RXADDREXT_SHIFT)
			    & B43_DMA32_RXADDREXT_MASK;
			if (!parity)
				value |= B43_DMA32_RXPARITYDISABLE;
			b43_dma_write(ring, B43_DMA32_RXCTL, value);
			b43_dma_write(ring, B43_DMA32_RXRING, addrlo);
			b43_dma_write(ring, B43_DMA32_RXINDEX, ring->nr_slots *
				      sizeof(struct b43_dmadesc32));
		}
	}

out:
	return err;
}

/* Shutdown the DMA controller. */
static void dmacontroller_cleanup(struct b43_dmaring *ring)
{
	if (ring->tx) {
		b43_dmacontroller_tx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_TXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_TXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_TXRING, 0);
	} else {
		b43_dmacontroller_rx_reset(ring->dev, ring->mmio_base,
					   ring->type);
		if (ring->type == B43_DMA_64BIT) {
			b43_dma_write(ring, B43_DMA64_RXRINGLO, 0);
			b43_dma_write(ring, B43_DMA64_RXRINGHI, 0);
		} else
			b43_dma_write(ring, B43_DMA32_RXRING, 0);
	}
}

static void free_all_descbuffers(struct b43_dmaring *ring)
{
	struct b43_dmadesc_meta *meta;
	int i;

	if (!ring->used_slots)
		return;
	for (i = 0; i < ring->nr_slots; i++) {
		/* get meta - ignore returned value */
		ring->ops->idx2desc(ring, i, &meta);

		if (!meta->skb || b43_dma_ptr_is_poisoned(meta->skb)) {
			B43_WARN_ON(!ring->tx);
			continue;
		}
		if (ring->tx) {
			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 ring->rx_buffersize, 0);
		}
		free_descriptor_buffer(ring, meta);
	}
}

static enum b43_dmatype b43_engine_type(struct b43_wldev *dev)
{
	u32 tmp;
	u16 mmio_base;

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		tmp = bcma_aread32(dev->dev->bdev, BCMA_IOST);
		if (tmp & BCMA_IOST_DMA64)
			return B43_DMA_64BIT;
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		tmp = ssb_read32(dev->dev->sdev, SSB_TMSHIGH);
		if (tmp & SSB_TMSHIGH_DMA64)
			return B43_DMA_64BIT;
		break;
#endif
	}

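	/* No 64-bit capability flag was found. Probe the first 32-bit
	 * controller: if the ADDREXT bits of TXCTL are writable, the core
	 * supports 32-bit DMA, otherwise only 30-bit DMA. */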
	mmio_base = b43_dmacontroller_base(0, 0);
	b43_write32(dev, mmio_base + B43_DMA32_TXCTL, B43_DMA32_TXADDREXT_MASK);
	tmp = b43_read32(dev, mmio_base + B43_DMA32_TXCTL);
	if (tmp & B43_DMA32_TXADDREXT_MASK)
		return B43_DMA_32BIT;
	return B43_DMA_30BIT;
}

/* Main initialization function. */
static
struct b43_dmaring *b43_setup_dmaring(struct b43_wldev *dev,
				      int controller_index,
				      int for_tx,
				      enum b43_dmatype type)
{
	struct b43_dmaring *ring;
	int i, err;
	dma_addr_t dma_test;

	ring = kzalloc(sizeof(*ring), GFP_KERNEL);
	if (!ring)
		goto out;

	ring->nr_slots = B43_RXRING_SLOTS;
	if (for_tx)
		ring->nr_slots = B43_TXRING_SLOTS;

	ring->meta = kcalloc(ring->nr_slots, sizeof(struct b43_dmadesc_meta),
			     GFP_KERNEL);
	if (!ring->meta)
		goto err_kfree_ring;
	for (i = 0; i < ring->nr_slots; i++)
		ring->meta[i].skb = B43_DMA_PTR_POISON;

	ring->type = type;
	ring->dev = dev;
	ring->mmio_base = b43_dmacontroller_base(type, controller_index);
	ring->index = controller_index;
	if (type == B43_DMA_64BIT)
		ring->ops = &dma64_ops;
	else
		ring->ops = &dma32_ops;
	if (for_tx) {
		ring->tx = true;
		ring->current_slot = -1;
	} else {
		if (ring->index == 0) {
			switch (dev->fw.hdr_format) {
			case B43_FW_HDR_598:
				ring->rx_buffersize = B43_DMA0_RX_FW598_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW598_FO;
				break;
			case B43_FW_HDR_410:
			case B43_FW_HDR_351:
				ring->rx_buffersize = B43_DMA0_RX_FW351_BUFSIZE;
				ring->frameoffset = B43_DMA0_RX_FW351_FO;
				break;
			}
		} else
			B43_WARN_ON(1);
	}
#ifdef CONFIG_B43_DEBUG
	ring->last_injected_overflow = jiffies;
#endif

	if (for_tx) {
		/* Assumption: B43_TXRING_SLOTS can be divided by TX_SLOTS_PER_FRAME */
		BUILD_BUG_ON(B43_TXRING_SLOTS % TX_SLOTS_PER_FRAME != 0);

		ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
					    b43_txhdr_size(dev),
					    GFP_KERNEL);
		if (!ring->txhdr_cache)
			goto err_kfree_meta;

		/* test for ability to dma to txhdr_cache */
		dma_test = dma_map_single(dev->dev->dma_dev,
					  ring->txhdr_cache,
					  b43_txhdr_size(dev),
					  DMA_TO_DEVICE);

		if (b43_dma_mapping_error(ring, dma_test,
					  b43_txhdr_size(dev), 1)) {
			/* ugh realloc */
			kfree(ring->txhdr_cache);
			ring->txhdr_cache = kcalloc(ring->nr_slots / TX_SLOTS_PER_FRAME,
						    b43_txhdr_size(dev),
						    GFP_KERNEL | GFP_DMA);
			if (!ring->txhdr_cache)
				goto err_kfree_meta;

			dma_test = dma_map_single(dev->dev->dma_dev,
						  ring->txhdr_cache,
						  b43_txhdr_size(dev),
						  DMA_TO_DEVICE);

			if (b43_dma_mapping_error(ring, dma_test,
						  b43_txhdr_size(dev), 1)) {

				b43err(dev->wl,
				       "TXHDR DMA allocation failed\n");
				goto err_kfree_txhdr_cache;
			}
		}

		dma_unmap_single(dev->dev->dma_dev,
				 dma_test, b43_txhdr_size(dev),
				 DMA_TO_DEVICE);
	}

	err = alloc_ringmemory(ring);
	if (err)
		goto err_kfree_txhdr_cache;
	err = dmacontroller_setup(ring);
	if (err)
		goto err_free_ringmemory;

out:
	return ring;

err_free_ringmemory:
	free_ringmemory(ring);
err_kfree_txhdr_cache:
	kfree(ring->txhdr_cache);
err_kfree_meta:
	kfree(ring->meta);
err_kfree_ring:
	kfree(ring);
	ring = NULL;
	goto out;
}

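/* do_div() divides a 64-bit dividend by a 32-bit divisor in place and
 * returns the remainder; this also works on 32-bit machines without
 * native 64-bit division. These wrappers turn it into plain quotient
 * and remainder expressions. */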
#define divide(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
	__a;			\
  })

#define modulo(a, b)	({	\
	typeof(a) __a = a;	\
	do_div(__a, b);		\
  })

/* Main cleanup function. */
static void b43_destroy_dmaring(struct b43_dmaring *ring,
				const char *ringname)
{
	if (!ring)
		return;

#ifdef CONFIG_B43_DEBUG
	{
		/* Print some statistics. */
		u64 failed_packets = ring->nr_failed_tx_packets;
		u64 succeed_packets = ring->nr_succeed_tx_packets;
		u64 nr_packets = failed_packets + succeed_packets;
		u64 permille_failed = 0, average_tries = 0;

		if (nr_packets)
			permille_failed = divide(failed_packets * 1000, nr_packets);
		if (nr_packets)
			average_tries = divide(ring->nr_total_packet_tries * 100, nr_packets);

		b43dbg(ring->dev->wl, "DMA-%u %s: "
		       "Used slots %d/%d, Failed frames %llu/%llu = %llu.%01llu%%, "
		       "Average tries %llu.%02llu\n",
		       (unsigned int)(ring->type), ringname,
		       ring->max_used_slots,
		       ring->nr_slots,
		       (unsigned long long)failed_packets,
		       (unsigned long long)nr_packets,
		       (unsigned long long)divide(permille_failed, 10),
		       (unsigned long long)modulo(permille_failed, 10),
		       (unsigned long long)divide(average_tries, 100),
		       (unsigned long long)modulo(average_tries, 100));
	}
#endif /* DEBUG */

	/* Device IRQs are disabled prior to entering this function,
	 * so no need to take care of concurrency with rx handler stuff.
	 */
	dmacontroller_cleanup(ring);
	free_all_descbuffers(ring);
	free_ringmemory(ring);

	kfree(ring->txhdr_cache);
	kfree(ring->meta);
	kfree(ring);
}

#define destroy_ring(dma, ring) do {				\
	b43_destroy_dmaring((dma)->ring, __stringify(ring));	\
	(dma)->ring = NULL;					\
    } while (0)

void b43_dma_free(struct b43_wldev *dev)
{
	struct b43_dma *dma;

	if (b43_using_pio_transfers(dev))
		return;
	dma = &dev->dma;

	destroy_ring(dma, rx_ring);
	destroy_ring(dma, tx_ring_AC_BK);
	destroy_ring(dma, tx_ring_AC_BE);
	destroy_ring(dma, tx_ring_AC_VI);
	destroy_ring(dma, tx_ring_AC_VO);
	destroy_ring(dma, tx_ring_mcast);
}

/* Some hardware with 64-bit DMA seems to be bugged and looks for the
 * translation bit in the low address word instead of the high one.
 */
static bool b43_dma_translation_in_low_word(struct b43_wldev *dev,
					    enum b43_dmatype type)
{
	if (type != B43_DMA_64BIT)
		return true;

#ifdef CONFIG_B43_SSB
	if (dev->dev->bus_type == B43_BUS_SSB &&
	    dev->dev->sdev->bus->bustype == SSB_BUSTYPE_PCI &&
	    !(pci_is_pcie(dev->dev->sdev->bus->host_pci) &&
	      ssb_read32(dev->dev->sdev, SSB_TMSHIGH) & SSB_TMSHIGH_DMA64))
		return true;
#endif
	return false;
}

int b43_dma_init(struct b43_wldev *dev)
{
	struct b43_dma *dma = &dev->dma;
	enum b43_dmatype type = b43_engine_type(dev);
	int err;

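	/* The b43_dmatype enumerators are the mask widths themselves
	 * (30, 32 or 64), so the type can be fed to DMA_BIT_MASK(). */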
	err = dma_set_mask_and_coherent(dev->dev->dma_dev, DMA_BIT_MASK(type));
	if (err) {
		b43err(dev->wl, "The machine/kernel does not support "
		       "the required %u-bit DMA mask\n", type);
		return err;
	}

	switch (dev->dev->bus_type) {
#ifdef CONFIG_B43_BCMA
	case B43_BUS_BCMA:
		dma->translation = bcma_core_dma_translation(dev->dev->bdev);
		break;
#endif
#ifdef CONFIG_B43_SSB
	case B43_BUS_SSB:
		dma->translation = ssb_dma_translation(dev->dev->sdev);
		break;
#endif
	}
	dma->translation_in_low = b43_dma_translation_in_low_word(dev, type);

	dma->parity = true;
#ifdef CONFIG_B43_BCMA
	/* TODO: find out which SSB devices need disabling parity */
	if (dev->dev->bus_type == B43_BUS_BCMA)
		dma->parity = false;
#endif

	err = -ENOMEM;
	/* setup TX DMA channels. */
	dma->tx_ring_AC_BK = b43_setup_dmaring(dev, 0, 1, type);
	if (!dma->tx_ring_AC_BK)
		goto out;

	dma->tx_ring_AC_BE = b43_setup_dmaring(dev, 1, 1, type);
	if (!dma->tx_ring_AC_BE)
		goto err_destroy_bk;

	dma->tx_ring_AC_VI = b43_setup_dmaring(dev, 2, 1, type);
	if (!dma->tx_ring_AC_VI)
		goto err_destroy_be;

	dma->tx_ring_AC_VO = b43_setup_dmaring(dev, 3, 1, type);
	if (!dma->tx_ring_AC_VO)
		goto err_destroy_vi;

	dma->tx_ring_mcast = b43_setup_dmaring(dev, 4, 1, type);
	if (!dma->tx_ring_mcast)
		goto err_destroy_vo;

	/* setup RX DMA channel. */
	dma->rx_ring = b43_setup_dmaring(dev, 0, 0, type);
	if (!dma->rx_ring)
		goto err_destroy_mcast;

	/* No support for the TX status DMA ring. */
	B43_WARN_ON(dev->dev->core_rev < 5);

	b43dbg(dev->wl, "%u-bit DMA initialized\n",
	       (unsigned int)type);
	err = 0;
out:
	return err;

err_destroy_mcast:
	destroy_ring(dma, tx_ring_mcast);
err_destroy_vo:
	destroy_ring(dma, tx_ring_AC_VO);
err_destroy_vi:
	destroy_ring(dma, tx_ring_AC_VI);
err_destroy_be:
	destroy_ring(dma, tx_ring_AC_BE);
err_destroy_bk:
	destroy_ring(dma, tx_ring_AC_BK);
	return err;
}

/* Generate a cookie for the TX header. */
static u16 generate_cookie(struct b43_dmaring *ring, int slot)
{
	u16 cookie;

	/* Use the upper 4 bits of the cookie as
	 * DMA controller ID and store the slot number
	 * in the lower 12 bits.
	 * Note that the cookie must never be 0, as this
	 * is a special value used in RX path.
	 * It can also not be 0xFFFF because that is special
	 * for multicast frames.
	 */
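	/* Example: ring->index 1 (tx_ring_AC_BE) and slot 7 yield the
	 * cookie 0x2007; parse_cookie() below reverses the mapping. */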
	cookie = (((u16)ring->index + 1) << 12);
	B43_WARN_ON(slot & ~0x0FFF);
	cookie |= (u16)slot;

	return cookie;
}

/* Inspect a cookie and find out to which controller/slot it belongs. */
static
struct b43_dmaring *parse_cookie(struct b43_wldev *dev, u16 cookie, int *slot)
{
	struct b43_dma *dma = &dev->dma;
	struct b43_dmaring *ring = NULL;

	switch (cookie & 0xF000) {
	case 0x1000:
		ring = dma->tx_ring_AC_BK;
		break;
	case 0x2000:
		ring = dma->tx_ring_AC_BE;
		break;
	case 0x3000:
		ring = dma->tx_ring_AC_VI;
		break;
	case 0x4000:
		ring = dma->tx_ring_AC_VO;
		break;
	case 0x5000:
		ring = dma->tx_ring_mcast;
		break;
	}
	*slot = (cookie & 0x0FFF);
	if (unlikely(!ring || *slot < 0 || *slot >= ring->nr_slots)) {
		b43dbg(dev->wl, "TX-status contains "
		       "invalid cookie: 0x%04X\n", cookie);
		return NULL;
	}

	return ring;
}

static int dma_tx_fragment(struct b43_dmaring *ring,
			   struct sk_buff *skb)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct b43_private_tx_info *priv_info = b43_get_priv_tx_info(info);
	u8 *header;
	int slot, old_top_slot, old_used_slots;
	int err;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_dmadesc_meta *meta_hdr;
	u16 cookie;
	size_t hdrsize = b43_txhdr_size(ring->dev);

	/* Important note: If the number of used DMA slots per TX frame
	 * is changed here, the TX_SLOTS_PER_FRAME definition at the top of
	 * the file has to be updated, too!
	 */

	old_top_slot = ring->current_slot;
	old_used_slots = ring->used_slots;

	/* Get a slot for the header. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta_hdr);
	memset(meta_hdr, 0, sizeof(*meta_hdr));

	header = &(ring->txhdr_cache[(slot / TX_SLOTS_PER_FRAME) * hdrsize]);
	cookie = generate_cookie(ring, slot);
	err = b43_generate_txhdr(ring->dev, header,
				 skb, info, cookie);
	if (unlikely(err)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return err;
	}

	meta_hdr->dmaaddr = map_descbuffer(ring, (unsigned char *)header,
					   hdrsize, 1);
	if (b43_dma_mapping_error(ring, meta_hdr->dmaaddr, hdrsize, 1)) {
		ring->current_slot = old_top_slot;
		ring->used_slots = old_used_slots;
		return -EIO;
	}
	ops->fill_descriptor(ring, desc, meta_hdr->dmaaddr,
			     hdrsize, 1, 0, 0);

	/* Get a slot for the payload. */
	slot = request_slot(ring);
	desc = ops->idx2desc(ring, slot, &meta);
	memset(meta, 0, sizeof(*meta));

	meta->skb = skb;
	meta->is_last_fragment = true;
	priv_info->bouncebuffer = NULL;

	meta->dmaaddr = map_descbuffer(ring, skb->data, skb->len, 1);
	/* create a bounce buffer in zone_dma on mapping failure. */
	if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
		priv_info->bouncebuffer = kmemdup(skb->data, skb->len,
						  GFP_ATOMIC | GFP_DMA);
		if (!priv_info->bouncebuffer) {
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -ENOMEM;
			goto out_unmap_hdr;
		}

		meta->dmaaddr = map_descbuffer(ring, priv_info->bouncebuffer, skb->len, 1);
		if (b43_dma_mapping_error(ring, meta->dmaaddr, skb->len, 1)) {
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
			ring->current_slot = old_top_slot;
			ring->used_slots = old_used_slots;
			err = -EIO;
			goto out_unmap_hdr;
		}
	}

	ops->fill_descriptor(ring, desc, meta->dmaaddr, skb->len, 0, 1, 1);

	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* Tell the firmware about the cookie of the last
		 * mcast frame, so it can clear the more-data bit in it. */
		b43_shm_write16(ring->dev, B43_SHM_SHARED,
				B43_SHM_SH_MCASTCOOKIE, cookie);
	}
	/* Now transfer the whole frame. */
	wmb();
	ops->poke_tx(ring, next_slot(ring, slot));
	return 0;

out_unmap_hdr:
	unmap_descbuffer(ring, meta_hdr->dmaaddr,
			 hdrsize, 1);
	return err;
}

static inline int should_inject_overflow(struct b43_dmaring *ring)
{
#ifdef CONFIG_B43_DEBUG
	if (unlikely(b43_debug(ring->dev, B43_DBG_DMAOVERFLOW))) {
		/* Check if we should inject another ringbuffer overflow
		 * to test handling of this situation in the stack. */
		unsigned long next_overflow;

		next_overflow = ring->last_injected_overflow + HZ;
		if (time_after(jiffies, next_overflow)) {
			ring->last_injected_overflow = jiffies;
			b43dbg(ring->dev->wl,
			       "Injecting TX ring overflow on "
			       "DMA controller %d\n", ring->index);
			return 1;
		}
	}
#endif /* CONFIG_B43_DEBUG */
	return 0;
}

/* Static mapping of mac80211's queues (priorities) to b43 DMA rings. */
static struct b43_dmaring *select_ring_by_priority(struct b43_wldev *dev,
						   u8 queue_prio)
{
	struct b43_dmaring *ring;

	if (dev->qos_enabled) {
		/* 0 = highest priority */
		switch (queue_prio) {
		default:
			B43_WARN_ON(1);
			fallthrough;
		case 0:
			ring = dev->dma.tx_ring_AC_VO;
			break;
		case 1:
			ring = dev->dma.tx_ring_AC_VI;
			break;
		case 2:
			ring = dev->dma.tx_ring_AC_BE;
			break;
		case 3:
			ring = dev->dma.tx_ring_AC_BK;
			break;
		}
	} else
		ring = dev->dma.tx_ring_AC_BE;

	return ring;
}

int b43_dma_tx(struct b43_wldev *dev, struct sk_buff *skb)
{
	struct b43_dmaring *ring;
	struct ieee80211_hdr *hdr;
	int err = 0;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);

	hdr = (struct ieee80211_hdr *)skb->data;
	if (info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM) {
		/* The multicast ring will be sent after the DTIM */
		ring = dev->dma.tx_ring_mcast;
		/* Set the more-data bit. Ucode will clear it on
		 * the last frame for us. */
		hdr->frame_control |= cpu_to_le16(IEEE80211_FCTL_MOREDATA);
	} else {
		/* Decide by priority where to put this frame. */
		ring = select_ring_by_priority(
			dev, skb_get_queue_mapping(skb));
	}

	B43_WARN_ON(!ring->tx);

	if (unlikely(ring->stopped)) {
		/* We get here only because of a bug in mac80211.
		 * Because of a race, one packet may be queued after
		 * the queue is stopped, thus we got called when we shouldn't.
		 * For now, just refuse the transmit. */
		if (b43_debug(dev, B43_DBG_DMAVERBOSE))
			b43err(dev->wl, "Packet after queue stopped\n");
		err = -ENOSPC;
		goto out;
	}

	if (WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME)) {
		/* If we get here, we have a real error with the queue
		 * full, but queues not stopped. */
		b43err(dev->wl, "DMA queue overflow\n");
		err = -ENOSPC;
		goto out;
	}

	/* Assign the queue number to the ring (if not already done before)
	 * so TX status handling can use it. The queue to ring mapping is
	 * static, so we don't need to store it per frame. */
	ring->queue_prio = skb_get_queue_mapping(skb);

	err = dma_tx_fragment(ring, skb);
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		ieee80211_free_txskb(dev->wl->hw, skb);
		err = 0;
		goto out;
	}
	if (unlikely(err)) {
		b43err(dev->wl, "DMA tx mapping failure\n");
		goto out;
	}
	if ((free_slots(ring) < TX_SLOTS_PER_FRAME) ||
	    should_inject_overflow(ring)) {
		/* This TX ring is full. */
		unsigned int skb_mapping = skb_get_queue_mapping(skb);
		b43_stop_queue(dev, skb_mapping);
		dev->wl->tx_queue_stopped[skb_mapping] = true;
		ring->stopped = true;
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Stopped TX ring %d\n", ring->index);
		}
	}
out:
	return err;
}

void b43_dma_handle_txstatus(struct b43_wldev *dev,
			     const struct b43_txstatus *status)
{
	const struct b43_dma_ops *ops;
	struct b43_dmaring *ring;
	struct b43_dmadesc_meta *meta;
	static const struct b43_txstatus fake;	/* filled with 0 */
	const struct b43_txstatus *txstat;
	int slot, firstused;
	bool frame_succeed;
	int skip;
	static u8 err_out1;

	ring = parse_cookie(dev, status->cookie, &slot);
	if (unlikely(!ring))
		return;
	B43_WARN_ON(!ring->tx);

	/* Sanity check: TX packets are processed in-order on one ring.
	 * Check if the slot deduced from the cookie really is the first
	 * used slot. */
	firstused = ring->current_slot - ring->used_slots + 1;
	if (firstused < 0)
		firstused = ring->nr_slots + firstused;

	skip = 0;
	if (unlikely(slot != firstused)) {
		/* This possibly is a firmware bug and will result in
		 * malfunction, memory leaks and/or stall of DMA functionality.
		 */
		if (slot == next_slot(ring, next_slot(ring, firstused))) {
			/* If a single header/data pair was missed, skip over
			 * the first two slots in an attempt to recover.
			 */
			slot = firstused;
			skip = 2;
			if (!err_out1) {
				/* Report the error once. */
				b43dbg(dev->wl,
				       "Skip on DMA ring %d slot %d.\n",
				       ring->index, slot);
				err_out1 = 1;
			}
		} else {
			/* More than a single header/data pair were missed.
			 * Report this error. If running with open-source
			 * firmware, then reset the controller to
			 * revive operation.
			 */
			b43dbg(dev->wl,
			       "Out of order TX status report on DMA ring %d. Expected %d, but got %d\n",
			       ring->index, firstused, slot);
			if (dev->fw.opensource)
				b43_controller_restart(dev, "Out of order TX");
			return;
		}
	}

	ops = ring->ops;
	while (1) {
		B43_WARN_ON(slot < 0 || slot >= ring->nr_slots);
		/* get meta - ignore returned value */
		ops->idx2desc(ring, slot, &meta);

		if (b43_dma_ptr_is_poisoned(meta->skb)) {
			b43dbg(dev->wl, "Poisoned TX slot %d (first=%d) "
			       "on ring %d\n",
			       slot, firstused, ring->index);
			break;
		}

		if (meta->skb) {
			struct b43_private_tx_info *priv_info =
			     b43_get_priv_tx_info(IEEE80211_SKB_CB(meta->skb));

			unmap_descbuffer(ring, meta->dmaaddr,
					 meta->skb->len, 1);
			kfree(priv_info->bouncebuffer);
			priv_info->bouncebuffer = NULL;
		} else {
			unmap_descbuffer(ring, meta->dmaaddr,
					 b43_txhdr_size(dev), 1);
		}

		if (meta->is_last_fragment) {
			struct ieee80211_tx_info *info;

			if (unlikely(!meta->skb)) {
				/* This is a scatter-gather fragment of a frame,
				 * so the skb pointer must not be NULL.
				 */
				b43dbg(dev->wl, "TX status unexpected NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}

			info = IEEE80211_SKB_CB(meta->skb);

			/*
			 * Call back to inform the ieee80211 subsystem about
			 * the status of the transmission. When skipping over
			 * a missed TX status report, use a status structure
			 * filled with zeros to indicate that the frame was not
			 * sent (frame_count 0) and not acknowledged.
			 */
			if (unlikely(skip))
				txstat = &fake;
			else
				txstat = status;

			frame_succeed = b43_fill_txstatus_report(dev, info,
								 txstat);
#ifdef CONFIG_B43_DEBUG
			if (frame_succeed)
				ring->nr_succeed_tx_packets++;
			else
				ring->nr_failed_tx_packets++;
			ring->nr_total_packet_tries += status->frame_count;
#endif /* DEBUG */
			ieee80211_tx_status_skb(dev->wl->hw, meta->skb);

			/* skb will be freed by ieee80211_tx_status_skb().
			 * Poison our pointer. */
			meta->skb = B43_DMA_PTR_POISON;
		} else {
			/* No need to call free_descriptor_buffer here, as
			 * this is only the txhdr, which is not allocated.
			 */
			if (unlikely(meta->skb)) {
				b43dbg(dev->wl, "TX status unexpected non-NULL skb "
				       "at slot %d (first=%d) on ring %d\n",
				       slot, firstused, ring->index);
				break;
			}
		}

		/* Everything unmapped and free'd. So it's not used anymore. */
		ring->used_slots--;

		if (meta->is_last_fragment && !skip) {
			/* This is the last scatter-gather
			 * fragment of the frame. We are done. */
			break;
		}
		slot = next_slot(ring, slot);
		if (skip > 0)
			--skip;
	}
	if (ring->stopped) {
		B43_WARN_ON(free_slots(ring) < TX_SLOTS_PER_FRAME);
		ring->stopped = false;
	}

	if (dev->wl->tx_queue_stopped[ring->queue_prio]) {
		dev->wl->tx_queue_stopped[ring->queue_prio] = false;
	} else {
		/* If the driver queue is running wake the corresponding
		 * mac80211 queue. */
		b43_wake_queue(dev, ring->queue_prio);
		if (b43_debug(dev, B43_DBG_DMAVERBOSE)) {
			b43dbg(dev->wl, "Woke up TX ring %d\n", ring->index);
		}
	}
	/* Add work to the queue. */
	ieee80211_queue_work(dev->wl->hw, &dev->wl->tx_work);
}

static void dma_rx(struct b43_dmaring *ring, int *slot)
{
	const struct b43_dma_ops *ops = ring->ops;
	struct b43_dmadesc_generic *desc;
	struct b43_dmadesc_meta *meta;
	struct b43_rxhdr_fw4 *rxhdr;
	struct sk_buff *skb;
	u16 len;
	int err;
	dma_addr_t dmaaddr;

	desc = ops->idx2desc(ring, *slot, &meta);

	sync_descbuffer_for_cpu(ring, meta->dmaaddr, ring->rx_buffersize);
	skb = meta->skb;

	rxhdr = (struct b43_rxhdr_fw4 *)skb->data;
	len = le16_to_cpu(rxhdr->frame_len);
	if (len == 0) {
		int i = 0;

		do {
			udelay(2);
			barrier();
			len = le16_to_cpu(rxhdr->frame_len);
		} while (len == 0 && i++ < 5);
		if (unlikely(len == 0)) {
			dmaaddr = meta->dmaaddr;
			goto drop_recycle_buffer;
		}
	}
	if (unlikely(b43_rx_buffer_is_poisoned(ring, skb))) {
		/* Something went wrong with the DMA.
		 * The device did not touch the buffer and did not overwrite the poison. */
		b43dbg(ring->dev->wl, "DMA RX: Dropping poisoned buffer.\n");
		dmaaddr = meta->dmaaddr;
		goto drop_recycle_buffer;
	}
	if (unlikely(len + ring->frameoffset > ring->rx_buffersize)) {
		/* The data did not fit into one descriptor buffer
		 * and is split over multiple buffers.
		 * This should never happen, as we try to allocate buffers
		 * big enough. So simply ignore this packet.
		 */
		int cnt = 0;
		s32 tmp = len;

		while (1) {
			desc = ops->idx2desc(ring, *slot, &meta);
			/* recycle the descriptor buffer. */
			b43_poison_rx_buffer(ring, meta->skb);
			sync_descbuffer_for_device(ring, meta->dmaaddr,
						   ring->rx_buffersize);
			*slot = next_slot(ring, *slot);
			cnt++;
			tmp -= ring->rx_buffersize;
			if (tmp <= 0)
				break;
		}
		b43err(ring->dev->wl, "DMA RX buffer too small "
		       "(len: %u, buffer: %u, nr-dropped: %d)\n",
		       len, ring->rx_buffersize, cnt);
		goto drop;
	}

	dmaaddr = meta->dmaaddr;
	err = setup_rx_descbuffer(ring, desc, meta, GFP_ATOMIC);
	if (unlikely(err)) {
		b43dbg(ring->dev->wl, "DMA RX: setup_rx_descbuffer() failed\n");
		goto drop_recycle_buffer;
	}

	unmap_descbuffer(ring, dmaaddr, ring->rx_buffersize, 0);
	skb_put(skb, len + ring->frameoffset);
	skb_pull(skb, ring->frameoffset);

	b43_rx(ring->dev, skb, rxhdr);
drop:
	return;

drop_recycle_buffer:
	/* Poison and recycle the RX buffer. */
	b43_poison_rx_buffer(ring, skb);
	sync_descbuffer_for_device(ring, dmaaddr, ring->rx_buffersize);
}

void b43_dma_handle_rx_overflow(struct b43_dmaring *ring)
{
	int current_slot, previous_slot;

	B43_WARN_ON(ring->tx);

	/* The device has filled all buffers; drop all packets and let TCP
	 * decrease speed.
	 * Decrementing the RX index by one lets the device see all slots
	 * as free again.
	 */
	/*
	 * TODO: How to increase rx_drop in mac80211?
	 */
	current_slot = ring->ops->get_current_rxslot(ring);
	previous_slot = prev_slot(ring, current_slot);
	ring->ops->set_current_rxslot(ring, previous_slot);
}

void b43_dma_rx(struct b43_dmaring *ring)
{
	const struct b43_dma_ops *ops = ring->ops;
	int slot, current_slot;
	int used_slots = 0;

	B43_WARN_ON(ring->tx);
	current_slot = ops->get_current_rxslot(ring);
	B43_WARN_ON(!(current_slot >= 0 && current_slot < ring->nr_slots));

	slot = ring->current_slot;
	for (; slot != current_slot; slot = next_slot(ring, slot)) {
		dma_rx(ring, &slot);
		update_max_used_slots(ring, ++used_slots);
	}
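	/* Make sure the descriptor updates above are visible to the
	 * device before the index register hands the slots back to it. */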
	wmb();
	ops->set_current_rxslot(ring, slot);
	ring->current_slot = slot;
}

static void b43_dma_tx_suspend_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_suspend(ring);
}

static void b43_dma_tx_resume_ring(struct b43_dmaring *ring)
{
	B43_WARN_ON(!ring->tx);
	ring->ops->tx_resume(ring);
}

void b43_dma_tx_suspend(struct b43_wldev *dev)
{
	b43_power_saving_ctl_bits(dev, B43_PS_AWAKE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BK);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_suspend_ring(dev->dma.tx_ring_mcast);
}

void b43_dma_tx_resume(struct b43_wldev *dev)
{
	b43_dma_tx_resume_ring(dev->dma.tx_ring_mcast);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VO);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_VI);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BE);
	b43_dma_tx_resume_ring(dev->dma.tx_ring_AC_BK);
	b43_power_saving_ctl_bits(dev, 0);
}

static void direct_fifo_rx(struct b43_wldev *dev, enum b43_dmatype type,
			   u16 mmio_base, bool enable)
{
	u32 ctl;

	if (type == B43_DMA_64BIT) {
		ctl = b43_read32(dev, mmio_base + B43_DMA64_RXCTL);
		ctl &= ~B43_DMA64_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA64_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA64_RXCTL, ctl);
	} else {
		ctl = b43_read32(dev, mmio_base + B43_DMA32_RXCTL);
		ctl &= ~B43_DMA32_RXDIRECTFIFO;
		if (enable)
			ctl |= B43_DMA32_RXDIRECTFIFO;
		b43_write32(dev, mmio_base + B43_DMA32_RXCTL, ctl);
	}
}

/* Enable/Disable Direct FIFO Receive Mode (PIO) on a RX engine.
 * This is called from PIO code, so DMA structures are not available. */
void b43_dma_direct_fifo_rx(struct b43_wldev *dev,
			    unsigned int engine_index, bool enable)
{
	enum b43_dmatype type;
	u16 mmio_base;

	type = b43_engine_type(dev);

	mmio_base = b43_dmacontroller_base(type, engine_index);
	direct_fifo_rx(dev, type, mmio_base, enable);
}