/*

  Broadcom B43legacy wireless driver

  PIO Transmission

  Copyright (c) 2005 Michael Buesch <m@bues.ch>

  This program is free software; you can redistribute it and/or modify
  it under the terms of the GNU General Public License as published by
  the Free Software Foundation; either version 2 of the License, or
  (at your option) any later version.

  This program is distributed in the hope that it will be useful,
  but WITHOUT ANY WARRANTY; without even the implied warranty of
  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  GNU General Public License for more details.

  You should have received a copy of the GNU General Public License
  along with this program; see the file COPYING.  If not, write to
  the Free Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
  Boston, MA 02110-1301, USA.

*/

#include "b43legacy.h"
#include "pio.h"
#include "main.h"
#include "xmit.h"

#include <linux/delay.h>
#include <linux/slab.h>


static void tx_start(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_INIT);
}

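/* Write a single byte to the TX data register. Cores that need the
 * PIO workarounds (revision < 3) take the data byte first and the
 * WRITELO control word afterwards; newer cores expect the control
 * word before the data. */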
static void tx_octet(struct b43legacy_pioqueue *queue,
		     u8 octet)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
	} else {
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, octet);
	}
}

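/* Fetch the next 16-bit word for transmission. The first txhdr_size
 * bytes come from the firmware TX header, the rest from the packet
 * data. *pos is the running byte offset into that combined stream
 * and is advanced by two. */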
static u16 tx_get_next_word(const u8 *txhdr,
			    const u8 *packet,
			    size_t txhdr_size,
			    unsigned int *pos)
{
	const u8 *source;
	unsigned int i = *pos;
	u16 ret;

	if (i < txhdr_size)
		source = txhdr;
	else {
		source = packet;
		i -= txhdr_size;
	}
	ret = le16_to_cpu(*((__le16 *)(source + i)));
	*pos += 2;

	return ret;
}

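/* Push the TX header and the packet data to the device, one 16-bit
 * word at a time. On workaround cores the first data word must be
 * written before 16-bit (WRITELO | WRITEHI) mode is enabled. A
 * trailing odd byte is sent separately through tx_octet(). */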
static void tx_data(struct b43legacy_pioqueue *queue,
		    u8 *txhdr,
		    const u8 *packet,
		    unsigned int octets)
{
	u16 data;
	unsigned int i = 0;

	if (queue->need_workarounds) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    B43legacy_PIO_TXCTL_WRITELO |
			    B43legacy_PIO_TXCTL_WRITEHI);
	while (i < octets - 1) {
		data = tx_get_next_word(txhdr, packet,
					sizeof(struct b43legacy_txhdr_fw3), &i);
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA, data);
	}
	if (octets % 2)
		tx_octet(queue, packet[octets -
			 sizeof(struct b43legacy_txhdr_fw3) - 1]);
}

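/* Finish the transfer. On workaround cores the last data byte still
 * has to be written together with the COMPLETE flag; on newer cores
 * setting COMPLETE is enough. */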
static void tx_complete(struct b43legacy_pioqueue *queue,
			struct sk_buff *skb)
{
	if (queue->need_workarounds) {
		b43legacy_pio_write(queue, B43legacy_PIO_TXDATA,
				    skb->data[skb->len - 1]);
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_WRITELO |
				    B43legacy_PIO_TXCTL_COMPLETE);
	} else
		b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
				    B43legacy_PIO_TXCTL_COMPLETE);
}

static u16 generate_cookie(struct b43legacy_pioqueue *queue,
			   struct b43legacy_pio_txpacket *packet)
{
	u16 cookie = 0x0000;
	int packetindex;

	/* We use the upper 4 bits for the PIO
	 * controller ID and the lower 12 bits
	 * for the packet index (in the cache).
	 */
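	/* For example, the packet at cache index 5 on the queue at
	 * B43legacy_MMIO_PIO3_BASE gets the cookie 0x2005. */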
	switch (queue->mmio_base) {
	case B43legacy_MMIO_PIO1_BASE:
		break;
	case B43legacy_MMIO_PIO2_BASE:
		cookie = 0x1000;
		break;
	case B43legacy_MMIO_PIO3_BASE:
		cookie = 0x2000;
		break;
	case B43legacy_MMIO_PIO4_BASE:
		cookie = 0x3000;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = pio_txpacket_getindex(packet);
	B43legacy_WARN_ON(!(((u16)packetindex & 0xF000) == 0x0000));
	cookie |= (u16)packetindex;

	return cookie;
}

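/* The inverse of generate_cookie(): map the cookie from a TX status
 * report back to the queue and the packet slot it was generated for. */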
static
struct b43legacy_pioqueue *parse_cookie(struct b43legacy_wldev *dev,
					u16 cookie,
					struct b43legacy_pio_txpacket **packet)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue = NULL;
	int packetindex;

	switch (cookie & 0xF000) {
	case 0x0000:
		queue = pio->queue0;
		break;
	case 0x1000:
		queue = pio->queue1;
		break;
	case 0x2000:
		queue = pio->queue2;
		break;
	case 0x3000:
		queue = pio->queue3;
		break;
	default:
		B43legacy_WARN_ON(1);
	}
	packetindex = (cookie & 0x0FFF);
	B43legacy_WARN_ON(!(packetindex >= 0 && packetindex
			  < B43legacy_PIO_MAXTXPACKETS));
	*packet = &(queue->tx_packets_cache[packetindex]);

	return queue;
}

union txhdr_union {
	struct b43legacy_txhdr_fw3 txhdr_fw3;
};

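/* Write one complete frame to the device: build the firmware TX
 * header for the skb, start the PIO transfer, push header and
 * payload, and finally signal completion. */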
static int pio_tx_write_fragment(struct b43legacy_pioqueue *queue,
				  struct sk_buff *skb,
				  struct b43legacy_pio_txpacket *packet,
				  size_t txhdr_size)
{
	union txhdr_union txhdr_data;
	u8 *txhdr = NULL;
	unsigned int octets;
	int err;

	txhdr = (u8 *)(&txhdr_data.txhdr_fw3);

	B43legacy_WARN_ON(skb_shinfo(skb)->nr_frags != 0);
	err = b43legacy_generate_txhdr(queue->dev,
				 txhdr, skb->data, skb->len,
				 IEEE80211_SKB_CB(skb),
				 generate_cookie(queue, packet));
	if (err)
		return err;

	tx_start(queue);
	octets = skb->len + txhdr_size;
	if (queue->need_workarounds)
		octets--;
	tx_data(queue, txhdr, (u8 *)skb->data, octets);
	tx_complete(queue, skb);

	return 0;
}

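/* Free the skb attached to the packet (if any) and move the packet
 * descriptor back to the queue's free list. */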
static void free_txpacket(struct b43legacy_pio_txpacket *packet,
			  int irq_context)
{
	struct b43legacy_pioqueue *queue = packet->queue;

	if (packet->skb) {
		if (irq_context)
			dev_kfree_skb_irq(packet->skb);
		else
			dev_kfree_skb(packet->skb);
	}
	list_move(&packet->list, &queue->txfree);
	queue->nr_txfree++;
}

static int pio_tx_packet(struct b43legacy_pio_txpacket *packet)
{
	struct b43legacy_pioqueue *queue = packet->queue;
	struct sk_buff *skb = packet->skb;
	u16 octets;
	int err;

	octets = (u16)skb->len + sizeof(struct b43legacy_txhdr_fw3);
	if (queue->tx_devq_size < octets) {
		b43legacywarn(queue->dev->wl, "PIO queue too small. "
			"Dropping packet.\n");
		/* Drop it silently (return success) */
		free_txpacket(packet, 1);
		return 0;
	}
	B43legacy_WARN_ON(queue->tx_devq_packets >
			  B43legacy_PIO_MAXTXDEVQPACKETS);
	B43legacy_WARN_ON(queue->tx_devq_used > queue->tx_devq_size);
	/* Check if there is sufficient free space on the device
	 * TX queue. If not, return and let the TX tasklet
	 * retry later.
	 */
	if (queue->tx_devq_packets == B43legacy_PIO_MAXTXDEVQPACKETS)
		return -EBUSY;
	if (queue->tx_devq_used + octets > queue->tx_devq_size)
		return -EBUSY;
	/* Now poke the device. */
	err = pio_tx_write_fragment(queue, skb, packet,
			      sizeof(struct b43legacy_txhdr_fw3));
	if (unlikely(err == -ENOKEY)) {
		/* Drop this packet, as we don't have the encryption key
		 * anymore and must not transmit it unencrypted. */
		free_txpacket(packet, 1);
		return 0;
	}

	/* Account for the packet size.
	 * (We must not overflow the device TX queue)
	 */
	queue->tx_devq_packets++;
	queue->tx_devq_used += octets;

	/* Transmission started, everything ok, move the
	 * packet to the txrunning list.
	 */
	list_move_tail(&packet->list, &queue->txrunning);

	return 0;
}

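/* The TX tasklet feeds queued packets to the device. It stops at the
 * first packet that does not fit into the device queue and leaves it
 * (and everything behind it) on the txqueue list for a later run. */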
static void tx_tasklet(unsigned long d)
{
	struct b43legacy_pioqueue *queue = (struct b43legacy_pioqueue *)d;
	struct b43legacy_wldev *dev = queue->dev;
	unsigned long flags;
	struct b43legacy_pio_txpacket *packet, *tmp_packet;
	int err;
	u16 txctl;

	spin_lock_irqsave(&dev->wl->irq_lock, flags);
	if (queue->tx_frozen)
		goto out_unlock;
	txctl = b43legacy_pio_read(queue, B43legacy_PIO_TXCTL);
	if (txctl & B43legacy_PIO_TXCTL_SUSPEND)
		goto out_unlock;

	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list) {
		/* Try to transmit the packet. This can fail, if
		 * the device queue is full. In case of failure, the
		 * packet is left in the txqueue.
		 * If transmission succeeds, the packet is moved to txrunning.
		 * If it is impossible to transmit the packet, it
		 * is dropped.
		 */
		err = pio_tx_packet(packet);
		if (err)
			break;
	}
out_unlock:
	spin_unlock_irqrestore(&dev->wl->irq_lock, flags);
}

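/* Put every packet descriptor of the static per-queue cache onto the
 * txfree list. */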
static void setup_txqueues(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet;
	int i;

	queue->nr_txfree = B43legacy_PIO_MAXTXPACKETS;
	for (i = 0; i < B43legacy_PIO_MAXTXPACKETS; i++) {
		packet = &(queue->tx_packets_cache[i]);

		packet->queue = queue;
		INIT_LIST_HEAD(&packet->list);

		list_add(&packet->list, &queue->txfree);
	}
}

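/* Allocate and initialize one PIO queue. The usable device queue size
 * is read from the TXQBUFSIZE register and reduced by
 * B43legacy_PIO_TXQADJUST; a size of zero means the core does not
 * support PIO at all. */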
static
struct b43legacy_pioqueue *b43legacy_setup_pioqueue(struct b43legacy_wldev *dev,
						    u16 pio_mmio_base)
{
	struct b43legacy_pioqueue *queue;
	u32 value;
	u16 qsize;

	queue = kzalloc(sizeof(*queue), GFP_KERNEL);
	if (!queue)
		goto out;

	queue->dev = dev;
	queue->mmio_base = pio_mmio_base;
	queue->need_workarounds = (dev->dev->id.revision < 3);

	INIT_LIST_HEAD(&queue->txfree);
	INIT_LIST_HEAD(&queue->txqueue);
	INIT_LIST_HEAD(&queue->txrunning);
	tasklet_init(&queue->txtask, tx_tasklet,
		     (unsigned long)queue);

	value = b43legacy_read32(dev, B43legacy_MMIO_MACCTL);
	value &= ~B43legacy_MACCTL_BE;
	b43legacy_write32(dev, B43legacy_MMIO_MACCTL, value);

	qsize = b43legacy_read16(dev, queue->mmio_base
				 + B43legacy_PIO_TXQBUFSIZE);
	if (qsize == 0) {
		b43legacyerr(dev->wl, "This card does not support PIO "
		       "operation mode. Please use DMA mode "
		       "(module parameter pio=0).\n");
		goto err_freequeue;
	}
	if (qsize <= B43legacy_PIO_TXQADJUST) {
		b43legacyerr(dev->wl, "PIO tx device-queue too small (%u)\n",
		       qsize);
		goto err_freequeue;
	}
	qsize -= B43legacy_PIO_TXQADJUST;
	queue->tx_devq_size = qsize;

	setup_txqueues(queue);

out:
	return queue;

err_freequeue:
	kfree(queue);
	queue = NULL;
	goto out;
}

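/* Kill the TX tasklet and release every packet that is still queued
 * or currently running on the device. */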
static void cancel_transfers(struct b43legacy_pioqueue *queue)
{
	struct b43legacy_pio_txpacket *packet, *tmp_packet;

	tasklet_kill(&queue->txtask);

	list_for_each_entry_safe(packet, tmp_packet, &queue->txrunning, list)
		free_txpacket(packet, 0);
	list_for_each_entry_safe(packet, tmp_packet, &queue->txqueue, list)
		free_txpacket(packet, 0);
}

static void b43legacy_destroy_pioqueue(struct b43legacy_pioqueue *queue)
{
	if (!queue)
		return;

	cancel_transfers(queue);
	kfree(queue);
}

void b43legacy_pio_free(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	if (!b43legacy_using_pio(dev))
		return;
	pio = &dev->pio;

	b43legacy_destroy_pioqueue(pio->queue3);
	pio->queue3 = NULL;
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
}

int b43legacy_pio_init(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio = &dev->pio;
	struct b43legacy_pioqueue *queue;
	int err = -ENOMEM;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO1_BASE);
	if (!queue)
		goto out;
	pio->queue0 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO2_BASE);
	if (!queue)
		goto err_destroy0;
	pio->queue1 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO3_BASE);
	if (!queue)
		goto err_destroy1;
	pio->queue2 = queue;

	queue = b43legacy_setup_pioqueue(dev, B43legacy_MMIO_PIO4_BASE);
	if (!queue)
		goto err_destroy2;
	pio->queue3 = queue;

	if (dev->dev->id.revision < 3)
		dev->irq_mask |= B43legacy_IRQ_PIO_WORKAROUND;

	b43legacydbg(dev->wl, "PIO initialized\n");
	err = 0;
out:
	return err;

err_destroy2:
	b43legacy_destroy_pioqueue(pio->queue2);
	pio->queue2 = NULL;
err_destroy1:
	b43legacy_destroy_pioqueue(pio->queue1);
	pio->queue1 = NULL;
err_destroy0:
	b43legacy_destroy_pioqueue(pio->queue0);
	pio->queue0 = NULL;
	goto out;
}

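/* Queue one frame for transmission. Data frames always go to queue1;
 * the packet is attached to a free descriptor, moved to the txqueue
 * list and the TX tasklet is scheduled to push it to the device. */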
int b43legacy_pio_tx(struct b43legacy_wldev *dev,
		     struct sk_buff *skb)
{
	struct b43legacy_pioqueue *queue = dev->pio.queue1;
	struct b43legacy_pio_txpacket *packet;

	B43legacy_WARN_ON(queue->tx_suspended);
	B43legacy_WARN_ON(list_empty(&queue->txfree));

	packet = list_entry(queue->txfree.next, struct b43legacy_pio_txpacket,
			    list);
	packet->skb = skb;

	list_move_tail(&packet->list, &queue->txqueue);
	queue->nr_txfree--;
	B43legacy_WARN_ON(queue->nr_txfree >= B43legacy_PIO_MAXTXPACKETS);

	tasklet_schedule(&queue->txtask);

	return 0;
}

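/* Handle a TX status report from the hardware: look up the packet by
 * its cookie, update the device-queue accounting, fill in the rate
 * control status and hand the skb back to mac80211. */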
void b43legacy_pio_handle_txstatus(struct b43legacy_wldev *dev,
				   const struct b43legacy_txstatus *status)
{
	struct b43legacy_pioqueue *queue;
	struct b43legacy_pio_txpacket *packet;
	struct ieee80211_tx_info *info;
	int retry_limit;

	queue = parse_cookie(dev, status->cookie, &packet);
	B43legacy_WARN_ON(!queue);

	if (!packet->skb)
		return;

	queue->tx_devq_packets--;
	queue->tx_devq_used -= (packet->skb->len +
				sizeof(struct b43legacy_txhdr_fw3));

	info = IEEE80211_SKB_CB(packet->skb);

	/* Preserve the configured retry limit before clearing the status.
	 * The xmit function has overwritten the rc's value with the actual
	 * retry limit done by the hardware. */
	retry_limit = info->status.rates[0].count;
	ieee80211_tx_info_clear_status(info);

	if (status->acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	if (status->rts_count > dev->wl->hw->conf.short_frame_max_tx_count) {
		/*
		 * If the short retries (RTS, not data frame) have exceeded
		 * the limit, the hw will not have tried the selected rate,
		 * but will have used the fallback rate instead.
		 * Don't let the rate control count attempts for the selected
		 * rate in this case, otherwise the statistics will be off.
		 */
		info->status.rates[0].count = 0;
		info->status.rates[1].count = status->frame_count;
	} else {
		if (status->frame_count > retry_limit) {
			info->status.rates[0].count = retry_limit;
			info->status.rates[1].count = status->frame_count -
					retry_limit;

		} else {
			info->status.rates[0].count = status->frame_count;
			info->status.rates[1].idx = -1;
		}
	}
	ieee80211_tx_status_irqsafe(dev->wl->hw, packet->skb);
	packet->skb = NULL;

	free_txpacket(packet, 1);
	/* If there are packets on the txqueue, poke the tasklet
	 * to transmit them.
	 */
	if (!list_empty(&queue->txqueue))
		tasklet_schedule(&queue->txtask);
}

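/* Report an RX error: log it, write the READY bit to the RX control
 * register and, if clear_buffers is set (only valid on the first PIO
 * queue), drain the stale RX data with dummy reads. */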
static void pio_rx_error(struct b43legacy_pioqueue *queue,
			 int clear_buffers,
			 const char *error)
{
	int i;

	b43legacyerr(queue->dev->wl, "PIO RX error: %s\n", error);
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_READY);
	if (clear_buffers) {
		B43legacy_WARN_ON(queue->mmio_base != B43legacy_MMIO_PIO1_BASE);
		for (i = 0; i < 15; i++) {
			/* Dummy read. */
			b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		}
	}
}

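/* Receive path. The first word read from the RX data register is the
 * frame length; the following words complete the firmware RX header
 * (14 more bytes on the status queue at PIO4, 18 on the other
 * queues). On the PIO4 queue the "frame" is really a TX status
 * record; everything else is copied into an skb and passed up. */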
void b43legacy_pio_rx(struct b43legacy_pioqueue *queue)
{
	__le16 preamble[21] = { 0 };
	struct b43legacy_rxhdr_fw3 *rxhdr;
	u16 tmp;
	u16 len;
	u16 macstat;
	int i;
	int preamble_readwords;
	struct sk_buff *skb;

	tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
	if (!(tmp & B43legacy_PIO_RXCTL_DATAAVAILABLE))
		return;
	b43legacy_pio_write(queue, B43legacy_PIO_RXCTL,
			    B43legacy_PIO_RXCTL_DATAAVAILABLE);

	for (i = 0; i < 10; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXCTL);
		if (tmp & B43legacy_PIO_RXCTL_READY)
			goto data_ready;
		udelay(10);
	}
	b43legacydbg(queue->dev->wl, "PIO RX timed out\n");
	return;
data_ready:

	len = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
	if (unlikely(len > 0x700)) {
		pio_rx_error(queue, 0, "len > 0x700");
		return;
	}
	if (unlikely(len == 0 && queue->mmio_base !=
		     B43legacy_MMIO_PIO4_BASE)) {
		pio_rx_error(queue, 0, "len == 0");
		return;
	}
	preamble[0] = cpu_to_le16(len);
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE)
		preamble_readwords = 14 / sizeof(u16);
	else
		preamble_readwords = 18 / sizeof(u16);
	for (i = 0; i < preamble_readwords; i++) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		preamble[i + 1] = cpu_to_le16(tmp);
	}
	rxhdr = (struct b43legacy_rxhdr_fw3 *)preamble;
	macstat = le16_to_cpu(rxhdr->mac_status);
	if (macstat & B43legacy_RX_MAC_FCSERR) {
		pio_rx_error(queue,
			     (queue->mmio_base == B43legacy_MMIO_PIO1_BASE),
			     "Frame FCS error");
		return;
	}
	if (queue->mmio_base == B43legacy_MMIO_PIO4_BASE) {
		/* We received an xmit status. */
		struct b43legacy_hwtxstatus *hw;

		hw = (struct b43legacy_hwtxstatus *)(preamble + 1);
		b43legacy_handle_hwtxstatus(queue->dev, hw);

		return;
	}

	skb = dev_alloc_skb(len);
	if (unlikely(!skb)) {
		pio_rx_error(queue, 1, "OOM");
		return;
	}
	skb_put(skb, len);
	for (i = 0; i < len - 1; i += 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		*((__le16 *)(skb->data + i)) = cpu_to_le16(tmp);
	}
	if (len % 2) {
		tmp = b43legacy_pio_read(queue, B43legacy_PIO_RXDATA);
		skb->data[len - 1] = (tmp & 0x00FF);
	}
	b43legacy_rx(queue->dev, skb, rxhdr);
}

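/* Suspend TX on this queue: adjust the power saving control bits and
 * set the SUSPEND flag in the TX control register.
 * b43legacy_pio_tx_resume() reverses both and reschedules the
 * tasklet. */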
void b43legacy_pio_tx_suspend(struct b43legacy_pioqueue *queue)
{
	b43legacy_power_saving_ctl_bits(queue->dev, -1, 1);
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    | B43legacy_PIO_TXCTL_SUSPEND);
}

void b43legacy_pio_tx_resume(struct b43legacy_pioqueue *queue)
{
	b43legacy_pio_write(queue, B43legacy_PIO_TXCTL,
			    b43legacy_pio_read(queue, B43legacy_PIO_TXCTL)
			    & ~B43legacy_PIO_TXCTL_SUSPEND);
	b43legacy_power_saving_ctl_bits(queue->dev, -1, -1);
	tasklet_schedule(&queue->txtask);
}

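/* Freezing marks all four queues so that the TX tasklet will not push
 * any more packets to the device; thawing clears the flag and
 * reschedules the tasklet for every queue that still has pending
 * packets. */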
void b43legacy_pio_freeze_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 1;
	pio->queue1->tx_frozen = 1;
	pio->queue2->tx_frozen = 1;
	pio->queue3->tx_frozen = 1;
}

void b43legacy_pio_thaw_txqueues(struct b43legacy_wldev *dev)
{
	struct b43legacy_pio *pio;

	B43legacy_WARN_ON(!b43legacy_using_pio(dev));
	pio = &dev->pio;
	pio->queue0->tx_frozen = 0;
	pio->queue1->tx_frozen = 0;
	pio->queue2->tx_frozen = 0;
	pio->queue3->tx_frozen = 0;
	if (!list_empty(&pio->queue0->txqueue))
		tasklet_schedule(&pio->queue0->txtask);
	if (!list_empty(&pio->queue1->txqueue))
		tasklet_schedule(&pio->queue1->txtask);
	if (!list_empty(&pio->queue2->txqueue))
		tasklet_schedule(&pio->queue2->txtask);
	if (!list_empty(&pio->queue3->txqueue))
		tasklet_schedule(&pio->queue3->txtask);
}