1 /*
2  * This file is part of wl1271
3  *
4  * Copyright (C) 2009 Nokia Corporation
5  *
6  * Contact: Luciano Coelho <luciano.coelho@nokia.com>
7  *
8  * This program is free software; you can redistribute it and/or
9  * modify it under the terms of the GNU General Public License
10  * version 2 as published by the Free Software Foundation.
11  *
12  * This program is distributed in the hope that it will be useful, but
13  * WITHOUT ANY WARRANTY; without even the implied warranty of
14  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
15  * General Public License for more details.
16  *
17  * You should have received a copy of the GNU General Public License
18  * along with this program; if not, write to the Free Software
19  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
20  * 02110-1301 USA
21  *
22  */
23 
24 #include <linux/kernel.h>
25 #include <linux/module.h>
26 #include <linux/etherdevice.h>
27 
28 #include "wlcore.h"
29 #include "debug.h"
30 #include "io.h"
31 #include "ps.h"
32 #include "tx.h"
33 #include "event.h"
34 #include "hw_ops.h"
35 
36 /*
37  * TODO: this is here just for now, it must be removed when the data
38  * operations are in place.
39  */
40 #include "../wl12xx/reg.h"
41 
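/*
 * Program the firmware's default WEP key index for this vif: in AP mode
 * the key is set on the broadcast link, otherwise on the station link.
 */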
42 static int wl1271_set_default_wep_key(struct wl1271 *wl,
43 				      struct wl12xx_vif *wlvif, u8 id)
44 {
45 	int ret;
46 	bool is_ap = (wlvif->bss_type == BSS_TYPE_AP_BSS);
47 
48 	if (is_ap)
49 		ret = wl12xx_cmd_set_default_wep_key(wl, id,
50 						     wlvif->ap.bcast_hlid);
51 	else
52 		ret = wl12xx_cmd_set_default_wep_key(wl, id, wlvif->sta.hlid);
53 
54 	if (ret < 0)
55 		return ret;
56 
57 	wl1271_debug(DEBUG_CRYPT, "default wep key idx: %d", (int)id);
58 	return 0;
59 }
60 
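/*
 * Reserve a free TX descriptor id for the skb and remember the mapping
 * in wl->tx_frames. Returns the id, or -EBUSY when every descriptor is
 * already in use.
 */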
61 static int wl1271_alloc_tx_id(struct wl1271 *wl, struct sk_buff *skb)
62 {
63 	int id;
64 
65 	id = find_first_zero_bit(wl->tx_frames_map, wl->num_tx_desc);
66 	if (id >= wl->num_tx_desc)
67 		return -EBUSY;
68 
69 	__set_bit(id, wl->tx_frames_map);
70 	wl->tx_frames[id] = skb;
71 	wl->tx_frames_cnt++;
72 	return id;
73 }
74 
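/*
 * Release a TX descriptor id. If the descriptor table had been
 * completely full, clear the FW-busy flag now that a slot is free
 * again.
 */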
75 static void wl1271_free_tx_id(struct wl1271 *wl, int id)
76 {
77 	if (__test_and_clear_bit(id, wl->tx_frames_map)) {
78 		if (unlikely(wl->tx_frames_cnt == wl->num_tx_desc))
79 			clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
80 
81 		wl->tx_frames[id] = NULL;
82 		wl->tx_frames_cnt--;
83 	}
84 }
85 
86 static void wl1271_tx_ap_update_inconnection_sta(struct wl1271 *wl,
87 						 struct sk_buff *skb)
88 {
89 	struct ieee80211_hdr *hdr;
90 
91 	/*
92 	 * Add the station to the known list before transmitting the
93 	 * authentication response. This way it won't get de-authed by the
94 	 * FW when transmitting too soon.
95 	 */
96 	hdr = (struct ieee80211_hdr *)(skb->data +
97 				       sizeof(struct wl1271_tx_hw_descr));
98 	if (ieee80211_is_auth(hdr->frame_control))
99 		wl1271_acx_set_inconnection_sta(wl, hdr->addr1);
100 }
101 
102 static void wl1271_tx_regulate_link(struct wl1271 *wl,
103 				    struct wl12xx_vif *wlvif,
104 				    u8 hlid)
105 {
106 	bool fw_ps, single_sta;
107 	u8 tx_pkts;
108 
109 	if (WARN_ON(!test_bit(hlid, wlvif->links_map)))
110 		return;
111 
112 	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
113 	tx_pkts = wl->links[hlid].allocated_pkts;
114 	single_sta = (wl->active_sta_count == 1);
115 
116 	/*
117 	 * If the link is in FW PS and enough of its data is already queued
118 	 * in the FW, we can put the link into high-level PS and clean out
119 	 * its TX queues. Make an exception if this is the only connected
120 	 * station; in that case FW-memory congestion is not a problem.
121 	 */
122 	if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
123 		wl12xx_ps_link_start(wl, wlvif, hlid, true);
124 }
125 
126 bool wl12xx_is_dummy_packet(struct wl1271 *wl, struct sk_buff *skb)
127 {
128 	return wl->dummy_packet == skb;
129 }
130 
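/*
 * Map an skb to a host link id (hlid) in AP mode: use the station's own
 * link when mac80211 supplied a STA entry, otherwise the global link
 * for management frames and the broadcast link for everything else
 * (or the system link while the AP is not started yet).
 */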
131 u8 wl12xx_tx_get_hlid_ap(struct wl1271 *wl, struct wl12xx_vif *wlvif,
132 			 struct sk_buff *skb)
133 {
134 	struct ieee80211_tx_info *control = IEEE80211_SKB_CB(skb);
135 
136 	if (control->control.sta) {
137 		struct wl1271_station *wl_sta;
138 
139 		wl_sta = (struct wl1271_station *)
140 				control->control.sta->drv_priv;
141 		return wl_sta->hlid;
142 	} else {
143 		struct ieee80211_hdr *hdr;
144 
145 		if (!test_bit(WLVIF_FLAG_AP_STARTED, &wlvif->flags))
146 			return wl->system_hlid;
147 
148 		hdr = (struct ieee80211_hdr *)skb->data;
149 		if (ieee80211_is_mgmt(hdr->frame_control))
150 			return wlvif->ap.global_hlid;
151 		else
152 			return wlvif->ap.bcast_hlid;
153 	}
154 }
155 
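/*
 * Pick the host link id for an outgoing frame on any interface type:
 * dummy packets and frames without a vif go to the system link, AP
 * interfaces use the AP mapping above, and stations use their peer link
 * once associated (pre-association frames such as auth/assoc requests
 * go through the device link).
 */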
156 u8 wl12xx_tx_get_hlid(struct wl1271 *wl, struct wl12xx_vif *wlvif,
157 		      struct sk_buff *skb)
158 {
159 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
160 
161 	if (!wlvif || wl12xx_is_dummy_packet(wl, skb))
162 		return wl->system_hlid;
163 
164 	if (wlvif->bss_type == BSS_TYPE_AP_BSS)
165 		return wl12xx_tx_get_hlid_ap(wl, wlvif, skb);
166 
167 	if ((test_bit(WLVIF_FLAG_STA_ASSOCIATED, &wlvif->flags) ||
168 	     test_bit(WLVIF_FLAG_IBSS_JOINED, &wlvif->flags)) &&
169 	    !ieee80211_is_auth(hdr->frame_control) &&
170 	    !ieee80211_is_assoc_req(hdr->frame_control))
171 		return wlvif->sta.hlid;
172 	else
173 		return wlvif->dev_hlid;
174 }
175 
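/*
 * Round a packet length up to what the bus expects: whole bus blocks on
 * parts with the blocksize-alignment quirk, otherwise the generic TX
 * alignment. With a 256-byte bus block, for example, a 1500-byte frame
 * is padded to 1536 bytes.
 */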
176 unsigned int wlcore_calc_packet_alignment(struct wl1271 *wl,
177 					  unsigned int packet_length)
178 {
179 	if (wl->quirks & WLCORE_QUIRK_TX_BLOCKSIZE_ALIGN)
180 		return ALIGN(packet_length, WL12XX_BUS_BLOCK_SIZE);
181 	else
182 		return ALIGN(packet_length, WL1271_TX_ALIGN_TO);
183 }
184 EXPORT_SYMBOL(wlcore_calc_packet_alignment);
185 
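/*
 * Reserve the resources needed to send one frame: a descriptor id, room
 * in the host aggregation buffer and the firmware memory blocks the
 * frame will occupy. Returns -EAGAIN when the aggregation buffer is
 * full and -EBUSY when the firmware is out of blocks or descriptors.
 */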
186 static int wl1271_tx_allocate(struct wl1271 *wl, struct wl12xx_vif *wlvif,
187 			      struct sk_buff *skb, u32 extra, u32 buf_offset,
188 			      u8 hlid)
189 {
190 	struct wl1271_tx_hw_descr *desc;
191 	u32 total_len = skb->len + sizeof(struct wl1271_tx_hw_descr) + extra;
192 	u32 total_blocks;
193 	int id, ret = -EBUSY, ac;
194 	u32 spare_blocks = wl->normal_tx_spare;
195 	bool is_dummy = false;
196 
197 	if (buf_offset + total_len > WL1271_AGGR_BUFFER_SIZE)
198 		return -EAGAIN;
199 
200 	/* allocate free identifier for the packet */
201 	id = wl1271_alloc_tx_id(wl, skb);
202 	if (id < 0)
203 		return id;
204 
205 	if (unlikely(wl12xx_is_dummy_packet(wl, skb)))
206 		is_dummy = true;
207 	else if (wlvif->is_gem)
208 		spare_blocks = wl->gem_tx_spare;
209 
210 	total_blocks = wlcore_hw_calc_tx_blocks(wl, total_len, spare_blocks);
211 
212 	if (total_blocks <= wl->tx_blocks_available) {
213 		desc = (struct wl1271_tx_hw_descr *)skb_push(
214 			skb, total_len - skb->len);
215 
216 		wlcore_hw_set_tx_desc_blocks(wl, desc, total_blocks,
217 					     spare_blocks);
218 
219 		desc->id = id;
220 
221 		wl->tx_blocks_available -= total_blocks;
222 		wl->tx_allocated_blocks += total_blocks;
223 
224 		/* If the FW was empty before, arm the Tx watchdog */
225 		if (wl->tx_allocated_blocks == total_blocks)
226 			wl12xx_rearm_tx_watchdog_locked(wl);
227 
228 		ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
229 		wl->tx_allocated_pkts[ac]++;
230 
231 		if (!is_dummy && wlvif &&
232 		    wlvif->bss_type == BSS_TYPE_AP_BSS &&
233 		    test_bit(hlid, wlvif->ap.sta_hlid_map))
234 			wl->links[hlid].allocated_pkts++;
235 
236 		ret = 0;
237 
238 		wl1271_debug(DEBUG_TX,
239 			     "tx_allocate: size: %d, blocks: %d, id: %d",
240 			     total_len, total_blocks, id);
241 	} else {
242 		wl1271_free_tx_id(wl, id);
243 	}
244 
245 	return ret;
246 }
247 
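/*
 * Fill in the hardware TX descriptor that precedes the frame: relocate
 * the 802.11 header when extra security headroom was added, and set the
 * life time, TID, session counter, rate policy and link id the firmware
 * should use for this packet.
 */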
248 static void wl1271_tx_fill_hdr(struct wl1271 *wl, struct wl12xx_vif *wlvif,
249 			       struct sk_buff *skb, u32 extra,
250 			       struct ieee80211_tx_info *control, u8 hlid)
251 {
252 	struct timespec ts;
253 	struct wl1271_tx_hw_descr *desc;
254 	int ac, rate_idx;
255 	s64 hosttime;
256 	u16 tx_attr = 0;
257 	__le16 frame_control;
258 	struct ieee80211_hdr *hdr;
259 	u8 *frame_start;
260 	bool is_dummy;
261 
262 	desc = (struct wl1271_tx_hw_descr *) skb->data;
263 	frame_start = (u8 *)(desc + 1);
264 	hdr = (struct ieee80211_hdr *)(frame_start + extra);
265 	frame_control = hdr->frame_control;
266 
267 	/* relocate the 802.11 header to make room for the security header */
268 	if (extra) {
269 		int hdrlen = ieee80211_hdrlen(frame_control);
270 		memmove(frame_start, hdr, hdrlen);
271 	}
272 
273 	/* configure packet life time */
274 	getnstimeofday(&ts);
275 	hosttime = (timespec_to_ns(&ts) >> 10);
276 	desc->start_time = cpu_to_le32(hosttime - wl->time_offset);
277 
278 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
279 	if (is_dummy || !wlvif || wlvif->bss_type != BSS_TYPE_AP_BSS)
280 		desc->life_time = cpu_to_le16(TX_HW_MGMT_PKT_LIFETIME_TU);
281 	else
282 		desc->life_time = cpu_to_le16(TX_HW_AP_MODE_PKT_LIFETIME_TU);
283 
284 	/* queue */
285 	ac = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
286 	desc->tid = skb->priority;
287 
288 	if (is_dummy) {
289 		/*
290 		 * FW expects the dummy packet to have an invalid session id -
291 		 * any session id that is different from the one set in the join
292 		 */
293 		tx_attr = (SESSION_COUNTER_INVALID <<
294 			   TX_HW_ATTR_OFST_SESSION_COUNTER) &
295 			   TX_HW_ATTR_SESSION_COUNTER;
296 
297 		tx_attr |= TX_HW_ATTR_TX_DUMMY_REQ;
298 	} else if (wlvif) {
299 		/* configure the tx attributes */
300 		tx_attr = wlvif->session_counter <<
301 			  TX_HW_ATTR_OFST_SESSION_COUNTER;
302 	}
303 
304 	desc->hlid = hlid;
305 	if (is_dummy || !wlvif)
306 		rate_idx = 0;
307 	else if (wlvif->bss_type != BSS_TYPE_AP_BSS) {
308 		/* if the packets are destined for the AP (have a STA entry),
309 		   send them with AP rate policies, otherwise use the default
310 		   basic rates */
311 		if (control->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
312 			rate_idx = wlvif->sta.p2p_rate_idx;
313 		else if (control->control.sta)
314 			rate_idx = wlvif->sta.ap_rate_idx;
315 		else
316 			rate_idx = wlvif->sta.basic_rate_idx;
317 	} else {
318 		if (hlid == wlvif->ap.global_hlid)
319 			rate_idx = wlvif->ap.mgmt_rate_idx;
320 		else if (hlid == wlvif->ap.bcast_hlid)
321 			rate_idx = wlvif->ap.bcast_rate_idx;
322 		else
323 			rate_idx = wlvif->ap.ucast_rate_idx[ac];
324 	}
325 
326 	tx_attr |= rate_idx << TX_HW_ATTR_OFST_RATE_POLICY;
327 
328 	/* for WEP shared auth - no fw encryption is needed */
329 	if (ieee80211_is_auth(frame_control) &&
330 	    ieee80211_has_protected(frame_control))
331 		tx_attr |= TX_HW_ATTR_HOST_ENCRYPT;
332 
333 	desc->reserved = 0;
334 	desc->tx_attr = cpu_to_le16(tx_attr);
335 
336 	wlcore_hw_set_tx_desc_data_len(wl, desc, skb);
337 }
338 
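/*
 * Prepare a single frame for transmission: program the default WEP key
 * if needed, allocate firmware resources, build the hardware descriptor
 * and copy the padded frame into the aggregation buffer at buf_offset.
 * Returns the aligned length consumed in the buffer, or a negative
 * error code.
 */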
339 /* caller must hold wl->mutex */
340 static int wl1271_prepare_tx_frame(struct wl1271 *wl, struct wl12xx_vif *wlvif,
341 				   struct sk_buff *skb, u32 buf_offset)
342 {
343 	struct ieee80211_tx_info *info;
344 	u32 extra = 0;
345 	int ret = 0;
346 	u32 total_len;
347 	u8 hlid;
348 	bool is_dummy;
349 
350 	if (!skb)
351 		return -EINVAL;
352 
353 	info = IEEE80211_SKB_CB(skb);
354 
355 	/* TODO: handle dummy packets on multi-vifs */
356 	is_dummy = wl12xx_is_dummy_packet(wl, skb);
357 
358 	if (info->control.hw_key &&
359 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP)
360 		extra = WL1271_EXTRA_SPACE_TKIP;
361 
362 	if (info->control.hw_key) {
363 		bool is_wep;
364 		u8 idx = info->control.hw_key->hw_key_idx;
365 		u32 cipher = info->control.hw_key->cipher;
366 
367 		is_wep = (cipher == WLAN_CIPHER_SUITE_WEP40) ||
368 			 (cipher == WLAN_CIPHER_SUITE_WEP104);
369 
370 		if (unlikely(is_wep && wlvif->default_key != idx)) {
371 			ret = wl1271_set_default_wep_key(wl, wlvif, idx);
372 			if (ret < 0)
373 				return ret;
374 			wlvif->default_key = idx;
375 		}
376 	}
377 	hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
378 	if (hlid == WL12XX_INVALID_LINK_ID) {
379 		wl1271_error("invalid hlid. dropping skb 0x%p", skb);
380 		return -EINVAL;
381 	}
382 
383 	ret = wl1271_tx_allocate(wl, wlvif, skb, extra, buf_offset, hlid);
384 	if (ret < 0)
385 		return ret;
386 
387 	wl1271_tx_fill_hdr(wl, wlvif, skb, extra, info, hlid);
388 
389 	if (!is_dummy && wlvif && wlvif->bss_type == BSS_TYPE_AP_BSS) {
390 		wl1271_tx_ap_update_inconnection_sta(wl, skb);
391 		wl1271_tx_regulate_link(wl, wlvif, hlid);
392 	}
393 
394 	/*
395 	 * The length of each packet is stored in terms of
396 	 * words. Thus, we must pad the skb data to make sure its
397 	 * length is aligned.  The number of padding bytes is computed
398 	 * and set in wl1271_tx_fill_hdr.
399 	 * In special cases, we want to align to a specific block size
400 	 * (e.g. for wl128x with SDIO we align to 256).
401 	 */
402 	total_len = wlcore_calc_packet_alignment(wl, skb->len);
403 
404 	memcpy(wl->aggr_buf + buf_offset, skb->data, skb->len);
405 	memset(wl->aggr_buf + buf_offset + skb->len, 0, total_len - skb->len);
406 
407 	/* Revert side effects in the dummy packet skb, so it can be reused */
408 	if (is_dummy)
409 		skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
410 
411 	return total_len;
412 }
413 
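/*
 * Translate a mac80211 rate bitmap for the given band into the driver's
 * CONF_HW_BIT_RATE_* mask: legacy rates map through the band's
 * hw_value, HT MCS 0-7 map to the corresponding MCS bits.
 */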
414 u32 wl1271_tx_enabled_rates_get(struct wl1271 *wl, u32 rate_set,
415 				enum ieee80211_band rate_band)
416 {
417 	struct ieee80211_supported_band *band;
418 	u32 enabled_rates = 0;
419 	int bit;
420 
421 	band = wl->hw->wiphy->bands[rate_band];
422 	for (bit = 0; bit < band->n_bitrates; bit++) {
423 		if (rate_set & 0x1)
424 			enabled_rates |= band->bitrates[bit].hw_value;
425 		rate_set >>= 1;
426 	}
427 
428 	/* MCS rates are indicated on bits 16 - 23 */
429 	rate_set >>= HW_HT_RATES_OFFSET - band->n_bitrates;
430 
431 	for (bit = 0; bit < 8; bit++) {
432 		if (rate_set & 0x1)
433 			enabled_rates |= (CONF_HW_BIT_RATE_MCS_0 << bit);
434 		rate_set >>= 1;
435 	}
436 
437 	return enabled_rates;
438 }
439 
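/*
 * Wake any mac80211 queue that was stopped once its driver queue count
 * drops back to the low watermark.
 */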
440 void wl1271_handle_tx_low_watermark(struct wl1271 *wl)
441 {
442 	unsigned long flags;
443 	int i;
444 
445 	for (i = 0; i < NUM_TX_QUEUES; i++) {
446 		if (test_bit(i, &wl->stopped_queues_map) &&
447 		    wl->tx_queue_count[i] <= WL1271_TX_QUEUE_LOW_WATERMARK) {
448 			/* firmware buffer has space, restart queues */
449 			spin_lock_irqsave(&wl->wl_lock, flags);
450 			ieee80211_wake_queue(wl->hw,
451 					     wl1271_tx_get_mac80211_queue(i));
452 			clear_bit(i, &wl->stopped_queues_map);
453 			spin_unlock_irqrestore(&wl->wl_lock, flags);
454 		}
455 	}
456 }
457 
458 static struct sk_buff_head *wl1271_select_queue(struct wl1271 *wl,
459 						struct sk_buff_head *queues)
460 {
461 	int i, q = -1, ac;
462 	u32 min_pkts = 0xffffffff;
463 
464 	/*
465 	 * Find a non-empty AC where:
466 	 * 1. There are packets to transmit
467 	 * 2. The FW has the fewest allocated packets
468 	 *
469 	 * We prioritize the ACs according to VO>VI>BE>BK.
470 	 */
471 	for (i = 0; i < NUM_TX_QUEUES; i++) {
472 		ac = wl1271_tx_get_queue(i);
473 		if (!skb_queue_empty(&queues[ac]) &&
474 		    (wl->tx_allocated_pkts[ac] < min_pkts)) {
475 			q = ac;
476 			min_pkts = wl->tx_allocated_pkts[q];
477 		}
478 	}
479 
480 	if (q == -1)
481 		return NULL;
482 
483 	return &queues[q];
484 }
485 
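/*
 * Dequeue the next frame for a single link, taking it from the access
 * category chosen by wl1271_select_queue() and keeping the per-AC queue
 * counters in sync.
 */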
486 static struct sk_buff *wl12xx_lnk_skb_dequeue(struct wl1271 *wl,
487 					      struct wl1271_link *lnk)
488 {
489 	struct sk_buff *skb;
490 	unsigned long flags;
491 	struct sk_buff_head *queue;
492 
493 	queue = wl1271_select_queue(wl, lnk->tx_queue);
494 	if (!queue)
495 		return NULL;
496 
497 	skb = skb_dequeue(queue);
498 	if (skb) {
499 		int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
500 		spin_lock_irqsave(&wl->wl_lock, flags);
501 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
502 		wl->tx_queue_count[q]--;
503 		spin_unlock_irqrestore(&wl->wl_lock, flags);
504 	}
505 
506 	return skb;
507 }
508 
509 static struct sk_buff *wl12xx_vif_skb_dequeue(struct wl1271 *wl,
510 					      struct wl12xx_vif *wlvif)
511 {
512 	struct sk_buff *skb = NULL;
513 	int i, h, start_hlid;
514 
515 	/* start from the link after the last one */
516 	start_hlid = (wlvif->last_tx_hlid + 1) % WL12XX_MAX_LINKS;
517 
518 	/* dequeue according to AC, round robin on each link */
519 	for (i = 0; i < WL12XX_MAX_LINKS; i++) {
520 		h = (start_hlid + i) % WL12XX_MAX_LINKS;
521 
522 		/* only consider connected stations */
523 		if (!test_bit(h, wlvif->links_map))
524 			continue;
525 
526 		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[h]);
527 		if (!skb)
528 			continue;
529 
530 		wlvif->last_tx_hlid = h;
531 		break;
532 	}
533 
534 	if (!skb)
535 		wlvif->last_tx_hlid = 0;
536 
537 	return skb;
538 }
539 
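/*
 * Global dequeue: round-robin over the vifs (continuing from the last
 * one served), then the system link, then a fresh pass over the whole
 * vif list, and finally the pending dummy packet if the firmware asked
 * for one.
 */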
540 static struct sk_buff *wl1271_skb_dequeue(struct wl1271 *wl)
541 {
542 	unsigned long flags;
543 	struct wl12xx_vif *wlvif = wl->last_wlvif;
544 	struct sk_buff *skb = NULL;
545 
546 	/* continue from last wlvif (round robin) */
547 	if (wlvif) {
548 		wl12xx_for_each_wlvif_continue(wl, wlvif) {
549 			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
550 			if (skb) {
551 				wl->last_wlvif = wlvif;
552 				break;
553 			}
554 		}
555 	}
556 
557 	/* dequeue from the system HLID before restarting the wlvif list */
558 	if (!skb)
559 		skb = wl12xx_lnk_skb_dequeue(wl, &wl->links[wl->system_hlid]);
560 
561 	/* do a new pass over the wlvif list */
562 	if (!skb) {
563 		wl12xx_for_each_wlvif(wl, wlvif) {
564 			skb = wl12xx_vif_skb_dequeue(wl, wlvif);
565 			if (skb) {
566 				wl->last_wlvif = wlvif;
567 				break;
568 			}
569 
570 			/*
571 			 * No need to continue after last_wlvif. The previous
572 			 * pass should have found it.
573 			 */
574 			if (wlvif == wl->last_wlvif)
575 				break;
576 		}
577 	}
578 
579 	if (!skb &&
580 	    test_and_clear_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags)) {
581 		int q;
582 
583 		skb = wl->dummy_packet;
584 		q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
585 		spin_lock_irqsave(&wl->wl_lock, flags);
586 		WARN_ON_ONCE(wl->tx_queue_count[q] <= 0);
587 		wl->tx_queue_count[q]--;
588 		spin_unlock_irqrestore(&wl->wl_lock, flags);
589 	}
590 
591 	return skb;
592 }
593 
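/*
 * Put a frame back at the head of its queue (used when the aggregation
 * or firmware buffers fill up) so that it is the first one dequeued on
 * the next round.
 */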
594 static void wl1271_skb_queue_head(struct wl1271 *wl, struct wl12xx_vif *wlvif,
595 				  struct sk_buff *skb)
596 {
597 	unsigned long flags;
598 	int q = wl1271_tx_get_queue(skb_get_queue_mapping(skb));
599 
600 	if (wl12xx_is_dummy_packet(wl, skb)) {
601 		set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
602 	} else {
603 		u8 hlid = wl12xx_tx_get_hlid(wl, wlvif, skb);
604 		skb_queue_head(&wl->links[hlid].tx_queue[q], skb);
605 
606 		/* make sure we dequeue the same packet next time */
607 		wlvif->last_tx_hlid = (hlid + WL12XX_MAX_LINKS - 1) %
608 				      WL12XX_MAX_LINKS;
609 	}
610 
611 	spin_lock_irqsave(&wl->wl_lock, flags);
612 	wl->tx_queue_count[q]++;
613 	spin_unlock_irqrestore(&wl->wl_lock, flags);
614 }
615 
616 static bool wl1271_tx_is_data_present(struct sk_buff *skb)
617 {
618 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)(skb->data);
619 
620 	return ieee80211_is_data_present(hdr->frame_control);
621 }
622 
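/*
 * (Re)arm the RX streaming timer on every station vif that had TX
 * activity on one of its links, according to the rx_streaming
 * configuration.
 */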
623 void wl12xx_rearm_rx_streaming(struct wl1271 *wl, unsigned long *active_hlids)
624 {
625 	struct wl12xx_vif *wlvif;
626 	u32 timeout;
627 	u8 hlid;
628 
629 	if (!wl->conf.rx_streaming.interval)
630 		return;
631 
632 	if (!wl->conf.rx_streaming.always &&
633 	    !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags))
634 		return;
635 
636 	timeout = wl->conf.rx_streaming.duration;
637 	wl12xx_for_each_wlvif_sta(wl, wlvif) {
638 		bool found = false;
639 		for_each_set_bit(hlid, active_hlids, WL12XX_MAX_LINKS) {
640 			if (test_bit(hlid, wlvif->links_map)) {
641 				found = true;
642 				break;
643 			}
644 		}
645 
646 		if (!found)
647 			continue;
648 
649 		/* enable rx streaming */
650 		if (!test_bit(WLVIF_FLAG_RX_STREAMING_STARTED, &wlvif->flags))
651 			ieee80211_queue_work(wl->hw,
652 					     &wlvif->rx_streaming_enable_work);
653 
654 		mod_timer(&wlvif->rx_streaming_timer,
655 			  jiffies + msecs_to_jiffies(timeout));
656 	}
657 }
658 
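/*
 * Main TX path: drain the queues, prepare each frame into the
 * aggregation buffer and flush the buffer to the device whenever it
 * fills up or nothing is left to send.
 */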
659 void wl1271_tx_work_locked(struct wl1271 *wl)
660 {
661 	struct wl12xx_vif *wlvif;
662 	struct sk_buff *skb;
663 	struct wl1271_tx_hw_descr *desc;
664 	u32 buf_offset = 0;
665 	bool sent_packets = false;
666 	unsigned long active_hlids[BITS_TO_LONGS(WL12XX_MAX_LINKS)] = {0};
667 	int ret;
668 
669 	if (unlikely(wl->state == WL1271_STATE_OFF))
670 		return;
671 
672 	while ((skb = wl1271_skb_dequeue(wl))) {
673 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
674 		bool has_data = false;
675 
676 		wlvif = NULL;
677 		if (!wl12xx_is_dummy_packet(wl, skb) && info->control.vif)
678 			wlvif = wl12xx_vif_to_data(info->control.vif);
679 
680 		has_data = wlvif && wl1271_tx_is_data_present(skb);
681 		ret = wl1271_prepare_tx_frame(wl, wlvif, skb, buf_offset);
682 		if (ret == -EAGAIN) {
683 			/*
684 			 * Aggregation buffer is full.
685 			 * Flush buffer and try again.
686 			 */
687 			wl1271_skb_queue_head(wl, wlvif, skb);
688 			wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
689 					  buf_offset, true);
690 			sent_packets = true;
691 			buf_offset = 0;
692 			continue;
693 		} else if (ret == -EBUSY) {
694 			/*
695 			 * Firmware buffer is full.
696 			 * Queue back last skb, and stop aggregating.
697 			 */
698 			wl1271_skb_queue_head(wl, wlvif, skb);
699 			/* No work left, avoid scheduling redundant tx work */
700 			set_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);
701 			goto out_ack;
702 		} else if (ret < 0) {
703 			if (wl12xx_is_dummy_packet(wl, skb))
704 				/*
705 				 * fw still expects dummy packet,
706 				 * so re-enqueue it
707 				 */
708 				wl1271_skb_queue_head(wl, wlvif, skb);
709 			else
710 				ieee80211_free_txskb(wl->hw, skb);
711 			goto out_ack;
712 		}
713 		buf_offset += ret;
714 		wl->tx_packets_count++;
715 		if (has_data) {
716 			desc = (struct wl1271_tx_hw_descr *) skb->data;
717 			__set_bit(desc->hlid, active_hlids);
718 		}
719 	}
720 
721 out_ack:
722 	if (buf_offset) {
723 		wlcore_write_data(wl, REG_SLV_MEM_DATA, wl->aggr_buf,
724 				  buf_offset, true);
725 		sent_packets = true;
726 	}
727 	if (sent_packets) {
728 		/*
729 		 * Interrupt the firmware with the new packets. This is only
730 		 * required for older hardware revisions
731 		 */
732 		if (wl->quirks & WLCORE_QUIRK_END_OF_TRANSACTION)
733 			wl1271_write32(wl, WL12XX_HOST_WR_ACCESS,
734 				       wl->tx_packets_count);
735 
736 		wl1271_handle_tx_low_watermark(wl);
737 	}
738 	wl12xx_rearm_rx_streaming(wl, active_hlids);
739 }
740 
741 void wl1271_tx_work(struct work_struct *work)
742 {
743 	struct wl1271 *wl = container_of(work, struct wl1271, tx_work);
744 	int ret;
745 
746 	mutex_lock(&wl->mutex);
747 	ret = wl1271_ps_elp_wakeup(wl);
748 	if (ret < 0)
749 		goto out;
750 
751 	wl1271_tx_work_locked(wl);
752 
753 	wl1271_ps_elp_sleep(wl);
754 out:
755 	mutex_unlock(&wl->mutex);
756 }
757 
758 static u8 wl1271_tx_get_rate_flags(u8 rate_class_index)
759 {
760 	u8 flags = 0;
761 
762 	/*
763 	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
764 	 * only it uses Tx-completion.
765 	 */
766 	if (rate_class_index <= 8)
767 		flags |= IEEE80211_TX_RC_MCS;
768 
769 	/*
770 	 * TODO: use wl12xx constants when this code is moved to wl12xx, as
771 	 * only it uses Tx-completion.
772 	 */
773 	if (rate_class_index == 0)
774 		flags |= IEEE80211_TX_RC_SHORT_GI;
775 
776 	return flags;
777 }
778 
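/*
 * Handle one TX result descriptor: translate the firmware status into
 * mac80211 TX status info, track the security sequence number, strip
 * the private headers and hand the skb back to the stack via the
 * deferred queue.
 */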
779 static void wl1271_tx_complete_packet(struct wl1271 *wl,
780 				      struct wl1271_tx_hw_res_descr *result)
781 {
782 	struct ieee80211_tx_info *info;
783 	struct ieee80211_vif *vif;
784 	struct wl12xx_vif *wlvif;
785 	struct sk_buff *skb;
786 	int id = result->id;
787 	int rate = -1;
788 	u8 rate_flags = 0;
789 	u8 retries = 0;
790 
791 	/* check that the id is valid */
792 	if (unlikely(id >= wl->num_tx_desc || wl->tx_frames[id] == NULL)) {
793 		wl1271_warning("TX result illegal id: %d", id);
794 		return;
795 	}
796 
797 	skb = wl->tx_frames[id];
798 	info = IEEE80211_SKB_CB(skb);
799 
800 	if (wl12xx_is_dummy_packet(wl, skb)) {
801 		wl1271_free_tx_id(wl, id);
802 		return;
803 	}
804 
805 	/* info->control is valid as long as we don't update info->status */
806 	vif = info->control.vif;
807 	wlvif = wl12xx_vif_to_data(vif);
808 
809 	/* update the TX status info */
810 	if (result->status == TX_SUCCESS) {
811 		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK))
812 			info->flags |= IEEE80211_TX_STAT_ACK;
813 		rate = wlcore_rate_to_idx(wl, result->rate_class_index,
814 					  wlvif->band);
815 		rate_flags = wl1271_tx_get_rate_flags(result->rate_class_index);
816 		retries = result->ack_failures;
817 	} else if (result->status == TX_RETRY_EXCEEDED) {
818 		wl->stats.excessive_retries++;
819 		retries = result->ack_failures;
820 	}
821 
822 	info->status.rates[0].idx = rate;
823 	info->status.rates[0].count = retries;
824 	info->status.rates[0].flags = rate_flags;
825 	info->status.ack_signal = -1;
826 
827 	wl->stats.retry_count += result->ack_failures;
828 
829 	/*
830 	 * update sequence number only when relevant, i.e. only in
831 	 * sessions of TKIP, AES and GEM (not in open or WEP sessions)
832 	 */
833 	if (info->control.hw_key &&
834 	    (info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP ||
835 	     info->control.hw_key->cipher == WLAN_CIPHER_SUITE_CCMP ||
836 	     info->control.hw_key->cipher == WL1271_CIPHER_SUITE_GEM)) {
837 		u8 fw_lsb = result->tx_security_sequence_number_lsb;
838 		u8 cur_lsb = wlvif->tx_security_last_seq_lsb;
839 
840 		/*
841 		 * update security sequence number, taking care of potential
842 		 * wrap-around
843 		 */
844 		wlvif->tx_security_seq += (fw_lsb - cur_lsb) & 0xff;
845 		wlvif->tx_security_last_seq_lsb = fw_lsb;
846 	}
847 
848 	/* remove private header from packet */
849 	skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
850 
851 	/* remove TKIP header space if present */
852 	if (info->control.hw_key &&
853 	    info->control.hw_key->cipher == WLAN_CIPHER_SUITE_TKIP) {
854 		int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
855 		memmove(skb->data + WL1271_EXTRA_SPACE_TKIP, skb->data,
856 			hdrlen);
857 		skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
858 	}
859 
860 	wl1271_debug(DEBUG_TX, "tx status id %u skb 0x%p failures %u rate 0x%x"
861 		     " status 0x%x",
862 		     result->id, skb, result->ack_failures,
863 		     result->rate_class_index, result->status);
864 
865 	/* return the packet to the stack */
866 	skb_queue_tail(&wl->deferred_tx_queue, skb);
867 	queue_work(wl->freezable_wq, &wl->netstack_work);
868 	wl1271_free_tx_id(wl, result->id);
869 }
870 
871 /* Called upon reception of a TX complete interrupt */
872 void wl1271_tx_complete(struct wl1271 *wl)
873 {
874 	struct wl1271_acx_mem_map *memmap =
875 		(struct wl1271_acx_mem_map *)wl->target_mem_map;
876 	u32 count, fw_counter;
877 	u32 i;
878 
879 	/* read the tx results from the chipset */
880 	wl1271_read(wl, le32_to_cpu(memmap->tx_result),
881 		    wl->tx_res_if, sizeof(*wl->tx_res_if), false);
882 	fw_counter = le32_to_cpu(wl->tx_res_if->tx_result_fw_counter);
883 
884 	/* write host counter to chipset (to ack) */
885 	wl1271_write32(wl, le32_to_cpu(memmap->tx_result) +
886 		       offsetof(struct wl1271_tx_hw_res_if,
887 				tx_result_host_counter), fw_counter);
888 
889 	count = fw_counter - wl->tx_results_count;
890 	wl1271_debug(DEBUG_TX, "tx_complete received, packets: %d", count);
891 
892 	/* verify that the result buffer is not getting overrun */
893 	if (unlikely(count > TX_HW_RESULT_QUEUE_LEN))
894 		wl1271_warning("TX result overflow from chipset: %d", count);
895 
896 	/* process the results */
897 	for (i = 0; i < count; i++) {
898 		struct wl1271_tx_hw_res_descr *result;
899 		u8 offset = wl->tx_results_count & TX_HW_RESULT_QUEUE_LEN_MASK;
900 
901 		/* process the packet */
902 		result = &(wl->tx_res_if->tx_results_queue[offset]);
903 		wl1271_tx_complete_packet(wl, result);
904 
905 		wl->tx_results_count++;
906 	}
907 }
908 EXPORT_SYMBOL(wl1271_tx_complete);
909 
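/*
 * Drop everything still queued for a single link, reporting each frame
 * back to mac80211 as not acknowledged, and update the global queue
 * counters accordingly.
 */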
910 void wl1271_tx_reset_link_queues(struct wl1271 *wl, u8 hlid)
911 {
912 	struct sk_buff *skb;
913 	int i;
914 	unsigned long flags;
915 	struct ieee80211_tx_info *info;
916 	int total[NUM_TX_QUEUES];
917 
918 	for (i = 0; i < NUM_TX_QUEUES; i++) {
919 		total[i] = 0;
920 		while ((skb = skb_dequeue(&wl->links[hlid].tx_queue[i]))) {
921 			wl1271_debug(DEBUG_TX, "link freeing skb 0x%p", skb);
922 
923 			if (!wl12xx_is_dummy_packet(wl, skb)) {
924 				info = IEEE80211_SKB_CB(skb);
925 				info->status.rates[0].idx = -1;
926 				info->status.rates[0].count = 0;
927 				ieee80211_tx_status_ni(wl->hw, skb);
928 			}
929 
930 			total[i]++;
931 		}
932 	}
933 
934 	spin_lock_irqsave(&wl->wl_lock, flags);
935 	for (i = 0; i < NUM_TX_QUEUES; i++)
936 		wl->tx_queue_count[i] -= total[i];
937 	spin_unlock_irqrestore(&wl->wl_lock, flags);
938 
939 	wl1271_handle_tx_low_watermark(wl);
940 }
941 
942 /* caller must hold wl->mutex and TX must be stopped */
943 void wl12xx_tx_reset_wlvif(struct wl1271 *wl, struct wl12xx_vif *wlvif)
944 {
945 	int i;
946 
947 	/* TX failure */
948 	for_each_set_bit(i, wlvif->links_map, WL12XX_MAX_LINKS) {
949 		if (wlvif->bss_type == BSS_TYPE_AP_BSS)
950 			wl1271_free_sta(wl, wlvif, i);
951 		else
952 			wlvif->sta.ba_rx_bitmap = 0;
953 
954 		wl->links[i].allocated_pkts = 0;
955 		wl->links[i].prev_freed_pkts = 0;
956 	}
957 	wlvif->last_tx_hlid = 0;
958 }
959 
960 /* caller must hold wl->mutex and TX must be stopped */
961 void wl12xx_tx_reset(struct wl1271 *wl, bool reset_tx_queues)
962 {
963 	int i;
964 	struct sk_buff *skb;
965 	struct ieee80211_tx_info *info;
966 
967 	/* only reset the queues if something bad happened */
968 	if (WARN_ON_ONCE(wl1271_tx_total_queue_count(wl) != 0)) {
969 		for (i = 0; i < WL12XX_MAX_LINKS; i++)
970 			wl1271_tx_reset_link_queues(wl, i);
971 
972 		for (i = 0; i < NUM_TX_QUEUES; i++)
973 			wl->tx_queue_count[i] = 0;
974 	}
975 
976 	wl->stopped_queues_map = 0;
977 
978 	/*
979 	 * Make sure the driver is at a consistent state, in case this
980 	 * function is called from a context other than interface removal.
981 	 * This call will always wake the TX queues.
982 	 */
983 	if (reset_tx_queues)
984 		wl1271_handle_tx_low_watermark(wl);
985 
986 	for (i = 0; i < wl->num_tx_desc; i++) {
987 		if (wl->tx_frames[i] == NULL)
988 			continue;
989 
990 		skb = wl->tx_frames[i];
991 		wl1271_free_tx_id(wl, i);
992 		wl1271_debug(DEBUG_TX, "freeing skb 0x%p", skb);
993 
994 		if (!wl12xx_is_dummy_packet(wl, skb)) {
995 			/*
996 			 * Remove private headers before passing the skb to
997 			 * mac80211
998 			 */
999 			info = IEEE80211_SKB_CB(skb);
1000 			skb_pull(skb, sizeof(struct wl1271_tx_hw_descr));
1001 			if (info->control.hw_key &&
1002 			    info->control.hw_key->cipher ==
1003 			    WLAN_CIPHER_SUITE_TKIP) {
1004 				int hdrlen = ieee80211_get_hdrlen_from_skb(skb);
1005 				memmove(skb->data + WL1271_EXTRA_SPACE_TKIP,
1006 					skb->data, hdrlen);
1007 				skb_pull(skb, WL1271_EXTRA_SPACE_TKIP);
1008 			}
1009 
1010 			info->status.rates[0].idx = -1;
1011 			info->status.rates[0].count = 0;
1012 
1013 			ieee80211_tx_status_ni(wl->hw, skb);
1014 		}
1015 	}
1016 }
1017 
1018 #define WL1271_TX_FLUSH_TIMEOUT 500000
1019 
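/*
 * Wait up to WL1271_TX_FLUSH_TIMEOUT microseconds for all queued and
 * in-flight frames to drain; if the timeout expires, forcibly drop
 * whatever is still queued on the links.
 */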
1020 /* caller must *NOT* hold wl->mutex */
1021 void wl1271_tx_flush(struct wl1271 *wl)
1022 {
1023 	unsigned long timeout;
1024 	int i;
1025 	timeout = jiffies + usecs_to_jiffies(WL1271_TX_FLUSH_TIMEOUT);
1026 
1027 	while (!time_after(jiffies, timeout)) {
1028 		mutex_lock(&wl->mutex);
1029 		wl1271_debug(DEBUG_TX, "flushing tx buffer: %d %d",
1030 			     wl->tx_frames_cnt,
1031 			     wl1271_tx_total_queue_count(wl));
1032 		if ((wl->tx_frames_cnt == 0) &&
1033 		    (wl1271_tx_total_queue_count(wl) == 0)) {
1034 			mutex_unlock(&wl->mutex);
1035 			return;
1036 		}
1037 		mutex_unlock(&wl->mutex);
1038 		msleep(1);
1039 	}
1040 
1041 	wl1271_warning("Unable to flush all TX buffers, timed out.");
1042 
1043 	/* forcibly flush all Tx buffers on our queues */
1044 	mutex_lock(&wl->mutex);
1045 	for (i = 0; i < WL12XX_MAX_LINKS; i++)
1046 		wl1271_tx_reset_link_queues(wl, i);
1047 	mutex_unlock(&wl->mutex);
1048 }
1049 
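/*
 * Return the lowest rate in rate_set as a single-bit rate mask.
 */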
1050 u32 wl1271_tx_min_rate_get(struct wl1271 *wl, u32 rate_set)
1051 {
1052 	if (WARN_ON(!rate_set))
1053 		return 0;
1054 
1055 	return BIT(__ffs(rate_set));
1056 }
1057