xref: /linux/drivers/net/wireless/ath/ath6kl/txrx.c (revision 1b2df4073447234034e2329f0df584c6346a8ec3)
1 /*
2  * Copyright (c) 2004-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2012 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "core.h"
19 #include "debug.h"
20 
21 /*
22  * tid - bits 0..3 of tid_mux
23  * aid - bits 4..7 of tid_mux
24  */
25 #define ATH6KL_TID_MASK 0xf
26 #define ATH6KL_AID_SHIFT 4
27 
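/* Example: a tid_mux value of 0x53 decodes to aid 5 and tid 3. */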
28 static inline u8 ath6kl_get_tid(u8 tid_mux)
29 {
30 	return tid_mux & ATH6KL_TID_MASK;
31 }
32 
33 static inline u8 ath6kl_get_aid(u8 tid_mux)
34 {
35 	return tid_mux >> ATH6KL_AID_SHIFT;
36 }
37 
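/*
 * Map an outgoing IBSS frame to an HTC data endpoint. Multicast frames
 * always use ENDPOINT_2. Unicast destinations are tracked in ar->node_map;
 * a new entry gets the first endpoint (ENDPOINT_2..ENDPOINT_5) with no
 * pending tx, or an endpoint is reused round-robin when all are busy.
 * Returns ENDPOINT_UNUSED when the node map is full. *map_no returns the
 * 1-based node_map index (0 for multicast).
 */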
38 static u8 ath6kl_ibss_map_epid(struct sk_buff *skb, struct net_device *dev,
39 			       u32 *map_no)
40 {
41 	struct ath6kl *ar = ath6kl_priv(dev);
42 	struct ethhdr *eth_hdr;
43 	u32 i, ep_map = -1;
44 	u8 *datap;
45 
46 	*map_no = 0;
47 	datap = skb->data;
48 	eth_hdr = (struct ethhdr *) (datap + sizeof(struct wmi_data_hdr));
49 
50 	if (is_multicast_ether_addr(eth_hdr->h_dest))
51 		return ENDPOINT_2;
52 
53 	for (i = 0; i < ar->node_num; i++) {
54 		if (memcmp(eth_hdr->h_dest, ar->node_map[i].mac_addr,
55 			   ETH_ALEN) == 0) {
56 			*map_no = i + 1;
57 			ar->node_map[i].tx_pend++;
58 			return ar->node_map[i].ep_id;
59 		}
60 
61 		if ((ep_map == -1) && !ar->node_map[i].tx_pend)
62 			ep_map = i;
63 	}
64 
65 	if (ep_map == -1) {
66 		ep_map = ar->node_num;
67 		ar->node_num++;
68 		if (ar->node_num > MAX_NODE_NUM)
69 			return ENDPOINT_UNUSED;
70 	}
71 
72 	memcpy(ar->node_map[ep_map].mac_addr, eth_hdr->h_dest, ETH_ALEN);
73 
74 	for (i = ENDPOINT_2; i <= ENDPOINT_5; i++) {
75 		if (!ar->tx_pending[i]) {
76 			ar->node_map[ep_map].ep_id = i;
77 			break;
78 		}
79 
80 		/*
81 		 * No free endpoint is available; start redistributing over
82 		 * the in-use endpoints.
83 		 */
84 		if (i == ENDPOINT_5) {
85 			ar->node_map[ep_map].ep_id = ar->next_ep_id;
86 			ar->next_ep_id++;
87 			if (ar->next_ep_id > ENDPOINT_5)
88 				ar->next_ep_id = ENDPOINT_2;
89 		}
90 	}
91 
92 	*map_no = ep_map + 1;
93 	ar->node_map[ep_map].tx_pend++;
94 
95 	return ar->node_map[ep_map].ep_id;
96 }
97 
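/*
 * uAPSD handling for an AP-mode tx frame. If the destination STA is being
 * serviced by a uAPSD trigger, only the MORE/EOSP/UAPSD header flags are
 * updated and the frame is sent immediately (returns false). Otherwise,
 * frames belonging to a delivery-enabled AC are buffered on the per-STA
 * apsdq and the target is told about the buffered traffic. Returns true
 * when the skb has been queued (consumed).
 */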
98 static bool ath6kl_process_uapsdq(struct ath6kl_sta *conn,
99 				struct ath6kl_vif *vif,
100 				struct sk_buff *skb,
101 				u32 *flags)
102 {
103 	struct ath6kl *ar = vif->ar;
104 	bool is_apsdq_empty = false;
105 	struct ethhdr *datap = (struct ethhdr *) skb->data;
106 	u8 up = 0, traffic_class, *ip_hdr;
107 	u16 ether_type;
108 	struct ath6kl_llc_snap_hdr *llc_hdr;
109 
110 	if (conn->sta_flags & STA_PS_APSD_TRIGGER) {
111 		/*
112 		 * This tx is because of a uAPSD trigger; determine the
113 		 * MORE and EOSP bits. Set EOSP if the queue is empty or
114 		 * sufficient frames have been delivered for this trigger.
115 		 */
116 		spin_lock_bh(&conn->psq_lock);
117 		if (!skb_queue_empty(&conn->apsdq))
118 			*flags |= WMI_DATA_HDR_FLAGS_MORE;
119 		else if (conn->sta_flags & STA_PS_APSD_EOSP)
120 			*flags |= WMI_DATA_HDR_FLAGS_EOSP;
121 		*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
122 		spin_unlock_bh(&conn->psq_lock);
123 		return false;
124 	} else if (!conn->apsd_info)
125 		return false;
126 
127 	if (test_bit(WMM_ENABLED, &vif->flags)) {
128 		ether_type = be16_to_cpu(datap->h_proto);
129 		if (is_ethertype(ether_type)) {
130 			/* packet is in DIX format  */
131 			ip_hdr = (u8 *)(datap + 1);
132 		} else {
133 			/* packet is in 802.3 format */
134 			llc_hdr = (struct ath6kl_llc_snap_hdr *)
135 							(datap + 1);
136 			ether_type = be16_to_cpu(llc_hdr->eth_type);
137 			ip_hdr = (u8 *)(llc_hdr + 1);
138 		}
139 
140 		if (ether_type == IP_ETHERTYPE)
141 			up = ath6kl_wmi_determine_user_priority(
142 							ip_hdr, 0);
143 	}
144 
145 	traffic_class = ath6kl_wmi_get_traffic_class(up);
146 
147 	if ((conn->apsd_info & (1 << traffic_class)) == 0)
148 		return false;
149 
150 	/* Queue the frames if the STA is sleeping */
151 	spin_lock_bh(&conn->psq_lock);
152 	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
153 	skb_queue_tail(&conn->apsdq, skb);
154 	spin_unlock_bh(&conn->psq_lock);
155 
156 	/*
157 	 * If this is the first pkt getting queued for this STA,
158 	 * tell the target it has buffered delivery-enabled traffic.
159 	 */
160 	if (is_apsdq_empty) {
161 		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
162 				vif->fw_vif_idx,
163 				conn->aid, 1, 0);
164 	}
165 	*flags |= WMI_DATA_HDR_FLAGS_UAPSD;
166 
167 	return true;
168 }
169 
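/*
 * Legacy power-save handling for an AP-mode tx frame. For a PS-Poll
 * response only the MORE flag is updated (returns false, the frame goes
 * out now); otherwise the frame is buffered on the per-STA psq and the
 * PVB (TIM bit) is set for the first queued frame. Returns true when the
 * skb has been queued (consumed).
 */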
170 static bool ath6kl_process_psq(struct ath6kl_sta *conn,
171 				struct ath6kl_vif *vif,
172 				struct sk_buff *skb,
173 				u32 *flags)
174 {
175 	bool is_psq_empty = false;
176 	struct ath6kl *ar = vif->ar;
177 
178 	if (conn->sta_flags & STA_PS_POLLED) {
179 		spin_lock_bh(&conn->psq_lock);
180 		if (!skb_queue_empty(&conn->psq))
181 			*flags |= WMI_DATA_HDR_FLAGS_MORE;
182 		spin_unlock_bh(&conn->psq_lock);
183 		return false;
184 	}
185 
186 	/* Queue the frames if the STA is sleeping */
187 	spin_lock_bh(&conn->psq_lock);
188 	is_psq_empty = skb_queue_empty(&conn->psq);
189 	skb_queue_tail(&conn->psq, skb);
190 	spin_unlock_bh(&conn->psq_lock);
191 
192 	/*
193 	 * If this is the first pkt getting queued
194 	 * for this STA, update the PVB for this
195 	 * STA.
196 	 */
197 	if (is_psq_empty)
198 		ath6kl_wmi_set_pvb_cmd(ar->wmi,
199 				       vif->fw_vif_idx,
200 				       conn->aid, 1);
201 	return true;
202 }
203 
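/*
 * AP-mode power-save filtering for the tx path. Multicast frames are
 * buffered on ar->mcastpsq while any associated STA is asleep (unless the
 * transmit is due to DTIM expiry, in which case only the MORE flag is
 * updated). Unicast frames to a sleeping STA go through the uAPSD or
 * legacy PS queues. Returns true if the skb was consumed (queued or
 * dropped) and must not be sent now.
 */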
204 static bool ath6kl_powersave_ap(struct ath6kl_vif *vif, struct sk_buff *skb,
205 				u32 *flags)
206 {
207 	struct ethhdr *datap = (struct ethhdr *) skb->data;
208 	struct ath6kl_sta *conn = NULL;
209 	bool ps_queued = false;
210 	struct ath6kl *ar = vif->ar;
211 
212 	if (is_multicast_ether_addr(datap->h_dest)) {
213 		u8 ctr = 0;
214 		bool q_mcast = false;
215 
216 		for (ctr = 0; ctr < AP_MAX_NUM_STA; ctr++) {
217 			if (ar->sta_list[ctr].sta_flags & STA_PS_SLEEP) {
218 				q_mcast = true;
219 				break;
220 			}
221 		}
222 
223 		if (q_mcast) {
224 			/*
225 			 * If this transmit is not because of a DTIM expiry,
226 			 * queue it.
227 			 */
228 			if (!test_bit(DTIM_EXPIRED, &vif->flags)) {
229 				bool is_mcastq_empty = false;
230 
231 				spin_lock_bh(&ar->mcastpsq_lock);
232 				is_mcastq_empty =
233 					skb_queue_empty(&ar->mcastpsq);
234 				skb_queue_tail(&ar->mcastpsq, skb);
235 				spin_unlock_bh(&ar->mcastpsq_lock);
236 
237 				/*
238 				 * If this is the first Mcast pkt getting
239 				 * queued, indicate to the target to set the
240 				 * BitmapControl LSB of the TIM IE.
241 				 */
242 				if (is_mcastq_empty)
243 					ath6kl_wmi_set_pvb_cmd(ar->wmi,
244 							       vif->fw_vif_idx,
245 							       MCAST_AID, 1);
246 
247 				ps_queued = true;
248 			} else {
249 				/*
250 				 * This transmit is because of Dtim expiry.
251 				 * Determine if MoreData bit has to be set.
252 				 */
253 				spin_lock_bh(&ar->mcastpsq_lock);
254 				if (!skb_queue_empty(&ar->mcastpsq))
255 					*flags |= WMI_DATA_HDR_FLAGS_MORE;
256 				spin_unlock_bh(&ar->mcastpsq_lock);
257 			}
258 		}
259 	} else {
260 		conn = ath6kl_find_sta(vif, datap->h_dest);
261 		if (!conn) {
262 			dev_kfree_skb(skb);
263 
264 			/* Inform the caller that the skb is consumed */
265 			return true;
266 		}
267 
268 		if (conn->sta_flags & STA_PS_SLEEP) {
269 			ps_queued = ath6kl_process_uapsdq(conn,
270 						vif, skb, flags);
271 			if (!(*flags & WMI_DATA_HDR_FLAGS_UAPSD))
272 				ps_queued = ath6kl_process_psq(conn,
273 						vif, skb, flags);
274 		}
275 	}
276 	return ps_queued;
277 }
278 
279 /* Tx functions */
280 
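/*
 * Send a WMI control frame (or other internally generated frame) on the
 * given HTC endpoint. A cookie is allocated to track the packet; if the
 * control endpoint is already flagged full, the frame is dropped with
 * -ENOMEM. Completion and cleanup happen in the tx-complete callback.
 */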
281 int ath6kl_control_tx(void *devt, struct sk_buff *skb,
282 		      enum htc_endpoint_id eid)
283 {
284 	struct ath6kl *ar = devt;
285 	int status = 0;
286 	struct ath6kl_cookie *cookie = NULL;
287 
288 	spin_lock_bh(&ar->lock);
289 
290 	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
291 		   "%s: skb=0x%p, len=0x%x eid =%d\n", __func__,
292 		   skb, skb->len, eid);
293 
294 	if (test_bit(WMI_CTRL_EP_FULL, &ar->flag) && (eid == ar->ctrl_ep)) {
295 		/*
296 		 * Control endpoint is full, don't allocate resources, we
297 		 * are just going to drop this packet.
298 		 */
299 		cookie = NULL;
300 		ath6kl_err("wmi ctrl ep full, dropping pkt : 0x%p, len:%d\n",
301 			   skb, skb->len);
302 	} else
303 		cookie = ath6kl_alloc_cookie(ar);
304 
305 	if (cookie == NULL) {
306 		spin_unlock_bh(&ar->lock);
307 		status = -ENOMEM;
308 		goto fail_ctrl_tx;
309 	}
310 
311 	ar->tx_pending[eid]++;
312 
313 	if (eid != ar->ctrl_ep)
314 		ar->total_tx_data_pend++;
315 
316 	spin_unlock_bh(&ar->lock);
317 
318 	cookie->skb = skb;
319 	cookie->map_no = 0;
320 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
321 			 eid, ATH6KL_CONTROL_PKT_TAG);
322 
323 	/*
324 	 * This interface is asynchronous, if there is an error, cleanup
325 	 * will happen in the TX completion callback.
326 	 */
327 	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
328 
329 	return 0;
330 
331 fail_ctrl_tx:
332 	dev_kfree_skb(skb);
333 	return status;
334 }
335 
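/*
 * Transmit path for data frames coming from the networking stack.
 * Applies AP power-save buffering, adds the WMI data header (optionally
 * with checksum-offload meta data), maps the frame to an HTC endpoint via
 * the AC/endpoint map (or the IBSS node map), and hands it to HTC. The
 * skb is always consumed; errors are accounted in vif->net_stats.
 */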
336 int ath6kl_data_tx(struct sk_buff *skb, struct net_device *dev)
337 {
338 	struct ath6kl *ar = ath6kl_priv(dev);
339 	struct ath6kl_cookie *cookie = NULL;
340 	enum htc_endpoint_id eid = ENDPOINT_UNUSED;
341 	struct ath6kl_vif *vif = netdev_priv(dev);
342 	u32 map_no = 0;
343 	u16 htc_tag = ATH6KL_DATA_PKT_TAG;
344 	u8 ac = 99; /* initialize to unmapped ac */
345 	bool chk_adhoc_ps_mapping = false;
346 	int ret;
347 	struct wmi_tx_meta_v2 meta_v2;
348 	void *meta;
349 	u8 csum_start = 0, csum_dest = 0, csum = skb->ip_summed;
350 	u8 meta_ver = 0;
351 	u32 flags = 0;
352 
353 	ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
354 		   "%s: skb=0x%p, data=0x%p, len=0x%x\n", __func__,
355 		   skb, skb->data, skb->len);
356 
357 	/* If target is not associated */
358 	if (!test_bit(CONNECTED, &vif->flags)) {
359 		dev_kfree_skb(skb);
360 		return 0;
361 	}
362 
363 	if (!test_bit(WMI_READY, &ar->flag))
364 		goto fail_tx;
365 
366 	/* AP mode Power saving processing */
367 	if (vif->nw_type == AP_NETWORK) {
368 		if (ath6kl_powersave_ap(vif, skb, &flags))
369 			return 0;
370 	}
371 
372 	if (test_bit(WMI_ENABLED, &ar->flag)) {
373 		if ((dev->features & NETIF_F_IP_CSUM) &&
374 				(csum == CHECKSUM_PARTIAL)) {
375 			csum_start = skb->csum_start -
376 					(skb_network_header(skb) - skb->head) +
377 					sizeof(struct ath6kl_llc_snap_hdr);
378 			csum_dest = skb->csum_offset + csum_start;
379 		}
380 
381 		if (skb_headroom(skb) < dev->needed_headroom) {
382 			struct sk_buff *tmp_skb = skb;
383 
384 			skb = skb_realloc_headroom(skb, dev->needed_headroom);
385 			kfree_skb(tmp_skb);
386 			if (skb == NULL) {
387 				vif->net_stats.tx_dropped++;
388 				return 0;
389 			}
390 		}
391 
392 		if (ath6kl_wmi_dix_2_dot3(ar->wmi, skb)) {
393 			ath6kl_err("ath6kl_wmi_dix_2_dot3 failed\n");
394 			goto fail_tx;
395 		}
396 
397 		if ((dev->features & NETIF_F_IP_CSUM) &&
398 				(csum == CHECKSUM_PARTIAL)) {
399 			meta_v2.csum_start = csum_start;
400 			meta_v2.csum_dest = csum_dest;
401 
402 			/* instruct target to calculate checksum */
403 			meta_v2.csum_flags = WMI_META_V2_FLAG_CSUM_OFFLOAD;
404 			meta_ver = WMI_META_VERSION_2;
405 			meta = &meta_v2;
406 		} else {
407 			meta_ver = 0;
408 			meta = NULL;
409 		}
410 
411 		ret = ath6kl_wmi_data_hdr_add(ar->wmi, skb,
412 				DATA_MSGTYPE, flags, 0,
413 				meta_ver,
414 				meta, vif->fw_vif_idx);
415 
416 		if (ret) {
417 			ath6kl_warn("failed to add wmi data header:%d\n",
418 				    ret);
419 			goto fail_tx;
420 		}
421 
422 		if ((vif->nw_type == ADHOC_NETWORK) &&
423 		     ar->ibss_ps_enable && test_bit(CONNECTED, &vif->flags))
424 			chk_adhoc_ps_mapping = true;
425 		else {
426 			/* get the stream mapping */
427 			ret = ath6kl_wmi_implicit_create_pstream(ar->wmi,
428 				    vif->fw_vif_idx, skb,
429 				    0, test_bit(WMM_ENABLED, &vif->flags), &ac);
430 			if (ret)
431 				goto fail_tx;
432 		}
433 	} else
434 		goto fail_tx;
435 
436 	spin_lock_bh(&ar->lock);
437 
438 	if (chk_adhoc_ps_mapping)
439 		eid = ath6kl_ibss_map_epid(skb, dev, &map_no);
440 	else
441 		eid = ar->ac2ep_map[ac];
442 
443 	if (eid == 0 || eid == ENDPOINT_UNUSED) {
444 		ath6kl_err("eid %d is not mapped!\n", eid);
445 		spin_unlock_bh(&ar->lock);
446 		goto fail_tx;
447 	}
448 
449 	/* allocate resource for this packet */
450 	cookie = ath6kl_alloc_cookie(ar);
451 
452 	if (!cookie) {
453 		spin_unlock_bh(&ar->lock);
454 		goto fail_tx;
455 	}
456 
457 	/* update counts while the lock is held */
458 	ar->tx_pending[eid]++;
459 	ar->total_tx_data_pend++;
460 
461 	spin_unlock_bh(&ar->lock);
462 
463 	if (!IS_ALIGNED((unsigned long) skb->data - HTC_HDR_LENGTH, 4) &&
464 	    skb_cloned(skb)) {
465 		/*
466 		 * We will touch (move) the buffer data to align it. Since the
467 		 * skb buffer is cloned and not only the header is changed, we
468 		 * have to copy it to allow the changes. Since we are copying
469 		 * the data here, we may as well align it by reserving suitable
470 		 * headroom to avoid the memmove in ath6kl_htc_tx_buf_align().
471 		 */
472 		struct sk_buff *nskb;
473 
474 		nskb = skb_copy_expand(skb, HTC_HDR_LENGTH, 0, GFP_ATOMIC);
475 		if (nskb == NULL)
476 			goto fail_tx;
477 		kfree_skb(skb);
478 		skb = nskb;
479 	}
480 
481 	cookie->skb = skb;
482 	cookie->map_no = map_no;
483 	set_htc_pkt_info(&cookie->htc_pkt, cookie, skb->data, skb->len,
484 			 eid, htc_tag);
485 
486 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "tx ",
487 			skb->data, skb->len);
488 
489 	/*
490 	 * HTC interface is asynchronous, if this fails, cleanup will
491 	 * happen in the ath6kl_tx_complete callback.
492 	 */
493 	ath6kl_htc_tx(ar->htc_target, &cookie->htc_pkt);
494 
495 	return 0;
496 
497 fail_tx:
498 	dev_kfree_skb(skb);
499 
500 	vif->net_stats.tx_dropped++;
501 	vif->net_stats.tx_aborted_errors++;
502 
503 	return 0;
504 }
505 
506 /* indicate tx activity or inactivity on a WMI stream */
507 void ath6kl_indicate_tx_activity(void *devt, u8 traffic_class, bool active)
508 {
509 	struct ath6kl *ar = devt;
510 	enum htc_endpoint_id eid;
511 	int i;
512 
513 	eid = ar->ac2ep_map[traffic_class];
514 
515 	if (!test_bit(WMI_ENABLED, &ar->flag))
516 		goto notify_htc;
517 
518 	spin_lock_bh(&ar->lock);
519 
520 	ar->ac_stream_active[traffic_class] = active;
521 
522 	if (active) {
523 		/*
524 		 * Keep track of the active stream with the highest
525 		 * priority.
526 		 */
527 		if (ar->ac_stream_pri_map[traffic_class] >
528 		    ar->hiac_stream_active_pri)
529 			/* set the new highest active priority */
530 			ar->hiac_stream_active_pri =
531 					ar->ac_stream_pri_map[traffic_class];
532 
533 	} else {
534 		/*
535 		 * We may have to search for the next active stream
536 		 * that is the highest priority.
537 		 */
538 		if (ar->hiac_stream_active_pri ==
539 			ar->ac_stream_pri_map[traffic_class]) {
540 			/*
541 			 * The highest priority stream just went inactive;
542 			 * reset and search for the next highest active
543 			 * priority stream.
544 			 */
545 			ar->hiac_stream_active_pri = 0;
546 
547 			for (i = 0; i < WMM_NUM_AC; i++) {
548 				if (ar->ac_stream_active[i] &&
549 				    (ar->ac_stream_pri_map[i] >
550 				     ar->hiac_stream_active_pri))
551 					/*
552 					 * Set the new highest active
553 					 * priority.
554 					 */
555 					ar->hiac_stream_active_pri =
556 						ar->ac_stream_pri_map[i];
557 			}
558 		}
559 	}
560 
561 	spin_unlock_bh(&ar->lock);
562 
563 notify_htc:
564 	/* notify HTC, this may cause credit distribution changes */
565 	ath6kl_htc_indicate_activity_change(ar->htc_target, eid, active);
566 }
567 
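/*
 * HTC callback invoked when an endpoint's tx queue is full. Control
 * endpoint overflow is only flagged; for data endpoints, lower priority
 * traffic may be dropped to protect the cookies reserved for the highest
 * priority active stream, and the netif queue of an affected vif is
 * stopped so the stack backs off.
 */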
568 enum htc_send_full_action ath6kl_tx_queue_full(struct htc_target *target,
569 					       struct htc_packet *packet)
570 {
571 	struct ath6kl *ar = target->dev->ar;
572 	struct ath6kl_vif *vif;
573 	enum htc_endpoint_id endpoint = packet->endpoint;
574 	enum htc_send_full_action action = HTC_SEND_FULL_KEEP;
575 
576 	if (endpoint == ar->ctrl_ep) {
577 		/*
578 		 * Under normal operation this endpoint should not fill up;
579 		 * if it does, something is running rampant and the host is
580 		 * exhausting the WMI queue with too many commands. The only
581 		 * exception is during testing using endpoint ping.
582 		 */
583 		set_bit(WMI_CTRL_EP_FULL, &ar->flag);
584 		ath6kl_err("wmi ctrl ep is full\n");
585 		return action;
586 	}
587 
588 	if (packet->info.tx.tag == ATH6KL_CONTROL_PKT_TAG)
589 		return action;
590 
591 	/*
592 	 * The last MAX_HI_COOKIE_NUM "batch" of cookies is reserved for
593 	 * the highest active stream.
594 	 */
595 	if (ar->ac_stream_pri_map[ar->ep2ac_map[endpoint]] <
596 	    ar->hiac_stream_active_pri &&
597 	    ar->cookie_count <= MAX_HI_COOKIE_NUM)
598 		/*
599 		 * Give preference to the highest priority stream by
600 		 * dropping the packets which overflowed.
601 		 */
602 		action = HTC_SEND_FULL_DROP;
603 
604 	/* FIXME: Locking */
605 	spin_lock_bh(&ar->list_lock);
606 	list_for_each_entry(vif, &ar->vif_list, list) {
607 		if (vif->nw_type == ADHOC_NETWORK ||
608 		    action != HTC_SEND_FULL_DROP) {
609 			spin_unlock_bh(&ar->list_lock);
610 
611 			set_bit(NETQ_STOPPED, &vif->flags);
612 			netif_stop_queue(vif->ndev);
613 
614 			return action;
615 		}
616 	}
617 	spin_unlock_bh(&ar->list_lock);
618 
619 	return action;
620 }
621 
622 /* TODO this needs to be looked at */
623 static void ath6kl_tx_clear_node_map(struct ath6kl_vif *vif,
624 				     enum htc_endpoint_id eid, u32 map_no)
625 {
626 	struct ath6kl *ar = vif->ar;
627 	u32 i;
628 
629 	if (vif->nw_type != ADHOC_NETWORK)
630 		return;
631 
632 	if (!ar->ibss_ps_enable)
633 		return;
634 
635 	if (eid == ar->ctrl_ep)
636 		return;
637 
638 	if (map_no == 0)
639 		return;
640 
641 	map_no--;
642 	ar->node_map[map_no].tx_pend--;
643 
644 	if (ar->node_map[map_no].tx_pend)
645 		return;
646 
647 	if (map_no != (ar->node_num - 1))
648 		return;
649 
650 	for (i = ar->node_num; i > 0; i--) {
651 		if (ar->node_map[i - 1].tx_pend)
652 			break;
653 
654 		memset(&ar->node_map[i - 1], 0,
655 		       sizeof(struct ath6kl_node_mapping));
656 		ar->node_num--;
657 	}
658 }
659 
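/*
 * HTC tx-complete callback. Walks the list of completed htc_packets,
 * updates pending counters and per-vif statistics, releases cookies and
 * skbs, updates the IBSS node map, and wakes the netif queues of vifs
 * that were not being flushed.
 */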
660 void ath6kl_tx_complete(void *context, struct list_head *packet_queue)
661 {
662 	struct ath6kl *ar = context;
663 	struct sk_buff_head skb_queue;
664 	struct htc_packet *packet;
665 	struct sk_buff *skb;
666 	struct ath6kl_cookie *ath6kl_cookie;
667 	u32 map_no = 0;
668 	int status;
669 	enum htc_endpoint_id eid;
670 	bool wake_event = false;
671 	bool flushing[ATH6KL_VIF_MAX] = {false};
672 	u8 if_idx;
673 	struct ath6kl_vif *vif;
674 
675 	skb_queue_head_init(&skb_queue);
676 
677 	/* lock the driver as we update internal state */
678 	spin_lock_bh(&ar->lock);
679 
680 	/* reap completed packets */
681 	while (!list_empty(packet_queue)) {
682 
683 		packet = list_first_entry(packet_queue, struct htc_packet,
684 					  list);
685 		list_del(&packet->list);
686 
687 		ath6kl_cookie = (struct ath6kl_cookie *)packet->pkt_cntxt;
688 		if (!ath6kl_cookie)
689 			goto fatal;
690 
691 		status = packet->status;
692 		skb = ath6kl_cookie->skb;
693 		eid = packet->endpoint;
694 		map_no = ath6kl_cookie->map_no;
695 
696 		if (!skb || !skb->data)
697 			goto fatal;
698 
699 		__skb_queue_tail(&skb_queue, skb);
700 
701 		if (!status && (packet->act_len != skb->len))
702 			goto fatal;
703 
704 		ar->tx_pending[eid]--;
705 
706 		if (eid != ar->ctrl_ep)
707 			ar->total_tx_data_pend--;
708 
709 		if (eid == ar->ctrl_ep) {
710 			if (test_bit(WMI_CTRL_EP_FULL, &ar->flag))
711 				clear_bit(WMI_CTRL_EP_FULL, &ar->flag);
712 
713 			if (ar->tx_pending[eid] == 0)
714 				wake_event = true;
715 		}
716 
717 		if (eid == ar->ctrl_ep) {
718 			if_idx = wmi_cmd_hdr_get_if_idx(
719 				(struct wmi_cmd_hdr *) packet->buf);
720 		} else {
721 			if_idx = wmi_data_hdr_get_if_idx(
722 				(struct wmi_data_hdr *) packet->buf);
723 		}
724 
725 		vif = ath6kl_get_vif_by_index(ar, if_idx);
726 		if (!vif) {
727 			ath6kl_free_cookie(ar, ath6kl_cookie);
728 			continue;
729 		}
730 
731 		if (status) {
732 			if (status == -ECANCELED)
733 				/* a packet was flushed  */
734 				flushing[if_idx] = true;
735 
736 			vif->net_stats.tx_errors++;
737 
738 			if (status != -ENOSPC && status != -ECANCELED)
739 				ath6kl_warn("tx complete error: %d\n", status);
740 
741 			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
742 				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
743 				   __func__, skb, packet->buf, packet->act_len,
744 				   eid, "error!");
745 		} else {
746 			ath6kl_dbg(ATH6KL_DBG_WLAN_TX,
747 				   "%s: skb=0x%p data=0x%p len=0x%x eid=%d %s\n",
748 				   __func__, skb, packet->buf, packet->act_len,
749 				   eid, "OK");
750 
751 			flushing[if_idx] = false;
752 			vif->net_stats.tx_packets++;
753 			vif->net_stats.tx_bytes += skb->len;
754 		}
755 
756 		ath6kl_tx_clear_node_map(vif, eid, map_no);
757 
758 		ath6kl_free_cookie(ar, ath6kl_cookie);
759 
760 		if (test_bit(NETQ_STOPPED, &vif->flags))
761 			clear_bit(NETQ_STOPPED, &vif->flags);
762 	}
763 
764 	spin_unlock_bh(&ar->lock);
765 
766 	__skb_queue_purge(&skb_queue);
767 
768 	/* FIXME: Locking */
769 	spin_lock_bh(&ar->list_lock);
770 	list_for_each_entry(vif, &ar->vif_list, list) {
771 		if (test_bit(CONNECTED, &vif->flags) &&
772 		    !flushing[vif->fw_vif_idx]) {
773 			spin_unlock_bh(&ar->list_lock);
774 			netif_wake_queue(vif->ndev);
775 			spin_lock_bh(&ar->list_lock);
776 		}
777 	}
778 	spin_unlock_bh(&ar->list_lock);
779 
780 	if (wake_event)
781 		wake_up(&ar->event_wq);
782 
783 	return;
784 
785 fatal:
786 	WARN_ON(1);
787 	spin_unlock_bh(&ar->lock);
788 	return;
789 }
790 
791 void ath6kl_tx_data_cleanup(struct ath6kl *ar)
792 {
793 	int i;
794 
795 	/* flush all the data (non-control) streams */
796 	for (i = 0; i < WMM_NUM_AC; i++)
797 		ath6kl_htc_flush_txep(ar->htc_target, ar->ac2ep_map[i],
798 				      ATH6KL_DATA_PKT_TAG);
799 }
800 
801 /* Rx functions */
802 
803 static void ath6kl_deliver_frames_to_nw_stack(struct net_device *dev,
804 					      struct sk_buff *skb)
805 {
806 	if (!skb)
807 		return;
808 
809 	skb->dev = dev;
810 
811 	if (!(skb->dev->flags & IFF_UP)) {
812 		dev_kfree_skb(skb);
813 		return;
814 	}
815 
816 	skb->protocol = eth_type_trans(skb, skb->dev);
817 
818 	netif_rx_ni(skb);
819 }
820 
821 static void ath6kl_alloc_netbufs(struct sk_buff_head *q, u16 num)
822 {
823 	struct sk_buff *skb;
824 
825 	while (num) {
826 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
827 		if (!skb) {
828 			ath6kl_err("netbuf allocation failed\n");
829 			return;
830 		}
831 		skb_queue_tail(q, skb);
832 		num--;
833 	}
834 }
835 
836 static struct sk_buff *aggr_get_free_skb(struct aggr_info *p_aggr)
837 {
838 	struct sk_buff *skb = NULL;
839 
840 	if (skb_queue_len(&p_aggr->rx_amsdu_freeq) <
841 	    (AGGR_NUM_OF_FREE_NETBUFS >> 2))
842 		ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq,
843 				     AGGR_NUM_OF_FREE_NETBUFS);
844 
845 	skb = skb_dequeue(&p_aggr->rx_amsdu_freeq);
846 
847 	return skb;
848 }
849 
850 void ath6kl_rx_refill(struct htc_target *target, enum htc_endpoint_id endpoint)
851 {
852 	struct ath6kl *ar = target->dev->ar;
853 	struct sk_buff *skb;
854 	int rx_buf;
855 	int n_buf_refill;
856 	struct htc_packet *packet;
857 	struct list_head queue;
858 
859 	n_buf_refill = ATH6KL_MAX_RX_BUFFERS -
860 			  ath6kl_htc_get_rxbuf_num(ar->htc_target, endpoint);
861 
862 	if (n_buf_refill <= 0)
863 		return;
864 
865 	INIT_LIST_HEAD(&queue);
866 
867 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
868 		   "%s: providing htc with %d buffers at eid=%d\n",
869 		   __func__, n_buf_refill, endpoint);
870 
871 	for (rx_buf = 0; rx_buf < n_buf_refill; rx_buf++) {
872 		skb = ath6kl_buf_alloc(ATH6KL_BUFFER_SIZE);
873 		if (!skb)
874 			break;
875 
876 		packet = (struct htc_packet *) skb->head;
877 		if (!IS_ALIGNED((unsigned long) skb->data, 4))
878 			skb->data = PTR_ALIGN(skb->data - 4, 4);
879 		set_htc_rxpkt_info(packet, skb, skb->data,
880 				ATH6KL_BUFFER_SIZE, endpoint);
881 		list_add_tail(&packet->list, &queue);
882 	}
883 
884 	if (!list_empty(&queue))
885 		ath6kl_htc_add_rxbuf_multiple(ar->htc_target, &queue);
886 }
887 
888 void ath6kl_refill_amsdu_rxbufs(struct ath6kl *ar, int count)
889 {
890 	struct htc_packet *packet;
891 	struct sk_buff *skb;
892 
893 	while (count) {
894 		skb = ath6kl_buf_alloc(ATH6KL_AMSDU_BUFFER_SIZE);
895 		if (!skb)
896 			return;
897 
898 		packet = (struct htc_packet *) skb->head;
899 		if (!IS_ALIGNED((unsigned long) skb->data, 4))
900 			skb->data = PTR_ALIGN(skb->data - 4, 4);
901 		set_htc_rxpkt_info(packet, skb, skb->data,
902 				   ATH6KL_AMSDU_BUFFER_SIZE, 0);
903 		spin_lock_bh(&ar->lock);
904 		list_add_tail(&packet->list, &ar->amsdu_rx_buffer_queue);
905 		spin_unlock_bh(&ar->lock);
906 		count--;
907 	}
908 }
909 
910 /*
911  * Callback to allocate a receive buffer for a pending packet. We use a
912  * pre-allocated list of buffers of maximum AMSDU size (4K).
913  */
914 struct htc_packet *ath6kl_alloc_amsdu_rxbuf(struct htc_target *target,
915 					    enum htc_endpoint_id endpoint,
916 					    int len)
917 {
918 	struct ath6kl *ar = target->dev->ar;
919 	struct htc_packet *packet = NULL;
920 	struct list_head *pkt_pos;
921 	int refill_cnt = 0, depth = 0;
922 
923 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: eid=%d, len:%d\n",
924 		   __func__, endpoint, len);
925 
926 	if ((len <= ATH6KL_BUFFER_SIZE) ||
927 	    (len > ATH6KL_AMSDU_BUFFER_SIZE))
928 		return NULL;
929 
930 	spin_lock_bh(&ar->lock);
931 
932 	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
933 		spin_unlock_bh(&ar->lock);
934 		refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS;
935 		goto refill_buf;
936 	}
937 
938 	packet = list_first_entry(&ar->amsdu_rx_buffer_queue,
939 				  struct htc_packet, list);
940 	list_del(&packet->list);
941 	list_for_each(pkt_pos, &ar->amsdu_rx_buffer_queue)
942 		depth++;
943 
944 	refill_cnt = ATH6KL_MAX_AMSDU_RX_BUFFERS - depth;
945 	spin_unlock_bh(&ar->lock);
946 
947 	/* set actual endpoint ID */
948 	packet->endpoint = endpoint;
949 
950 refill_buf:
951 	if (refill_cnt >= ATH6KL_AMSDU_REFILL_THRESHOLD)
952 		ath6kl_refill_amsdu_rxbufs(ar, refill_cnt);
953 
954 	return packet;
955 }
956 
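/*
 * Split a received 802.3 A-MSDU into its subframes. Each subframe is
 * copied into a buffer from the aggregation free queue, converted from
 * 802.3/LLC-SNAP to DIX and appended to rxtid->q; the original skb is
 * freed.
 */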
957 static void aggr_slice_amsdu(struct aggr_info *p_aggr,
958 			     struct rxtid *rxtid, struct sk_buff *skb)
959 {
960 	struct sk_buff *new_skb;
961 	struct ethhdr *hdr;
962 	u16 frame_8023_len, payload_8023_len, mac_hdr_len, amsdu_len;
963 	u8 *framep;
964 
965 	mac_hdr_len = sizeof(struct ethhdr);
966 	framep = skb->data + mac_hdr_len;
967 	amsdu_len = skb->len - mac_hdr_len;
968 
969 	while (amsdu_len > mac_hdr_len) {
970 		hdr = (struct ethhdr *) framep;
971 		payload_8023_len = ntohs(hdr->h_proto);
972 
973 		if (payload_8023_len < MIN_MSDU_SUBFRAME_PAYLOAD_LEN ||
974 		    payload_8023_len > MAX_MSDU_SUBFRAME_PAYLOAD_LEN) {
975 			ath6kl_err("802.3 AMSDU frame bound check failed. len %d\n",
976 				   payload_8023_len);
977 			break;
978 		}
979 
980 		frame_8023_len = payload_8023_len + mac_hdr_len;
981 		new_skb = aggr_get_free_skb(p_aggr);
982 		if (!new_skb) {
983 			ath6kl_err("no buffer available\n");
984 			break;
985 		}
986 
987 		memcpy(new_skb->data, framep, frame_8023_len);
988 		skb_put(new_skb, frame_8023_len);
989 		if (ath6kl_wmi_dot3_2_dix(new_skb)) {
990 			ath6kl_err("dot3_2_dix error\n");
991 			dev_kfree_skb(new_skb);
992 			break;
993 		}
994 
995 		skb_queue_tail(&rxtid->q, new_skb);
996 
997 		/* Is this the last subframe within this aggregate ? */
998 		if ((amsdu_len - frame_8023_len) == 0)
999 			break;
1000 
1001 		/* Add the length of A-MSDU subframe padding bytes -
1002 		 * Round up to the next 4-byte boundary.
1003 		 */
1004 		frame_8023_len = ALIGN(frame_8023_len, 4);
1005 
1006 		framep += frame_8023_len;
1007 		amsdu_len -= frame_8023_len;
1008 	}
1009 
1010 	dev_kfree_skb(skb);
1011 }
1012 
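/*
 * Release frames from the reorder hold queue for a TID and deliver them
 * to the network stack. With order == 0 everything up to seq_no (or the
 * whole window when seq_no is 0) is flushed, counting holes; with
 * order == 1 delivery stops at the first missing frame so only the
 * in-order head of the window is released.
 */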
1013 static void aggr_deque_frms(struct aggr_info_conn *agg_conn, u8 tid,
1014 			    u16 seq_no, u8 order)
1015 {
1016 	struct sk_buff *skb;
1017 	struct rxtid *rxtid;
1018 	struct skb_hold_q *node;
1019 	u16 idx, idx_end, seq_end;
1020 	struct rxtid_stats *stats;
1021 
1022 	rxtid = &agg_conn->rx_tid[tid];
1023 	stats = &agg_conn->stat[tid];
1024 
1025 	idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1026 
1027 	/*
1028 	 * idx_end is typically the last possible frame in the window,
1029 	 * but it changes to the given seq_no when a BAR arrives. If
1030 	 * seq_no is non-zero, we will go up to that point and stop.
1031 	 * Note: the last seq no in the current window occupies the same
1032 	 * index position as the index just before the window start.
1033 	 * An important point: if win_sz is 7, for a seq_no space of
1034 	 * 4095 there will be holes when the sequence number wraps
1035 	 * around. The target should choose win_sz judiciously with this
1036 	 * condition in mind (for 4095, win_sz values of 2, 4, 8 or 16,
1037 	 * with TID_WINDOW_SZ = 2 x win_sz, work fine).
1038 	 * We must dequeue from "idx" to "idx_end", both inclusive.
1039 	 */
1040 	seq_end = seq_no ? seq_no : rxtid->seq_next;
1041 	idx_end = AGGR_WIN_IDX(seq_end, rxtid->hold_q_sz);
1042 
1043 	spin_lock_bh(&rxtid->lock);
1044 
1045 	do {
1046 		node = &rxtid->hold_q[idx];
1047 		if ((order == 1) && (!node->skb))
1048 			break;
1049 
1050 		if (node->skb) {
1051 			if (node->is_amsdu)
1052 				aggr_slice_amsdu(agg_conn->aggr_info, rxtid,
1053 						 node->skb);
1054 			else
1055 				skb_queue_tail(&rxtid->q, node->skb);
1056 			node->skb = NULL;
1057 		} else
1058 			stats->num_hole++;
1059 
1060 		rxtid->seq_next = ATH6KL_NEXT_SEQ_NO(rxtid->seq_next);
1061 		idx = AGGR_WIN_IDX(rxtid->seq_next, rxtid->hold_q_sz);
1062 	} while (idx != idx_end);
1063 
1064 	spin_unlock_bh(&rxtid->lock);
1065 
1066 	stats->num_delivered += skb_queue_len(&rxtid->q);
1067 
1068 	while ((skb = skb_dequeue(&rxtid->q)))
1069 		ath6kl_deliver_frames_to_nw_stack(agg_conn->dev, skb);
1070 }
1071 
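/*
 * Insert a received frame into the rx reorder state for (tid, seq_no).
 * If aggregation is not active for the TID, A-MSDUs are sliced and
 * delivered directly. Out-of-window sequence numbers first shift the
 * window (flushing frames as needed); the frame is then stored in the
 * hold queue slot for its sequence number, any in-order run is delivered,
 * and the release timer is armed if holes remain. Returns true if the
 * skb was consumed.
 */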
1072 static bool aggr_process_recv_frm(struct aggr_info_conn *agg_conn, u8 tid,
1073 				  u16 seq_no,
1074 				  bool is_amsdu, struct sk_buff *frame)
1075 {
1076 	struct rxtid *rxtid;
1077 	struct rxtid_stats *stats;
1078 	struct sk_buff *skb;
1079 	struct skb_hold_q *node;
1080 	u16 idx, st, cur, end;
1081 	bool is_queued = false;
1082 	u16 extended_end;
1083 
1084 	rxtid = &agg_conn->rx_tid[tid];
1085 	stats = &agg_conn->stat[tid];
1086 
1087 	stats->num_into_aggr++;
1088 
1089 	if (!rxtid->aggr) {
1090 		if (is_amsdu) {
1091 			aggr_slice_amsdu(agg_conn->aggr_info, rxtid, frame);
1092 			is_queued = true;
1093 			stats->num_amsdu++;
1094 			while ((skb = skb_dequeue(&rxtid->q)))
1095 				ath6kl_deliver_frames_to_nw_stack(agg_conn->dev,
1096 								  skb);
1097 		}
1098 		return is_queued;
1099 	}
1100 
1101 	/* Check whether the incoming sequence no. falls within the window */
1102 	st = rxtid->seq_next;
1103 	cur = seq_no;
1104 	end = (st + rxtid->hold_q_sz - 1) & ATH6KL_MAX_SEQ_NO;
1105 
1106 	if (((st < end) && (cur < st || cur > end)) ||
1107 	    ((st > end) && (cur > end) && (cur < st))) {
1108 		extended_end = (end + rxtid->hold_q_sz - 1) &
1109 			ATH6KL_MAX_SEQ_NO;
1110 
1111 		if (((end < extended_end) &&
1112 		     (cur < end || cur > extended_end)) ||
1113 		    ((end > extended_end) && (cur > extended_end) &&
1114 		     (cur < end))) {
1115 			aggr_deque_frms(agg_conn, tid, 0, 0);
1116 			if (cur >= rxtid->hold_q_sz - 1)
1117 				rxtid->seq_next = cur - (rxtid->hold_q_sz - 1);
1118 			else
1119 				rxtid->seq_next = ATH6KL_MAX_SEQ_NO -
1120 						  (rxtid->hold_q_sz - 2 - cur);
1121 		} else {
1122 			/*
1123 			 * Dequeue only those frames that are outside the
1124 			 * new shifted window.
1125 			 */
1126 			if (cur >= rxtid->hold_q_sz - 1)
1127 				st = cur - (rxtid->hold_q_sz - 1);
1128 			else
1129 				st = ATH6KL_MAX_SEQ_NO -
1130 					(rxtid->hold_q_sz - 2 - cur);
1131 
1132 			aggr_deque_frms(agg_conn, tid, st, 0);
1133 		}
1134 
1135 		stats->num_oow++;
1136 	}
1137 
1138 	idx = AGGR_WIN_IDX(seq_no, rxtid->hold_q_sz);
1139 
1140 	node = &rxtid->hold_q[idx];
1141 
1142 	spin_lock_bh(&rxtid->lock);
1143 
1144 	/*
1145 	 * Is the current frame a duplicate, or beyond our window (the
1146 	 * hold_q, which is already 2x the window size)?
1147 	 *
1148 	 * 1. Duplicate is easy - drop incoming frame.
1149 	 * 2. Not falling in current sliding window.
1150 	 *  2a. is the frame_seq_no preceding current tid_seq_no?
1151 	 *      -> drop the frame. perhaps sender did not get our ACK.
1152 	 *         this is taken care of above.
1153 	 *  2b. is the frame_seq_no beyond window(st, TID_WINDOW_SZ);
1154 	 *      -> Taken care of above, by moving the window forward.
1155 	 */
1156 	dev_kfree_skb(node->skb);
1157 	stats->num_dups++;
1158 
1159 	node->skb = frame;
1160 	is_queued = true;
1161 	node->is_amsdu = is_amsdu;
1162 	node->seq_no = seq_no;
1163 
1164 	if (node->is_amsdu)
1165 		stats->num_amsdu++;
1166 	else
1167 		stats->num_mpdu++;
1168 
1169 	spin_unlock_bh(&rxtid->lock);
1170 
1171 	aggr_deque_frms(agg_conn, tid, 0, 1);
1172 
1173 	if (agg_conn->timer_scheduled)
1174 		rxtid->progress = true;
1175 	else
1176 		for (idx = 0; idx < rxtid->hold_q_sz; idx++) {
1177 			if (rxtid->hold_q[idx].skb) {
1178 				/*
1179 				 * There is a frame in the queue and no
1180 				 * timer so start a timer to ensure that
1181 				 * the frame doesn't remain stuck
1182 				 * forever.
1183 				 */
1184 				agg_conn->timer_scheduled = true;
1185 				mod_timer(&agg_conn->timer,
1186 					  jiffies +
1187 					  msecs_to_jiffies(AGGR_RX_TIMEOUT));
1188 				rxtid->progress = false;
1189 				rxtid->timer_mon = true;
1190 				break;
1191 			}
1192 		}
1193 
1194 	return is_queued;
1195 }
1196 
1197 static void ath6kl_uapsd_trigger_frame_rx(struct ath6kl_vif *vif,
1198 						 struct ath6kl_sta *conn)
1199 {
1200 	struct ath6kl *ar = vif->ar;
1201 	bool is_apsdq_empty, is_apsdq_empty_at_start;
1202 	u32 num_frames_to_deliver, flags;
1203 	struct sk_buff *skb = NULL;
1204 
1205 	/*
1206 	 * If the APSD q for this STA is not empty, dequeue and
1207 	 * send a pkt from the head of the q. Also update the
1208 	 * More data bit in the WMI_DATA_HDR if there are
1209 	 * more pkts for this STA in the APSD q.
1210 	 * If there are no more pkts for this STA,
1211 	 * update the APSD bitmap for this STA.
1212 	 */
1213 
1214 	num_frames_to_deliver = (conn->apsd_info >> ATH6KL_APSD_NUM_OF_AC) &
1215 						    ATH6KL_APSD_FRAME_MASK;
1216 	/*
1217 	 * The number of frames to send in a service period is
1218 	 * indicated by the station in the QOS_INFO field of its
1219 	 * association request.
1220 	 * If it is zero, send all frames.
1221 	 */
1222 	if (!num_frames_to_deliver)
1223 		num_frames_to_deliver = ATH6KL_APSD_ALL_FRAME;
1224 
1225 	spin_lock_bh(&conn->psq_lock);
1226 	is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1227 	spin_unlock_bh(&conn->psq_lock);
1228 	is_apsdq_empty_at_start = is_apsdq_empty;
1229 
1230 	while ((!is_apsdq_empty) && (num_frames_to_deliver)) {
1231 
1232 		spin_lock_bh(&conn->psq_lock);
1233 		skb = skb_dequeue(&conn->apsdq);
1234 		is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1235 		spin_unlock_bh(&conn->psq_lock);
1236 
1237 		/*
1238 		 * Set the STA flag to Trigger delivery,
1239 		 * so that the frame will go out
1240 		 */
1241 		conn->sta_flags |= STA_PS_APSD_TRIGGER;
1242 		num_frames_to_deliver--;
1243 
1244 		/* Last frame in the service period, set EOSP or queue empty */
1245 		if ((is_apsdq_empty) || (!num_frames_to_deliver))
1246 			conn->sta_flags |= STA_PS_APSD_EOSP;
1247 
1248 		ath6kl_data_tx(skb, vif->ndev);
1249 		conn->sta_flags &= ~(STA_PS_APSD_TRIGGER);
1250 		conn->sta_flags &= ~(STA_PS_APSD_EOSP);
1251 	}
1252 
1253 	if (is_apsdq_empty) {
1254 		if (is_apsdq_empty_at_start)
1255 			flags = WMI_AP_APSD_NO_DELIVERY_FRAMES;
1256 		else
1257 			flags = 0;
1258 
1259 		ath6kl_wmi_set_apsd_bfrd_traf(ar->wmi,
1260 				vif->fw_vif_idx,
1261 				conn->aid, 0, flags);
1262 	}
1263 
1264 	return;
1265 }
1266 
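/*
 * HTC rx completion handler. Control endpoint traffic is passed to WMI;
 * for data frames the WMI data header and rx meta data are stripped, AP
 * power-save and uAPSD trigger state is updated, 802.11/802.3 headers are
 * converted to DIX, AP-mode frames are bridged where needed, and unicast
 * frames are run through the rx aggregation reorder logic before being
 * delivered to the network stack.
 */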
1267 void ath6kl_rx(struct htc_target *target, struct htc_packet *packet)
1268 {
1269 	struct ath6kl *ar = target->dev->ar;
1270 	struct sk_buff *skb = packet->pkt_cntxt;
1271 	struct wmi_rx_meta_v2 *meta;
1272 	struct wmi_data_hdr *dhdr;
1273 	int min_hdr_len;
1274 	u8 meta_type, dot11_hdr = 0;
1275 	int status = packet->status;
1276 	enum htc_endpoint_id ept = packet->endpoint;
1277 	bool is_amsdu, prev_ps, ps_state = false;
1278 	bool trig_state = false;
1279 	struct ath6kl_sta *conn = NULL;
1280 	struct sk_buff *skb1 = NULL;
1281 	struct ethhdr *datap = NULL;
1282 	struct ath6kl_vif *vif;
1283 	struct aggr_info_conn *aggr_conn;
1284 	u16 seq_no, offset;
1285 	u8 tid, if_idx;
1286 
1287 	ath6kl_dbg(ATH6KL_DBG_WLAN_RX,
1288 		   "%s: ar=0x%p eid=%d, skb=0x%p, data=0x%p, len=0x%x status:%d",
1289 		   __func__, ar, ept, skb, packet->buf,
1290 		   packet->act_len, status);
1291 
1292 	if (status || !(skb->data + HTC_HDR_LENGTH)) {
1293 		dev_kfree_skb(skb);
1294 		return;
1295 	}
1296 
1297 	skb_put(skb, packet->act_len + HTC_HDR_LENGTH);
1298 	skb_pull(skb, HTC_HDR_LENGTH);
1299 
1300 	if (ept == ar->ctrl_ep) {
1301 		if_idx =
1302 		wmi_cmd_hdr_get_if_idx((struct wmi_cmd_hdr *) skb->data);
1303 	} else {
1304 		if_idx =
1305 		wmi_data_hdr_get_if_idx((struct wmi_data_hdr *) skb->data);
1306 	}
1307 
1308 	vif = ath6kl_get_vif_by_index(ar, if_idx);
1309 	if (!vif) {
1310 		dev_kfree_skb(skb);
1311 		return;
1312 	}
1313 
1314 	/*
1315 	 * Take lock to protect buffer counts and adaptive power throughput
1316 	 * state.
1317 	 */
1318 	spin_lock_bh(&vif->if_lock);
1319 
1320 	vif->net_stats.rx_packets++;
1321 	vif->net_stats.rx_bytes += packet->act_len;
1322 
1323 	spin_unlock_bh(&vif->if_lock);
1324 
1325 
1326 	ath6kl_dbg_dump(ATH6KL_DBG_RAW_BYTES, __func__, "rx ",
1327 			skb->data, skb->len);
1328 
1329 	skb->dev = vif->ndev;
1330 
1331 	if (!test_bit(WMI_ENABLED, &ar->flag)) {
1332 		if (EPPING_ALIGNMENT_PAD > 0)
1333 			skb_pull(skb, EPPING_ALIGNMENT_PAD);
1334 		ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1335 		return;
1336 	}
1337 
1338 	ath6kl_check_wow_status(ar);
1339 
1340 	if (ept == ar->ctrl_ep) {
1341 		ath6kl_wmi_control_rx(ar->wmi, skb);
1342 		return;
1343 	}
1344 
1345 	min_hdr_len = sizeof(struct ethhdr) + sizeof(struct wmi_data_hdr) +
1346 		      sizeof(struct ath6kl_llc_snap_hdr);
1347 
1348 	dhdr = (struct wmi_data_hdr *) skb->data;
1349 
1350 	/*
1351 	 * In the case of AP mode we may receive NULL data frames
1352 	 * that do not have LLC hdr. They are 16 bytes in size.
1353 	 * Allow these frames in the AP mode.
1354 	 */
1355 	if (vif->nw_type != AP_NETWORK &&
1356 	    ((packet->act_len < min_hdr_len) ||
1357 	     (packet->act_len > WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH))) {
1358 		ath6kl_info("frame len is too short or too long\n");
1359 		vif->net_stats.rx_errors++;
1360 		vif->net_stats.rx_length_errors++;
1361 		dev_kfree_skb(skb);
1362 		return;
1363 	}
1364 
1365 	/* Get the Power save state of the STA */
1366 	if (vif->nw_type == AP_NETWORK) {
1367 		meta_type = wmi_data_hdr_get_meta(dhdr);
1368 
1369 		ps_state = !!((dhdr->info >> WMI_DATA_HDR_PS_SHIFT) &
1370 			      WMI_DATA_HDR_PS_MASK);
1371 
1372 		offset = sizeof(struct wmi_data_hdr);
1373 		trig_state = !!(le16_to_cpu(dhdr->info3) & WMI_DATA_HDR_TRIG);
1374 
1375 		switch (meta_type) {
1376 		case 0:
1377 			break;
1378 		case WMI_META_VERSION_1:
1379 			offset += sizeof(struct wmi_rx_meta_v1);
1380 			break;
1381 		case WMI_META_VERSION_2:
1382 			offset += sizeof(struct wmi_rx_meta_v2);
1383 			break;
1384 		default:
1385 			break;
1386 		}
1387 
1388 		datap = (struct ethhdr *) (skb->data + offset);
1389 		conn = ath6kl_find_sta(vif, datap->h_source);
1390 
1391 		if (!conn) {
1392 			dev_kfree_skb(skb);
1393 			return;
1394 		}
1395 
1396 		/*
1397 		 * If there is a change in PS state of the STA,
1398 		 * take appropriate steps:
1399 		 *
1400 		 * 1. If Sleep-->Awake, flush the psq for the STA and
1401 		 *    clear the PVB for the STA.
1402 		 * 2. If Awake-->Sleep, start queueing frames for
1403 		 *    the STA.
1404 		 */
1405 		prev_ps = !!(conn->sta_flags & STA_PS_SLEEP);
1406 
1407 		if (ps_state)
1408 			conn->sta_flags |= STA_PS_SLEEP;
1409 		else
1410 			conn->sta_flags &= ~STA_PS_SLEEP;
1411 
1412 		/* Accept trigger only when the station is in sleep */
1413 		if ((conn->sta_flags & STA_PS_SLEEP) && trig_state)
1414 			ath6kl_uapsd_trigger_frame_rx(vif, conn);
1415 
1416 		if (prev_ps ^ !!(conn->sta_flags & STA_PS_SLEEP)) {
1417 			if (!(conn->sta_flags & STA_PS_SLEEP)) {
1418 				struct sk_buff *skbuff = NULL;
1419 				bool is_apsdq_empty;
1420 
1421 				spin_lock_bh(&conn->psq_lock);
1422 				while ((skbuff = skb_dequeue(&conn->psq))) {
1423 					spin_unlock_bh(&conn->psq_lock);
1424 					ath6kl_data_tx(skbuff, vif->ndev);
1425 					spin_lock_bh(&conn->psq_lock);
1426 				}
1427 
1428 				is_apsdq_empty = skb_queue_empty(&conn->apsdq);
1429 				while ((skbuff = skb_dequeue(&conn->apsdq))) {
1430 					spin_unlock_bh(&conn->psq_lock);
1431 					ath6kl_data_tx(skbuff, vif->ndev);
1432 					spin_lock_bh(&conn->psq_lock);
1433 				}
1434 				spin_unlock_bh(&conn->psq_lock);
1435 
1436 				if (!is_apsdq_empty)
1437 					ath6kl_wmi_set_apsd_bfrd_traf(
1438 							ar->wmi,
1439 							vif->fw_vif_idx,
1440 							conn->aid, 0, 0);
1441 
1442 				/* Clear the PVB for this STA */
1443 				ath6kl_wmi_set_pvb_cmd(ar->wmi, vif->fw_vif_idx,
1444 						       conn->aid, 0);
1445 			}
1446 		}
1447 
1448 		/* drop NULL data frames here */
1449 		if ((packet->act_len < min_hdr_len) ||
1450 		    (packet->act_len >
1451 		     WMI_MAX_AMSDU_RX_DATA_FRAME_LENGTH)) {
1452 			dev_kfree_skb(skb);
1453 			return;
1454 		}
1455 	}
1456 
1457 	is_amsdu = wmi_data_hdr_is_amsdu(dhdr) ? true : false;
1458 	tid = wmi_data_hdr_get_up(dhdr);
1459 	seq_no = wmi_data_hdr_get_seqno(dhdr);
1460 	meta_type = wmi_data_hdr_get_meta(dhdr);
1461 	dot11_hdr = wmi_data_hdr_get_dot11(dhdr);
1462 	skb_pull(skb, sizeof(struct wmi_data_hdr));
1463 
1464 	switch (meta_type) {
1465 	case WMI_META_VERSION_1:
1466 		skb_pull(skb, sizeof(struct wmi_rx_meta_v1));
1467 		break;
1468 	case WMI_META_VERSION_2:
1469 		meta = (struct wmi_rx_meta_v2 *) skb->data;
1470 		if (meta->csum_flags & 0x1) {
1471 			skb->ip_summed = CHECKSUM_COMPLETE;
1472 			skb->csum = (__force __wsum) meta->csum;
1473 		}
1474 		skb_pull(skb, sizeof(struct wmi_rx_meta_v2));
1475 		break;
1476 	default:
1477 		break;
1478 	}
1479 
1480 	if (dot11_hdr)
1481 		status = ath6kl_wmi_dot11_hdr_remove(ar->wmi, skb);
1482 	else if (!is_amsdu)
1483 		status = ath6kl_wmi_dot3_2_dix(skb);
1484 
1485 	if (status) {
1486 		/*
1487 		 * Drop frames that could not be processed (lack of
1488 		 * memory, etc.)
1489 		 */
1490 		dev_kfree_skb(skb);
1491 		return;
1492 	}
1493 
1494 	if (!(vif->ndev->flags & IFF_UP)) {
1495 		dev_kfree_skb(skb);
1496 		return;
1497 	}
1498 
1499 	if (vif->nw_type == AP_NETWORK) {
1500 		datap = (struct ethhdr *) skb->data;
1501 		if (is_multicast_ether_addr(datap->h_dest))
1502 			/*
1503 			 * Bcast/Mcast frames should be sent to the
1504 			 * OS stack as well as on the air.
1505 			 */
1506 			skb1 = skb_copy(skb, GFP_ATOMIC);
1507 		else {
1508 			/*
1509 			 * Search for a connected STA with dstMac
1510 			 * as the Mac address. If found send the
1511 			 * frame to it on the air else send the
1512 			 * frame up the stack.
1513 			 */
1514 			conn = ath6kl_find_sta(vif, datap->h_dest);
1515 
1516 			if (conn && ar->intra_bss) {
1517 				skb1 = skb;
1518 				skb = NULL;
1519 			} else if (conn && !ar->intra_bss) {
1520 				dev_kfree_skb(skb);
1521 				skb = NULL;
1522 			}
1523 		}
1524 		if (skb1)
1525 			ath6kl_data_tx(skb1, vif->ndev);
1526 
1527 		if (skb == NULL) {
1528 			/* nothing to deliver up the stack */
1529 			return;
1530 		}
1531 	}
1532 
1533 	datap = (struct ethhdr *) skb->data;
1534 
1535 	if (is_unicast_ether_addr(datap->h_dest)) {
1536 		if (vif->nw_type == AP_NETWORK) {
1537 			conn = ath6kl_find_sta(vif, datap->h_source);
1538 			if (!conn)
1539 				return;
1540 			aggr_conn = conn->aggr_conn;
1541 		} else
1542 			aggr_conn = vif->aggr_cntxt->aggr_conn;
1543 
1544 		if (aggr_process_recv_frm(aggr_conn, tid, seq_no,
1545 		    is_amsdu, skb)) {
1546 			/* aggregation code will handle the skb */
1547 			return;
1548 		}
1549 	}
1550 
1551 	ath6kl_deliver_frames_to_nw_stack(vif->ndev, skb);
1552 }
1553 
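/*
 * Rx reorder release timer. Flushes the hold queues of any TID that made
 * no progress since the timer was armed, then re-arms itself if frames
 * are still pending in any hold queue.
 */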
1554 static void aggr_timeout(unsigned long arg)
1555 {
1556 	u8 i, j;
1557 	struct aggr_info_conn *aggr_conn = (struct aggr_info_conn *) arg;
1558 	struct rxtid *rxtid;
1559 	struct rxtid_stats *stats;
1560 
1561 	for (i = 0; i < NUM_OF_TIDS; i++) {
1562 		rxtid = &aggr_conn->rx_tid[i];
1563 		stats = &aggr_conn->stat[i];
1564 
1565 		if (!rxtid->aggr || !rxtid->timer_mon || rxtid->progress)
1566 			continue;
1567 
1568 		stats->num_timeouts++;
1569 		ath6kl_dbg(ATH6KL_DBG_AGGR,
1570 			   "aggr timeout (st %d end %d)\n",
1571 			   rxtid->seq_next,
1572 			   ((rxtid->seq_next + rxtid->hold_q_sz-1) &
1573 			    ATH6KL_MAX_SEQ_NO));
1574 		aggr_deque_frms(aggr_conn, i, 0, 0);
1575 	}
1576 
1577 	aggr_conn->timer_scheduled = false;
1578 
1579 	for (i = 0; i < NUM_OF_TIDS; i++) {
1580 		rxtid = &aggr_conn->rx_tid[i];
1581 
1582 		if (rxtid->aggr && rxtid->hold_q) {
1583 			for (j = 0; j < rxtid->hold_q_sz; j++) {
1584 				if (rxtid->hold_q[j].skb) {
1585 					aggr_conn->timer_scheduled = true;
1586 					rxtid->timer_mon = true;
1587 					rxtid->progress = false;
1588 					break;
1589 				}
1590 			}
1591 
1592 			if (j >= rxtid->hold_q_sz)
1593 				rxtid->timer_mon = false;
1594 		}
1595 	}
1596 
1597 	if (aggr_conn->timer_scheduled)
1598 		mod_timer(&aggr_conn->timer,
1599 			  jiffies + msecs_to_jiffies(AGGR_RX_TIMEOUT));
1600 }
1601 
1602 static void aggr_delete_tid_state(struct aggr_info_conn *aggr_conn, u8 tid)
1603 {
1604 	struct rxtid *rxtid;
1605 	struct rxtid_stats *stats;
1606 
1607 	if (!aggr_conn || tid >= NUM_OF_TIDS)
1608 		return;
1609 
1610 	rxtid = &aggr_conn->rx_tid[tid];
1611 	stats = &aggr_conn->stat[tid];
1612 
1613 	if (rxtid->aggr)
1614 		aggr_deque_frms(aggr_conn, tid, 0, 0);
1615 
1616 	rxtid->aggr = false;
1617 	rxtid->progress = false;
1618 	rxtid->timer_mon = false;
1619 	rxtid->win_sz = 0;
1620 	rxtid->seq_next = 0;
1621 	rxtid->hold_q_sz = 0;
1622 
1623 	kfree(rxtid->hold_q);
1624 	rxtid->hold_q = NULL;
1625 
1626 	memset(stats, 0, sizeof(struct rxtid_stats));
1627 }
1628 
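/*
 * Handle an ADDBA request event from the target: tear down any existing
 * rx reorder state for the TID and reinitialise it with the negotiated
 * window size and starting sequence number.
 */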
1629 void aggr_recv_addba_req_evt(struct ath6kl_vif *vif, u8 tid_mux, u16 seq_no,
1630 			     u8 win_sz)
1631 {
1632 	struct ath6kl_sta *sta;
1633 	struct aggr_info_conn *aggr_conn = NULL;
1634 	struct rxtid *rxtid;
1635 	struct rxtid_stats *stats;
1636 	u16 hold_q_size;
1637 	u8 tid, aid;
1638 
1639 	if (vif->nw_type == AP_NETWORK) {
1640 		aid = ath6kl_get_aid(tid_mux);
1641 		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1642 		if (sta)
1643 			aggr_conn = sta->aggr_conn;
1644 	} else
1645 		aggr_conn = vif->aggr_cntxt->aggr_conn;
1646 
1647 	if (!aggr_conn)
1648 		return;
1649 
1650 	tid = ath6kl_get_tid(tid_mux);
1651 	if (tid >= NUM_OF_TIDS)
1652 		return;
1653 
1654 	rxtid = &aggr_conn->rx_tid[tid];
1655 	stats = &aggr_conn->stat[tid];
1656 
1657 	if (win_sz < AGGR_WIN_SZ_MIN || win_sz > AGGR_WIN_SZ_MAX)
1658 		ath6kl_dbg(ATH6KL_DBG_WLAN_RX, "%s: win_sz %d, tid %d\n",
1659 			   __func__, win_sz, tid);
1660 
1661 	if (rxtid->aggr)
1662 		aggr_delete_tid_state(aggr_conn, tid);
1663 
1664 	rxtid->seq_next = seq_no;
1665 	hold_q_size = TID_WINDOW_SZ(win_sz) * sizeof(struct skb_hold_q);
1666 	rxtid->hold_q = kzalloc(hold_q_size, GFP_KERNEL);
1667 	if (!rxtid->hold_q)
1668 		return;
1669 
1670 	rxtid->win_sz = win_sz;
1671 	rxtid->hold_q_sz = TID_WINDOW_SZ(win_sz);
1672 	if (!skb_queue_empty(&rxtid->q))
1673 		return;
1674 
1675 	rxtid->aggr = true;
1676 }
1677 
1678 void aggr_conn_init(struct ath6kl_vif *vif, struct aggr_info *aggr_info,
1679 		    struct aggr_info_conn *aggr_conn)
1680 {
1681 	struct rxtid *rxtid;
1682 	u8 i;
1683 
1684 	aggr_conn->aggr_sz = AGGR_SZ_DEFAULT;
1685 	aggr_conn->dev = vif->ndev;
1686 	init_timer(&aggr_conn->timer);
1687 	aggr_conn->timer.function = aggr_timeout;
1688 	aggr_conn->timer.data = (unsigned long) aggr_conn;
1689 	aggr_conn->aggr_info = aggr_info;
1690 
1691 	aggr_conn->timer_scheduled = false;
1692 
1693 	for (i = 0; i < NUM_OF_TIDS; i++) {
1694 		rxtid = &aggr_conn->rx_tid[i];
1695 		rxtid->aggr = false;
1696 		rxtid->progress = false;
1697 		rxtid->timer_mon = false;
1698 		skb_queue_head_init(&rxtid->q);
1699 		spin_lock_init(&rxtid->lock);
1700 	}
1701 
1702 }
1703 
1704 struct aggr_info *aggr_init(struct ath6kl_vif *vif)
1705 {
1706 	struct aggr_info *p_aggr = NULL;
1707 
1708 	p_aggr = kzalloc(sizeof(struct aggr_info), GFP_KERNEL);
1709 	if (!p_aggr) {
1710 		ath6kl_err("failed to alloc memory for aggr_node\n");
1711 		return NULL;
1712 	}
1713 
1714 	p_aggr->aggr_conn = kzalloc(sizeof(struct aggr_info_conn), GFP_KERNEL);
1715 	if (!p_aggr->aggr_conn) {
1716 		ath6kl_err("failed to alloc memory for connection specific aggr info\n");
1717 		kfree(p_aggr);
1718 		return NULL;
1719 	}
1720 
1721 	aggr_conn_init(vif, p_aggr, p_aggr->aggr_conn);
1722 
1723 	skb_queue_head_init(&p_aggr->rx_amsdu_freeq);
1724 	ath6kl_alloc_netbufs(&p_aggr->rx_amsdu_freeq, AGGR_NUM_OF_FREE_NETBUFS);
1725 
1726 	return p_aggr;
1727 }
1728 
1729 void aggr_recv_delba_req_evt(struct ath6kl_vif *vif, u8 tid_mux)
1730 {
1731 	struct ath6kl_sta *sta;
1732 	struct rxtid *rxtid;
1733 	struct aggr_info_conn *aggr_conn = NULL;
1734 	u8 tid, aid;
1735 
1736 	if (vif->nw_type == AP_NETWORK) {
1737 		aid = ath6kl_get_aid(tid_mux);
1738 		sta = ath6kl_find_sta_by_aid(vif->ar, aid);
1739 		if (sta)
1740 			aggr_conn = sta->aggr_conn;
1741 	} else
1742 		aggr_conn = vif->aggr_cntxt->aggr_conn;
1743 
1744 	if (!aggr_conn)
1745 		return;
1746 
1747 	tid = ath6kl_get_tid(tid_mux);
1748 	if (tid >= NUM_OF_TIDS)
1749 		return;
1750 
1751 	rxtid = &aggr_conn->rx_tid[tid];
1752 
1753 	if (rxtid->aggr)
1754 		aggr_delete_tid_state(aggr_conn, tid);
1755 }
1756 
1757 void aggr_reset_state(struct aggr_info_conn *aggr_conn)
1758 {
1759 	u8 tid;
1760 
1761 	if (!aggr_conn)
1762 		return;
1763 
1764 	if (aggr_conn->timer_scheduled) {
1765 		del_timer(&aggr_conn->timer);
1766 		aggr_conn->timer_scheduled = false;
1767 	}
1768 
1769 	for (tid = 0; tid < NUM_OF_TIDS; tid++)
1770 		aggr_delete_tid_state(aggr_conn, tid);
1771 }
1772 
1773 /* clean up our amsdu buffer list */
1774 void ath6kl_cleanup_amsdu_rxbufs(struct ath6kl *ar)
1775 {
1776 	struct htc_packet *packet, *tmp_pkt;
1777 
1778 	spin_lock_bh(&ar->lock);
1779 	if (list_empty(&ar->amsdu_rx_buffer_queue)) {
1780 		spin_unlock_bh(&ar->lock);
1781 		return;
1782 	}
1783 
1784 	list_for_each_entry_safe(packet, tmp_pkt, &ar->amsdu_rx_buffer_queue,
1785 				 list) {
1786 		list_del(&packet->list);
1787 		spin_unlock_bh(&ar->lock);
1788 		dev_kfree_skb(packet->pkt_cntxt);
1789 		spin_lock_bh(&ar->lock);
1790 	}
1791 
1792 	spin_unlock_bh(&ar->lock);
1793 }
1794 
1795 void aggr_module_destroy(struct aggr_info *aggr_info)
1796 {
1797 	if (!aggr_info)
1798 		return;
1799 
1800 	aggr_reset_state(aggr_info->aggr_conn);
1801 	skb_queue_purge(&aggr_info->rx_amsdu_freeq);
1802 	kfree(aggr_info->aggr_conn);
1803 	kfree(aggr_info);
1804 }
1805