xref: /linux/drivers/net/wireless/marvell/mwifiex/wmm.c (revision 9410645520e9b820069761f3450ef6661418e279)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * NXP Wireless LAN device driver: WMM
4  *
5  * Copyright 2011-2020 NXP
6  */
7 
8 #include "decl.h"
9 #include "ioctl.h"
10 #include "util.h"
11 #include "fw.h"
12 #include "main.h"
13 #include "wmm.h"
14 #include "11n.h"
15 
16 
17 /* Maximum value FW can accept for driver delay in packet transmission */
18 #define DRV_PKT_DELAY_TO_FW_MAX   512
19 
20 
21 #define WMM_QUEUED_PACKET_LOWER_LIMIT   180
22 
23 #define WMM_QUEUED_PACKET_UPPER_LIMIT   200
24 
25 /* Offset for TOS field in the IP header */
26 #define IPTOS_OFFSET 5
27 
28 static bool disable_tx_amsdu;
29 module_param(disable_tx_amsdu, bool, 0644);
30 
31 /* This table inverts the tos_to_tid mapping to get a priority
32  * which is in sequential order and can therefore be compared.
33  * Use this to compare the priorities of two different TIDs.
34  */
35 const u8 tos_to_tid_inv[] = {
36 	0x02,  /* from tos_to_tid[2] = 0 */
37 	0x00,  /* from tos_to_tid[0] = 1 */
38 	0x01,  /* from tos_to_tid[1] = 2 */
39 	0x03,
40 	0x04,
41 	0x05,
42 	0x06,
43 	0x07
44 };
45 
46 /* WMM information IE */
47 static const u8 wmm_info_ie[] = { WLAN_EID_VENDOR_SPECIFIC, 0x07,
48 	0x00, 0x50, 0xf2, 0x02,
49 	0x00, 0x01, 0x00
50 };
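/* Decoded view of the wmm_info_ie bytes above, added as a reading aid (byte
 * meanings follow the WMM specification): 0x00 0x50 0xf2 is the
 * Microsoft/Wi-Fi Alliance OUI, 0x02 selects the WMM OUI type, 0x00 the WMM
 * Information Element subtype, 0x01 the WMM version, and the trailing 0x00
 * is the QoS Info field, which mwifiex_wmm_process_association_req() may
 * overwrite with priv->wmm_qosinfo when U-APSD is used.
 */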
51 
52 static const u8 wmm_aci_to_qidx_map[] = { WMM_AC_BE,
53 	WMM_AC_BK,
54 	WMM_AC_VI,
55 	WMM_AC_VO
56 };
57 
58 static u8 tos_to_tid[] = {
59 	/* TID DSCP_P2 DSCP_P1 DSCP_P0 WMM_AC */
60 	0x01,			/* 0 1 0 AC_BK */
61 	0x02,			/* 0 0 0 AC_BK */
62 	0x00,			/* 0 0 1 AC_BE */
63 	0x03,			/* 0 1 1 AC_BE */
64 	0x04,			/* 1 0 0 AC_VI */
65 	0x05,			/* 1 0 1 AC_VI */
66 	0x06,			/* 1 1 0 AC_VO */
67 	0x07			/* 1 1 1 AC_VO */
68 };
69 
70 static u8 ac_to_tid[4][2] = { {1, 2}, {0, 3}, {4, 5}, {6, 7} };
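/* Reading aid for the three tables above: an skb's user priority indexes
 * tos_to_tid[] to pick a TID, tos_to_tid_inv[] turns a TID back into a
 * comparable priority, and ac_to_tid[] lists the two TIDs belonging to each
 * access category (BK=0, BE=1, VI=2, VO=3). A minimal sketch of the round
 * trip, kept under #if 0 since it is illustrative only and not part of the
 * driver:
 */
#if 0
static u8 example_up_to_comparable_prio(u8 up)
{
	u8 tid = tos_to_tid[up & 7];	/* user priority -> TID */

	return tos_to_tid_inv[tid];	/* TID -> comparable priority */
}
#endif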
71 
72 /*
73  * This function debug prints the priority parameters for a WMM AC.
74  */
75 static void
76 mwifiex_wmm_ac_debug_print(const struct ieee_types_wmm_ac_parameters *ac_param)
77 {
78 	const char *ac_str[] = { "BK", "BE", "VI", "VO" };
79 
80 	pr_debug("info: WMM AC_%s: ACI=%d, ACM=%d, Aifsn=%d, "
81 		 "EcwMin=%d, EcwMax=%d, TxopLimit=%d\n",
82 		 ac_str[wmm_aci_to_qidx_map[(ac_param->aci_aifsn_bitmap
83 					     & MWIFIEX_ACI) >> 5]],
84 		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACI) >> 5,
85 		 (ac_param->aci_aifsn_bitmap & MWIFIEX_ACM) >> 4,
86 		 ac_param->aci_aifsn_bitmap & MWIFIEX_AIFSN,
87 		 ac_param->ecw_bitmap & MWIFIEX_ECW_MIN,
88 		 (ac_param->ecw_bitmap & MWIFIEX_ECW_MAX) >> 4,
89 		 le16_to_cpu(ac_param->tx_op_limit));
90 }
91 
92 /*
93  * This function allocates a route address list.
94  *
95  * The function also initializes the list with the provided RA.
96  */
97 static struct mwifiex_ra_list_tbl *
98 mwifiex_wmm_allocate_ralist_node(struct mwifiex_adapter *adapter, const u8 *ra)
99 {
100 	struct mwifiex_ra_list_tbl *ra_list;
101 
102 	ra_list = kzalloc(sizeof(struct mwifiex_ra_list_tbl), GFP_ATOMIC);
103 	if (!ra_list)
104 		return NULL;
105 
106 	INIT_LIST_HEAD(&ra_list->list);
107 	skb_queue_head_init(&ra_list->skb_head);
108 
109 	memcpy(ra_list->ra, ra, ETH_ALEN);
110 
111 	ra_list->total_pkt_count = 0;
112 
113 	mwifiex_dbg(adapter, INFO, "info: allocated ra_list %p\n", ra_list);
114 
115 	return ra_list;
116 }
117 
118 /* This function returns a random number used as the threshold for the
119  * number of queued packets after which BA setup is initiated.
120  */
121 static u8 mwifiex_get_random_ba_threshold(void)
122 {
123 	u64 ns;
124 	/* Set ba_packet_threshold to a random number in the range
125 	 * [BA_SETUP_PACKET_OFFSET,
126 	 * BA_SETUP_PACKET_OFFSET+BA_SETUP_MAX_PACKET_THRESHOLD-1]
127 	 */
128 	ns = ktime_get_ns();
129 	ns += (ns >> 32) + (ns >> 16);
130 
131 	return ((u8)ns % BA_SETUP_MAX_PACKET_THRESHOLD) + BA_SETUP_PACKET_OFFSET;
132 }
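/* Note on the helper above: the ktime-derived value only provides cheap
 * pseudo-randomness (it is not cryptographically random); whatever the
 * constant values are, the result always lands in [BA_SETUP_PACKET_OFFSET,
 * BA_SETUP_PACKET_OFFSET + BA_SETUP_MAX_PACKET_THRESHOLD - 1].
 */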
133 
134 /*
135  * This function allocates and adds a RA list for all TIDs
136  * with the given RA.
137  */
138 void mwifiex_ralist_add(struct mwifiex_private *priv, const u8 *ra)
139 {
140 	int i;
141 	struct mwifiex_ra_list_tbl *ra_list;
142 	struct mwifiex_adapter *adapter = priv->adapter;
143 	struct mwifiex_sta_node *node;
144 
145 
146 	for (i = 0; i < MAX_NUM_TID; ++i) {
147 		ra_list = mwifiex_wmm_allocate_ralist_node(adapter, ra);
148 		mwifiex_dbg(adapter, INFO,
149 			    "info: created ra_list %p\n", ra_list);
150 
151 		if (!ra_list)
152 			break;
153 
154 		ra_list->is_11n_enabled = 0;
155 		ra_list->tdls_link = false;
156 		ra_list->ba_status = BA_SETUP_NONE;
157 		ra_list->amsdu_in_ampdu = false;
158 		if (!mwifiex_queuing_ra_based(priv)) {
159 			if (mwifiex_is_tdls_link_setup
160 				(mwifiex_get_tdls_link_status(priv, ra))) {
161 				ra_list->tdls_link = true;
162 				ra_list->is_11n_enabled =
163 					mwifiex_tdls_peer_11n_enabled(priv, ra);
164 			} else {
165 				ra_list->is_11n_enabled = IS_11N_ENABLED(priv);
166 			}
167 		} else {
168 			spin_lock_bh(&priv->sta_list_spinlock);
169 			node = mwifiex_get_sta_entry(priv, ra);
170 			if (node)
171 				ra_list->tx_paused = node->tx_pause;
172 			ra_list->is_11n_enabled =
173 				      mwifiex_is_sta_11n_enabled(priv, node);
174 			if (ra_list->is_11n_enabled)
175 				ra_list->max_amsdu = node->max_amsdu;
176 			spin_unlock_bh(&priv->sta_list_spinlock);
177 		}
178 
179 		mwifiex_dbg(adapter, DATA, "data: ralist %p: is_11n_enabled=%d\n",
180 			    ra_list, ra_list->is_11n_enabled);
181 
182 		if (ra_list->is_11n_enabled) {
183 			ra_list->ba_pkt_count = 0;
184 			ra_list->ba_packet_thr =
185 					      mwifiex_get_random_ba_threshold();
186 		}
187 		list_add_tail(&ra_list->list,
188 			      &priv->wmm.tid_tbl_ptr[i].ra_list);
189 	}
190 }
191 
192 /*
193  * This function sets the WMM queue priorities to their default values.
194  */
195 static void mwifiex_wmm_default_queue_priorities(struct mwifiex_private *priv)
196 {
197 	/* Default queue priorities: VO->VI->BE->BK */
198 	priv->wmm.queue_priority[0] = WMM_AC_VO;
199 	priv->wmm.queue_priority[1] = WMM_AC_VI;
200 	priv->wmm.queue_priority[2] = WMM_AC_BE;
201 	priv->wmm.queue_priority[3] = WMM_AC_BK;
202 }
203 
204 /*
205  * This function maps ACs to TIDs.
206  */
207 static void
208 mwifiex_wmm_queue_priorities_tid(struct mwifiex_private *priv)
209 {
210 	struct mwifiex_wmm_desc *wmm = &priv->wmm;
211 	u8 *queue_priority = wmm->queue_priority;
212 	int i;
213 
214 	for (i = 0; i < 4; ++i) {
215 		tos_to_tid[7 - (i * 2)] = ac_to_tid[queue_priority[i]][1];
216 		tos_to_tid[6 - (i * 2)] = ac_to_tid[queue_priority[i]][0];
217 	}
218 
219 	for (i = 0; i < MAX_NUM_TID; ++i)
220 		priv->tos_to_tid_inv[tos_to_tid[i]] = (u8)i;
221 
222 	atomic_set(&wmm->highest_queued_prio, HIGH_PRIO_TID);
223 }
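/* Worked example of the loop above, assuming the default queue_priority
 * order {VO, VI, BE, BK} set by mwifiex_wmm_default_queue_priorities():
 * i = 0 writes tos_to_tid[7] = ac_to_tid[WMM_AC_VO][1] = 7 and
 * tos_to_tid[6] = ac_to_tid[WMM_AC_VO][0] = 6; i = 1 fills slots 5 and 4
 * from ac_to_tid[WMM_AC_VI], and so on, so the highest-priority AC always
 * ends up in the top TID slots and priv->tos_to_tid_inv is rebuilt to match.
 */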
224 
225 /*
226  * This function initializes WMM priority queues.
227  */
228 void
229 mwifiex_wmm_setup_queue_priorities(struct mwifiex_private *priv,
230 				   struct ieee_types_wmm_parameter *wmm_ie)
231 {
232 	u16 cw_min, avg_back_off, tmp[4];
233 	u32 i, j, num_ac;
234 	u8 ac_idx;
235 
236 	if (!wmm_ie || !priv->wmm_enabled) {
237 		/* WMM is not enabled, just set the defaults and return */
238 		mwifiex_wmm_default_queue_priorities(priv);
239 		return;
240 	}
241 
242 	mwifiex_dbg(priv->adapter, INFO,
243 		    "info: WMM Parameter IE: version=%d,\t"
244 		    "qos_info Parameter Set Count=%d, Reserved=%#x\n",
245 		    wmm_ie->version, wmm_ie->qos_info_bitmap &
246 		    IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK,
247 		    wmm_ie->reserved);
248 
249 	for (num_ac = 0; num_ac < ARRAY_SIZE(wmm_ie->ac_params); num_ac++) {
250 		u8 ecw = wmm_ie->ac_params[num_ac].ecw_bitmap;
251 		u8 aci_aifsn = wmm_ie->ac_params[num_ac].aci_aifsn_bitmap;
252 		cw_min = (1 << (ecw & MWIFIEX_ECW_MIN)) - 1;
253 		avg_back_off = (cw_min >> 1) + (aci_aifsn & MWIFIEX_AIFSN);
254 
255 		ac_idx = wmm_aci_to_qidx_map[(aci_aifsn & MWIFIEX_ACI) >> 5];
256 		priv->wmm.queue_priority[ac_idx] = ac_idx;
257 		tmp[ac_idx] = avg_back_off;
258 
259 		mwifiex_dbg(priv->adapter, INFO,
260 			    "info: WMM: CWmax=%d CWmin=%d Avg Back-off=%d\n",
261 			    (1 << ((ecw & MWIFIEX_ECW_MAX) >> 4)) - 1,
262 			    cw_min, avg_back_off);
263 		mwifiex_wmm_ac_debug_print(&wmm_ie->ac_params[num_ac]);
264 	}
265 
266 	/* Bubble sort ACs by ascending avg back-off; on ties, keep the higher-priority AC first */
267 	for (i = 0; i < num_ac; i++) {
268 		for (j = 1; j < num_ac - i; j++) {
269 			if (tmp[j - 1] > tmp[j]) {
270 				swap(tmp[j - 1], tmp[j]);
271 				swap(priv->wmm.queue_priority[j - 1],
272 				     priv->wmm.queue_priority[j]);
273 			} else if (tmp[j - 1] == tmp[j]) {
274 				if (priv->wmm.queue_priority[j - 1]
275 				    < priv->wmm.queue_priority[j])
276 					swap(priv->wmm.queue_priority[j - 1],
277 					     priv->wmm.queue_priority[j]);
278 			}
279 		}
280 	}
281 
282 	mwifiex_wmm_queue_priorities_tid(priv);
283 }
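/* Numeric example of the per-AC computation above (illustrative values, not
 * taken from any particular AP): an ECWmin field of 4 and an AIFSN of 3 give
 * cw_min = (1 << 4) - 1 = 15 and avg_back_off = (15 >> 1) + 3 = 10; the
 * bubble sort then orders queue_priority[] by these avg_back_off values.
 */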
284 
285 /*
286  * This function evaluates whether or not an AC is to be downgraded.
287  *
288  * If the AC is not enabled, the highest AC that is enabled and does
289  * not require admission control is returned.
290  */
291 static enum mwifiex_wmm_ac_e
292 mwifiex_wmm_eval_downgrade_ac(struct mwifiex_private *priv,
293 			      enum mwifiex_wmm_ac_e eval_ac)
294 {
295 	int down_ac;
296 	enum mwifiex_wmm_ac_e ret_ac;
297 	struct mwifiex_wmm_ac_status *ac_status;
298 
299 	ac_status = &priv->wmm.ac_status[eval_ac];
300 
301 	if (!ac_status->disabled)
302 		/* Okay to use this AC, it's enabled */
303 		return eval_ac;
304 
305 	/* Setup a default return value of the lowest priority */
306 	ret_ac = WMM_AC_BK;
307 
308 	/*
309 	 *  Find the highest AC that is enabled and does not require
310 	 *  admission control. The spec disallows downgrading to an AC,
311 	 *  which is enabled due to a completed admission control.
312 	 *  Unadmitted traffic is not to be sent on an AC with admitted
313 	 *  traffic.
314 	 */
315 	for (down_ac = WMM_AC_BK; down_ac < eval_ac; down_ac++) {
316 		ac_status = &priv->wmm.ac_status[down_ac];
317 
318 		if (!ac_status->disabled && !ac_status->flow_required)
319 			/* AC is enabled and does not require admission
320 			   control */
321 			ret_ac = (enum mwifiex_wmm_ac_e) down_ac;
322 	}
323 
324 	return ret_ac;
325 }
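/* Example of the search above: if VO is disabled while VI is enabled and
 * does not require admission control, traffic evaluated for VO is sent on
 * VI; enabled ACs whose flow_required flag is set are skipped, and
 * WMM_AC_BK remains the fallback when nothing else qualifies.
 */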
326 
327 /*
328  * This function sets up the WMM AC downgrade table.
329  */
330 void
331 mwifiex_wmm_setup_ac_downgrade(struct mwifiex_private *priv)
332 {
333 	int ac_val;
334 
335 	mwifiex_dbg(priv->adapter, INFO, "info: WMM: AC Priorities:\t"
336 		    "BK(0), BE(1), VI(2), VO(3)\n");
337 
338 	if (!priv->wmm_enabled) {
339 		/* WMM is not enabled, default priorities */
340 		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++)
341 			priv->wmm.ac_down_graded_vals[ac_val] =
342 						(enum mwifiex_wmm_ac_e) ac_val;
343 	} else {
344 		for (ac_val = WMM_AC_BK; ac_val <= WMM_AC_VO; ac_val++) {
345 			priv->wmm.ac_down_graded_vals[ac_val]
346 				= mwifiex_wmm_eval_downgrade_ac(priv,
347 						(enum mwifiex_wmm_ac_e) ac_val);
348 			mwifiex_dbg(priv->adapter, INFO,
349 				    "info: WMM: AC PRIO %d maps to %d\n",
350 				    ac_val,
351 				    priv->wmm.ac_down_graded_vals[ac_val]);
352 		}
353 	}
354 }
355 
356 /*
357  * This function converts the IP TOS field to a WMM AC
358  * Queue assignment.
359  */
360 static enum mwifiex_wmm_ac_e
361 mwifiex_wmm_convert_tos_to_ac(struct mwifiex_adapter *adapter, u32 tos)
362 {
363 	/* Map of TOS UP values to WMM AC */
364 	static const enum mwifiex_wmm_ac_e tos_to_ac[] = {
365 		WMM_AC_BE,
366 		WMM_AC_BK,
367 		WMM_AC_BK,
368 		WMM_AC_BE,
369 		WMM_AC_VI,
370 		WMM_AC_VI,
371 		WMM_AC_VO,
372 		WMM_AC_VO
373 	};
374 
375 	if (tos >= ARRAY_SIZE(tos_to_ac))
376 		return WMM_AC_BE;
377 
378 	return tos_to_ac[tos];
379 }
380 
381 /*
382  * This function evaluates a given TID and downgrades it to a lower
383  * TID if the WMM Parameter IE received from the AP indicates that the
384  * AC is disabled (due to the call admission control (ACM) bit). Mapping
385  * of TID to AC is taken care of internally.
386  */
387 u8 mwifiex_wmm_downgrade_tid(struct mwifiex_private *priv, u32 tid)
388 {
389 	enum mwifiex_wmm_ac_e ac, ac_down;
390 	u8 new_tid;
391 
392 	ac = mwifiex_wmm_convert_tos_to_ac(priv->adapter, tid);
393 	ac_down = priv->wmm.ac_down_graded_vals[ac];
394 
395 	/* Map the (possibly downgraded) AC back to a TID index; picking
396 	 * from the queue is taken care of by the dequeuing function
397 	 */
398 	new_tid = ac_to_tid[ac_down][tid % 2];
399 
400 	return new_tid;
401 }
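/* Example of the mapping above: with VO downgraded to BE, a packet with
 * tid 6 gets new_tid = ac_to_tid[WMM_AC_BE][6 % 2] = ac_to_tid[1][0] = 0,
 * i.e. it is queued on a BE TID instead of a VO one.
 */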
402 
403 /*
404  * This function initializes the WMM state information and the
405  * WMM data path queues.
406  */
407 void
408 mwifiex_wmm_init(struct mwifiex_adapter *adapter)
409 {
410 	int i, j;
411 	struct mwifiex_private *priv;
412 
413 	for (j = 0; j < adapter->priv_num; ++j) {
414 		priv = adapter->priv[j];
415 		if (!priv)
416 			continue;
417 
418 		for (i = 0; i < MAX_NUM_TID; ++i) {
419 			if (!disable_tx_amsdu &&
420 			    adapter->tx_buf_size > MWIFIEX_TX_DATA_BUF_SIZE_2K)
421 				priv->aggr_prio_tbl[i].amsdu =
422 							priv->tos_to_tid_inv[i];
423 			else
424 				priv->aggr_prio_tbl[i].amsdu =
425 							BA_STREAM_NOT_ALLOWED;
426 			priv->aggr_prio_tbl[i].ampdu_ap =
427 							priv->tos_to_tid_inv[i];
428 			priv->aggr_prio_tbl[i].ampdu_user =
429 							priv->tos_to_tid_inv[i];
430 		}
431 
432 		priv->aggr_prio_tbl[6].amsdu
433 					= priv->aggr_prio_tbl[6].ampdu_ap
434 					= priv->aggr_prio_tbl[6].ampdu_user
435 					= BA_STREAM_NOT_ALLOWED;
436 
437 		priv->aggr_prio_tbl[7].amsdu = priv->aggr_prio_tbl[7].ampdu_ap
438 					= priv->aggr_prio_tbl[7].ampdu_user
439 					= BA_STREAM_NOT_ALLOWED;
440 
441 		mwifiex_set_ba_params(priv);
442 		mwifiex_reset_11n_rx_seq_num(priv);
443 
444 		priv->wmm.drv_pkt_delay_max = MWIFIEX_WMM_DRV_DELAY_MAX;
445 		atomic_set(&priv->wmm.tx_pkts_queued, 0);
446 		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
447 	}
448 }
449 
450 int mwifiex_bypass_txlist_empty(struct mwifiex_adapter *adapter)
451 {
452 	struct mwifiex_private *priv;
453 	int i;
454 
455 	for (i = 0; i < adapter->priv_num; i++) {
456 		priv = adapter->priv[i];
457 		if (adapter->if_ops.is_port_ready &&
458 		    !adapter->if_ops.is_port_ready(priv))
459 			continue;
460 		if (!skb_queue_empty(&priv->bypass_txq))
461 			return false;
462 	}
463 
464 	return true;
465 }
466 
467 /*
468  * This function checks if WMM Tx queue is empty.
469  */
470 int
471 mwifiex_wmm_lists_empty(struct mwifiex_adapter *adapter)
472 {
473 	int i;
474 	struct mwifiex_private *priv;
475 
476 	for (i = 0; i < adapter->priv_num; ++i) {
477 		priv = adapter->priv[i];
478 		if (!priv->port_open &&
479 		    (priv->bss_mode != NL80211_IFTYPE_ADHOC))
480 			continue;
481 		if (adapter->if_ops.is_port_ready &&
482 		    !adapter->if_ops.is_port_ready(priv))
483 			continue;
484 		if (atomic_read(&priv->wmm.tx_pkts_queued))
485 			return false;
486 	}
487 
488 	return true;
489 }
490 
491 /*
492  * This function deletes all packets in an RA list node.
493  *
494  * The packet send completion callback handler is called with failure
495  * status for each packet, after it is dequeued, to ensure proper
496  * cleanup.
497  */
498 static void
499 mwifiex_wmm_del_pkts_in_ralist_node(struct mwifiex_private *priv,
500 				    struct mwifiex_ra_list_tbl *ra_list)
501 {
502 	struct mwifiex_adapter *adapter = priv->adapter;
503 	struct sk_buff *skb, *tmp;
504 
505 	skb_queue_walk_safe(&ra_list->skb_head, skb, tmp) {
506 		skb_unlink(skb, &ra_list->skb_head);
507 		mwifiex_write_data_complete(adapter, skb, 0, -1);
508 	}
509 }
510 
511 /*
512  * This function deletes all packets in an RA list.
513  *
514  * The packets queued in each node of the RA list are deleted node by
515  * node; the RA list nodes themselves are not freed here.
516  */
517 static void
518 mwifiex_wmm_del_pkts_in_ralist(struct mwifiex_private *priv,
519 			       struct list_head *ra_list_head)
520 {
521 	struct mwifiex_ra_list_tbl *ra_list;
522 
523 	list_for_each_entry(ra_list, ra_list_head, list)
524 		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
525 }
526 
527 /*
528  * This function deletes all packets in all RA lists.
529  */
530 static void mwifiex_wmm_cleanup_queues(struct mwifiex_private *priv)
531 {
532 	int i;
533 
534 	for (i = 0; i < MAX_NUM_TID; i++)
535 		mwifiex_wmm_del_pkts_in_ralist(priv, &priv->wmm.tid_tbl_ptr[i].
536 								       ra_list);
537 
538 	atomic_set(&priv->wmm.tx_pkts_queued, 0);
539 	atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
540 }
541 
542 /*
543  * This function deletes all route addresses from all RA lists.
544  */
545 static void mwifiex_wmm_delete_all_ralist(struct mwifiex_private *priv)
546 {
547 	struct mwifiex_ra_list_tbl *ra_list, *tmp_node;
548 	int i;
549 
550 	for (i = 0; i < MAX_NUM_TID; ++i) {
551 		mwifiex_dbg(priv->adapter, INFO,
552 			    "info: ra_list: freeing buf for tid %d\n", i);
553 		list_for_each_entry_safe(ra_list, tmp_node,
554 					 &priv->wmm.tid_tbl_ptr[i].ra_list,
555 					 list) {
556 			list_del(&ra_list->list);
557 			kfree(ra_list);
558 		}
559 
560 		INIT_LIST_HEAD(&priv->wmm.tid_tbl_ptr[i].ra_list);
561 	}
562 }
563 
564 static int mwifiex_free_ack_frame(int id, void *p, void *data)
565 {
566 	pr_warn("Have pending ack frames!\n");
567 	kfree_skb(p);
568 	return 0;
569 }
570 
571 /*
572  * This function cleans up the Tx and Rx queues.
573  *
574  * Cleanup includes -
575  *      - All packets in RA lists
576  *      - All entries in Rx reorder table
577  *      - All entries in Tx BA stream table
578  *      - MPA buffer (if required)
579  *      - All RA lists
580  */
581 void
582 mwifiex_clean_txrx(struct mwifiex_private *priv)
583 {
584 	struct sk_buff *skb, *tmp;
585 
586 	mwifiex_11n_cleanup_reorder_tbl(priv);
587 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
588 
589 	mwifiex_wmm_cleanup_queues(priv);
590 	mwifiex_11n_delete_all_tx_ba_stream_tbl(priv);
591 
592 	if (priv->adapter->if_ops.cleanup_mpa_buf)
593 		priv->adapter->if_ops.cleanup_mpa_buf(priv->adapter);
594 
595 	mwifiex_wmm_delete_all_ralist(priv);
596 	memcpy(tos_to_tid, ac_to_tid, sizeof(tos_to_tid));
597 
598 	if (priv->adapter->if_ops.clean_pcie_ring &&
599 	    !test_bit(MWIFIEX_SURPRISE_REMOVED, &priv->adapter->work_flags))
600 		priv->adapter->if_ops.clean_pcie_ring(priv->adapter);
601 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
602 
603 	skb_queue_walk_safe(&priv->tdls_txq, skb, tmp) {
604 		skb_unlink(skb, &priv->tdls_txq);
605 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
606 	}
607 
608 	skb_queue_walk_safe(&priv->bypass_txq, skb, tmp) {
609 		skb_unlink(skb, &priv->bypass_txq);
610 		mwifiex_write_data_complete(priv->adapter, skb, 0, -1);
611 	}
612 	atomic_set(&priv->adapter->bypass_tx_pending, 0);
613 
614 	idr_for_each(&priv->ack_status_frames, mwifiex_free_ack_frame, NULL);
615 	idr_destroy(&priv->ack_status_frames);
616 }
617 
618 /*
619  * This function retrieves a particular RA list node, matching with the
620  * given TID and RA address.
621  */
622 struct mwifiex_ra_list_tbl *
623 mwifiex_wmm_get_ralist_node(struct mwifiex_private *priv, u8 tid,
624 			    const u8 *ra_addr)
625 {
626 	struct mwifiex_ra_list_tbl *ra_list;
627 
628 	list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[tid].ra_list,
629 			    list) {
630 		if (!memcmp(ra_list->ra, ra_addr, ETH_ALEN))
631 			return ra_list;
632 	}
633 
634 	return NULL;
635 }
636 
637 void mwifiex_update_ralist_tx_pause(struct mwifiex_private *priv, u8 *mac,
638 				    u8 tx_pause)
639 {
640 	struct mwifiex_ra_list_tbl *ra_list;
641 	u32 pkt_cnt = 0, tx_pkts_queued;
642 	int i;
643 
644 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
645 
646 	for (i = 0; i < MAX_NUM_TID; ++i) {
647 		ra_list = mwifiex_wmm_get_ralist_node(priv, i, mac);
648 		if (ra_list && ra_list->tx_paused != tx_pause) {
649 			pkt_cnt += ra_list->total_pkt_count;
650 			ra_list->tx_paused = tx_pause;
651 			if (tx_pause)
652 				priv->wmm.pkts_paused[i] +=
653 					ra_list->total_pkt_count;
654 			else
655 				priv->wmm.pkts_paused[i] -=
656 					ra_list->total_pkt_count;
657 		}
658 	}
659 
660 	if (pkt_cnt) {
661 		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
662 		if (tx_pause)
663 			tx_pkts_queued -= pkt_cnt;
664 		else
665 			tx_pkts_queued += pkt_cnt;
666 
667 		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
668 		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
669 	}
670 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
671 }
672 
673 /* This function updates the tx_pause state of non-TDLS peer RA lists
674  * during TDLS channel switching
675  */
676 void mwifiex_update_ralist_tx_pause_in_tdls_cs(struct mwifiex_private *priv,
677 					       u8 *mac, u8 tx_pause)
678 {
679 	struct mwifiex_ra_list_tbl *ra_list;
680 	u32 pkt_cnt = 0, tx_pkts_queued;
681 	int i;
682 
683 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
684 
685 	for (i = 0; i < MAX_NUM_TID; ++i) {
686 		list_for_each_entry(ra_list, &priv->wmm.tid_tbl_ptr[i].ra_list,
687 				    list) {
688 			if (!memcmp(ra_list->ra, mac, ETH_ALEN))
689 				continue;
690 
691 			if (ra_list->tx_paused != tx_pause) {
692 				pkt_cnt += ra_list->total_pkt_count;
693 				ra_list->tx_paused = tx_pause;
694 				if (tx_pause)
695 					priv->wmm.pkts_paused[i] +=
696 						ra_list->total_pkt_count;
697 				else
698 					priv->wmm.pkts_paused[i] -=
699 						ra_list->total_pkt_count;
700 			}
701 		}
702 	}
703 
704 	if (pkt_cnt) {
705 		tx_pkts_queued = atomic_read(&priv->wmm.tx_pkts_queued);
706 		if (tx_pause)
707 			tx_pkts_queued -= pkt_cnt;
708 		else
709 			tx_pkts_queued += pkt_cnt;
710 
711 		atomic_set(&priv->wmm.tx_pkts_queued, tx_pkts_queued);
712 		atomic_set(&priv->wmm.highest_queued_prio, HIGH_PRIO_TID);
713 	}
714 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
715 }
716 
717 /*
718  * This function retrieves an RA list node for a given TID and
719  * RA address pair.
720  *
721  * If no such node is found, a new node is added first and then
722  * retrieved.
723  */
724 struct mwifiex_ra_list_tbl *
725 mwifiex_wmm_get_queue_raptr(struct mwifiex_private *priv, u8 tid,
726 			    const u8 *ra_addr)
727 {
728 	struct mwifiex_ra_list_tbl *ra_list;
729 
730 	ra_list = mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
731 	if (ra_list)
732 		return ra_list;
733 	mwifiex_ralist_add(priv, ra_addr);
734 
735 	return mwifiex_wmm_get_ralist_node(priv, tid, ra_addr);
736 }
737 
738 /*
739  * This function deletes the RA list nodes for a given MAC address, for all TIDs.
740  * It also decrements the TX pending count accordingly.
741  */
742 void
743 mwifiex_wmm_del_peer_ra_list(struct mwifiex_private *priv, const u8 *ra_addr)
744 {
745 	struct mwifiex_ra_list_tbl *ra_list;
746 	int i;
747 
748 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
749 
750 	for (i = 0; i < MAX_NUM_TID; ++i) {
751 		ra_list = mwifiex_wmm_get_ralist_node(priv, i, ra_addr);
752 
753 		if (!ra_list)
754 			continue;
755 		mwifiex_wmm_del_pkts_in_ralist_node(priv, ra_list);
756 		if (ra_list->tx_paused)
757 			priv->wmm.pkts_paused[i] -= ra_list->total_pkt_count;
758 		else
759 			atomic_sub(ra_list->total_pkt_count,
760 				   &priv->wmm.tx_pkts_queued);
761 		list_del(&ra_list->list);
762 		kfree(ra_list);
763 	}
764 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
765 }
766 
767 /*
768  * This function checks if a particular RA list node exists in a given TID
769  * table index.
770  */
771 int
772 mwifiex_is_ralist_valid(struct mwifiex_private *priv,
773 			struct mwifiex_ra_list_tbl *ra_list, int ptr_index)
774 {
775 	struct mwifiex_ra_list_tbl *rlist;
776 
777 	list_for_each_entry(rlist, &priv->wmm.tid_tbl_ptr[ptr_index].ra_list,
778 			    list) {
779 		if (rlist == ra_list)
780 			return true;
781 	}
782 
783 	return false;
784 }
785 
786 /*
787  * This function adds a packet to bypass TX queue.
788  * This is special TX queue for packets which can be sent even when port_open
789  * is false.
790  */
791 void
792 mwifiex_wmm_add_buf_bypass_txqueue(struct mwifiex_private *priv,
793 				   struct sk_buff *skb)
794 {
795 	skb_queue_tail(&priv->bypass_txq, skb);
796 }
797 
798 /*
799  * This function adds a packet to WMM queue.
800  *
801  * In disconnected state the packet is immediately dropped and the
802  * packet send completion callback is called with status failure.
803  *
804  * Otherwise, the correct RA list node is located and the packet
805  * is queued at the list tail.
806  */
807 void
808 mwifiex_wmm_add_buf_txqueue(struct mwifiex_private *priv,
809 			    struct sk_buff *skb)
810 {
811 	struct mwifiex_adapter *adapter = priv->adapter;
812 	u32 tid;
813 	struct mwifiex_ra_list_tbl *ra_list;
814 	u8 ra[ETH_ALEN], tid_down;
815 	struct list_head list_head;
816 	int tdls_status = TDLS_NOT_SETUP;
817 	struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
818 	struct mwifiex_txinfo *tx_info = MWIFIEX_SKB_TXCB(skb);
819 
820 	memcpy(ra, eth_hdr->h_dest, ETH_ALEN);
821 
822 	if (GET_BSS_ROLE(priv) == MWIFIEX_BSS_ROLE_STA &&
823 	    ISSUPP_TDLS_ENABLED(adapter->fw_cap_info)) {
824 		if (ntohs(eth_hdr->h_proto) == ETH_P_TDLS)
825 			mwifiex_dbg(adapter, DATA,
826 				    "TDLS setup packet for %pM.\t"
827 				    "Don't block\n", ra);
828 		else if (memcmp(priv->cfg_bssid, ra, ETH_ALEN))
829 			tdls_status = mwifiex_get_tdls_link_status(priv, ra);
830 	}
831 
832 	if (!priv->media_connected && !mwifiex_is_skb_mgmt_frame(skb)) {
833 		mwifiex_dbg(adapter, DATA, "data: drop packet in disconnect\n");
834 		mwifiex_write_data_complete(adapter, skb, 0, -1);
835 		return;
836 	}
837 
838 	tid = skb->priority;
839 
840 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
841 
842 	tid_down = mwifiex_wmm_downgrade_tid(priv, tid);
843 
844 	/* In infrastructure mode the RA list was already created during
845 	   association, so get_queue_raptr need not be called; there is
846 	   only one RA pointer per TID in that case */
847 	if (!mwifiex_queuing_ra_based(priv) &&
848 	    !mwifiex_is_skb_mgmt_frame(skb)) {
849 		switch (tdls_status) {
850 		case TDLS_SETUP_COMPLETE:
851 		case TDLS_CHAN_SWITCHING:
852 		case TDLS_IN_BASE_CHAN:
853 		case TDLS_IN_OFF_CHAN:
854 			ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down,
855 							      ra);
856 			tx_info->flags |= MWIFIEX_BUF_FLAG_TDLS_PKT;
857 			break;
858 		case TDLS_SETUP_INPROGRESS:
859 			skb_queue_tail(&priv->tdls_txq, skb);
860 			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
861 			return;
862 		default:
863 			list_head = priv->wmm.tid_tbl_ptr[tid_down].ra_list;
864 			ra_list = list_first_entry_or_null(&list_head,
865 					struct mwifiex_ra_list_tbl, list);
866 			break;
867 		}
868 	} else {
869 		memcpy(ra, skb->data, ETH_ALEN);
870 		if (is_multicast_ether_addr(ra) || mwifiex_is_skb_mgmt_frame(skb))
871 			eth_broadcast_addr(ra);
872 		ra_list = mwifiex_wmm_get_queue_raptr(priv, tid_down, ra);
873 	}
874 
875 	if (!ra_list) {
876 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
877 		mwifiex_write_data_complete(adapter, skb, 0, -1);
878 		return;
879 	}
880 
881 	skb_queue_tail(&ra_list->skb_head, skb);
882 
883 	ra_list->ba_pkt_count++;
884 	ra_list->total_pkt_count++;
885 
886 	if (atomic_read(&priv->wmm.highest_queued_prio) <
887 						priv->tos_to_tid_inv[tid_down])
888 		atomic_set(&priv->wmm.highest_queued_prio,
889 			   priv->tos_to_tid_inv[tid_down]);
890 
891 	if (ra_list->tx_paused)
892 		priv->wmm.pkts_paused[tid_down]++;
893 	else
894 		atomic_inc(&priv->wmm.tx_pkts_queued);
895 
896 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
897 }
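/* Bookkeeping note for the enqueue path above: every queued packet bumps
 * either pkts_paused[tid_down] (when the RA list is paused) or the
 * tx_pkts_queued atomic, and highest_queued_prio is raised to the packet's
 * tos_to_tid_inv priority so that mwifiex_wmm_get_highest_priolist_ptr()
 * starts its scan high enough to find it.
 */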
898 
899 /*
900  * This function processes the get WMM status command response from firmware.
901  *
902  * The response may contain multiple TLVs -
903  *      - AC Queue status TLVs
904  *      - Current WMM Parameter IE TLV
905  *      - Admission Control action frame TLVs
906  *
907  * This function parses the TLVs and then calls further specific functions
908  * to process any changes in the queue prioritization or state.
909  */
910 int mwifiex_ret_wmm_get_status(struct mwifiex_private *priv,
911 			       const struct host_cmd_ds_command *resp)
912 {
913 	u8 *curr = (u8 *) &resp->params.get_wmm_status;
914 	uint16_t resp_len = le16_to_cpu(resp->size), tlv_len;
915 	int mask = IEEE80211_WMM_IE_AP_QOSINFO_PARAM_SET_CNT_MASK;
916 	bool valid = true;
917 
918 	struct mwifiex_ie_types_data *tlv_hdr;
919 	struct mwifiex_ie_types_wmm_queue_status *tlv_wmm_qstatus;
920 	struct ieee_types_wmm_parameter *wmm_param_ie = NULL;
921 	struct mwifiex_wmm_ac_status *ac_status;
922 
923 	mwifiex_dbg(priv->adapter, INFO,
924 		    "info: WMM: WMM_GET_STATUS cmdresp received: %d\n",
925 		    resp_len);
926 
927 	while ((resp_len >= sizeof(tlv_hdr->header)) && valid) {
928 		tlv_hdr = (struct mwifiex_ie_types_data *) curr;
929 		tlv_len = le16_to_cpu(tlv_hdr->header.len);
930 
931 		if (resp_len < tlv_len + sizeof(tlv_hdr->header))
932 			break;
933 
934 		switch (le16_to_cpu(tlv_hdr->header.type)) {
935 		case TLV_TYPE_WMMQSTATUS:
936 			tlv_wmm_qstatus =
937 				(struct mwifiex_ie_types_wmm_queue_status *)
938 				tlv_hdr;
939 			mwifiex_dbg(priv->adapter, CMD,
940 				    "info: CMD_RESP: WMM_GET_STATUS:\t"
941 				    "QSTATUS TLV: %d, %d, %d\n",
942 				    tlv_wmm_qstatus->queue_index,
943 				    tlv_wmm_qstatus->flow_required,
944 				    tlv_wmm_qstatus->disabled);
945 
946 			ac_status = &priv->wmm.ac_status[tlv_wmm_qstatus->
947 							 queue_index];
948 			ac_status->disabled = tlv_wmm_qstatus->disabled;
949 			ac_status->flow_required =
950 						tlv_wmm_qstatus->flow_required;
951 			ac_status->flow_created = tlv_wmm_qstatus->flow_created;
952 			break;
953 
954 		case WLAN_EID_VENDOR_SPECIFIC:
955 			/*
956 			 * Point the regular IEEE IE 2 bytes into the Marvell IE
957 			 *   and setup the IEEE IE type and length byte fields
958 			 */
959 
960 			wmm_param_ie =
961 				(struct ieee_types_wmm_parameter *) (curr +
962 								    2);
963 			wmm_param_ie->vend_hdr.len = (u8) tlv_len;
964 			wmm_param_ie->vend_hdr.element_id =
965 						WLAN_EID_VENDOR_SPECIFIC;
966 
967 			mwifiex_dbg(priv->adapter, CMD,
968 				    "info: CMD_RESP: WMM_GET_STATUS:\t"
969 				    "WMM Parameter Set Count: %d\n",
970 				    wmm_param_ie->qos_info_bitmap & mask);
971 
972 			if (wmm_param_ie->vend_hdr.len + 2 >
973 				sizeof(struct ieee_types_wmm_parameter))
974 				break;
975 
976 			memcpy((u8 *) &priv->curr_bss_params.bss_descriptor.
977 			       wmm_ie, wmm_param_ie,
978 			       wmm_param_ie->vend_hdr.len + 2);
979 
980 			break;
981 
982 		default:
983 			valid = false;
984 			break;
985 		}
986 
987 		curr += (tlv_len + sizeof(tlv_hdr->header));
988 		resp_len -= (tlv_len + sizeof(tlv_hdr->header));
989 	}
990 
991 	mwifiex_wmm_setup_queue_priorities(priv, wmm_param_ie);
992 	mwifiex_wmm_setup_ac_downgrade(priv);
993 
994 	return 0;
995 }
996 
997 /*
998  * Callback handler from the command module to allow insertion of a WMM TLV.
999  *
1000  * If the BSS we are associating to supports WMM, this function adds the
1001  * required WMM Information IE to the association request command buffer in
1002  * the form of a Marvell extended IEEE IE.
1003  */
1004 u32
1005 mwifiex_wmm_process_association_req(struct mwifiex_private *priv,
1006 				    u8 **assoc_buf,
1007 				    struct ieee_types_wmm_parameter *wmm_ie,
1008 				    struct ieee80211_ht_cap *ht_cap)
1009 {
1010 	struct mwifiex_ie_types_wmm_param_set *wmm_tlv;
1011 	u32 ret_len = 0;
1012 
1013 	/* Null checks */
1014 	if (!assoc_buf)
1015 		return 0;
1016 	if (!(*assoc_buf))
1017 		return 0;
1018 
1019 	if (!wmm_ie)
1020 		return 0;
1021 
1022 	mwifiex_dbg(priv->adapter, INFO,
1023 		    "info: WMM: process assoc req: bss->wmm_ie=%#x\n",
1024 		    wmm_ie->vend_hdr.element_id);
1025 
1026 	if ((priv->wmm_required ||
1027 	     (ht_cap && (priv->adapter->config_bands & BAND_GN ||
1028 	     priv->adapter->config_bands & BAND_AN))) &&
1029 	    wmm_ie->vend_hdr.element_id == WLAN_EID_VENDOR_SPECIFIC) {
1030 		wmm_tlv = (struct mwifiex_ie_types_wmm_param_set *) *assoc_buf;
1031 		wmm_tlv->header.type = cpu_to_le16((u16) wmm_info_ie[0]);
1032 		wmm_tlv->header.len = cpu_to_le16((u16) wmm_info_ie[1]);
1033 		memcpy(wmm_tlv->wmm_ie, &wmm_info_ie[2],
1034 		       le16_to_cpu(wmm_tlv->header.len));
1035 		if (wmm_ie->qos_info_bitmap & IEEE80211_WMM_IE_AP_QOSINFO_UAPSD)
1036 			memcpy((u8 *) (wmm_tlv->wmm_ie
1037 				       + le16_to_cpu(wmm_tlv->header.len)
1038 				       - sizeof(priv->wmm_qosinfo)),
1039 			       &priv->wmm_qosinfo, sizeof(priv->wmm_qosinfo));
1040 
1041 		ret_len = sizeof(wmm_tlv->header)
1042 			  + le16_to_cpu(wmm_tlv->header.len);
1043 
1044 		*assoc_buf += ret_len;
1045 	}
1046 
1047 	return ret_len;
1048 }
1049 
1050 /*
1051  * This function computes the time delay in the driver queues for a
1052  * given packet.
1053  *
1054  * When the packet is received at the OS/Driver interface, the current
1055  * time is set in the packet structure. The difference between the present
1056  * time and that received time is computed in this function and limited
1057  * based on pre-compiled limits in the driver.
1058  */
1059 u8
1060 mwifiex_wmm_compute_drv_pkt_delay(struct mwifiex_private *priv,
1061 				  const struct sk_buff *skb)
1062 {
1063 	u32 queue_delay = ktime_to_ms(net_timedelta(skb->tstamp));
1064 	u8 ret_val;
1065 
1066 	/*
1067 	 * Queue delay is passed as a uint8 in units of 2ms (ms shifted
1068 	 *  by 1). Min value (other than 0) is therefore 2ms, max is 510ms.
1069 	 *
1070 	 * Pass max value if queue_delay is beyond the uint8 range
1071 	 */
1072 	ret_val = (u8) (min(queue_delay, priv->wmm.drv_pkt_delay_max) >> 1);
1073 
1074 	mwifiex_dbg(priv->adapter, DATA, "data: WMM: Pkt Delay: %d ms,\t"
1075 		    "%d ms sent to FW\n", queue_delay, ret_val);
1076 
1077 	return ret_val;
1078 }
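/* Example of the encoding above: a packet that waited 120 ms in the driver
 * queues is reported as 120 >> 1 = 60; delays at or above
 * priv->wmm.drv_pkt_delay_max are clamped first, so the value sent to
 * firmware never exceeds drv_pkt_delay_max >> 1.
 */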
1079 
1080 /*
1081  * This function retrieves the highest priority RA list table pointer.
1082  */
1083 static struct mwifiex_ra_list_tbl *
1084 mwifiex_wmm_get_highest_priolist_ptr(struct mwifiex_adapter *adapter,
1085 				     struct mwifiex_private **priv, int *tid)
1086 {
1087 	struct mwifiex_private *priv_tmp;
1088 	struct mwifiex_ra_list_tbl *ptr;
1089 	struct mwifiex_tid_tbl *tid_ptr;
1090 	atomic_t *hqp;
1091 	int i, j;
1092 
1093 	/* check the BSS with highest priority first */
1094 	for (j = adapter->priv_num - 1; j >= 0; --j) {
1095 		/* iterate over BSSes with equal priority */
1096 		list_for_each_entry(adapter->bss_prio_tbl[j].bss_prio_cur,
1097 				    &adapter->bss_prio_tbl[j].bss_prio_head,
1098 				    list) {
1099 
1100 try_again:
1101 			priv_tmp = adapter->bss_prio_tbl[j].bss_prio_cur->priv;
1102 
1103 			if (((priv_tmp->bss_mode != NL80211_IFTYPE_ADHOC) &&
1104 			     !priv_tmp->port_open) ||
1105 			    (atomic_read(&priv_tmp->wmm.tx_pkts_queued) == 0))
1106 				continue;
1107 
1108 			if (adapter->if_ops.is_port_ready &&
1109 			    !adapter->if_ops.is_port_ready(priv_tmp))
1110 				continue;
1111 
1112 			/* iterate over the WMM queues of the BSS */
1113 			hqp = &priv_tmp->wmm.highest_queued_prio;
1114 			for (i = atomic_read(hqp); i >= LOW_PRIO_TID; --i) {
1115 
1116 				spin_lock_bh(&priv_tmp->wmm.ra_list_spinlock);
1117 
1118 				tid_ptr = &(priv_tmp)->wmm.
1119 					tid_tbl_ptr[tos_to_tid[i]];
1120 
1121 				/* iterate over receiver addresses */
1122 				list_for_each_entry(ptr, &tid_ptr->ra_list,
1123 						    list) {
1124 
1125 					if (!ptr->tx_paused &&
1126 					    !skb_queue_empty(&ptr->skb_head))
1127 						/* holds both locks */
1128 						goto found;
1129 				}
1130 
1131 				spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
1132 			}
1133 
1134 			if (atomic_read(&priv_tmp->wmm.tx_pkts_queued) != 0) {
1135 				atomic_set(&priv_tmp->wmm.highest_queued_prio,
1136 					   HIGH_PRIO_TID);
1137 				/* Iterate current private once more, since
1138 				 * there are still packets in the data queue
1139 				 */
1140 				goto try_again;
1141 			} else
1142 				atomic_set(&priv_tmp->wmm.highest_queued_prio,
1143 					   NO_PKT_PRIO_TID);
1144 		}
1145 	}
1146 
1147 	return NULL;
1148 
1149 found:
1150 	/* holds ra_list_spinlock */
1151 	if (atomic_read(hqp) > i)
1152 		atomic_set(hqp, i);
1153 	spin_unlock_bh(&priv_tmp->wmm.ra_list_spinlock);
1154 
1155 	*priv = priv_tmp;
1156 	*tid = tos_to_tid[i];
1157 
1158 	return ptr;
1159 }
1160 
1161 /* This function rotates the RA and BSS lists so packets are picked round robin.
1162  *
1163  * After a packet is successfully transmitted, rotate the RA list so that the
1164  * RA next to the one just served comes first in the list. This way the RAs
1165  * are picked in a round-robin fashion. The same applies to BSS nodes of equal
1166  * priority.
1167  *
1168  * The function also increments the wmm.packets_out counter.
1169  */
1170 void mwifiex_rotate_priolists(struct mwifiex_private *priv,
1171 				 struct mwifiex_ra_list_tbl *ra,
1172 				 int tid)
1173 {
1174 	struct mwifiex_adapter *adapter = priv->adapter;
1175 	struct mwifiex_bss_prio_tbl *tbl = adapter->bss_prio_tbl;
1176 	struct mwifiex_tid_tbl *tid_ptr = &priv->wmm.tid_tbl_ptr[tid];
1177 
1178 	spin_lock_bh(&tbl[priv->bss_priority].bss_prio_lock);
1179 	/*
1180 	 * dirty trick: we remove 'head' temporarily and reinsert it after
1181 	 * curr bss node. imagine list to stay fixed while head is moved
1182 	 */
1183 	list_move(&tbl[priv->bss_priority].bss_prio_head,
1184 		  &tbl[priv->bss_priority].bss_prio_cur->list);
1185 	spin_unlock_bh(&tbl[priv->bss_priority].bss_prio_lock);
1186 
1187 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
1188 	if (mwifiex_is_ralist_valid(priv, ra, tid)) {
1189 		priv->wmm.packets_out[tid]++;
1190 		/* same as above */
1191 		list_move(&tid_ptr->ra_list, &ra->list);
1192 	}
1193 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1194 }
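/* Illustration of the list_move() trick above: with RA nodes A -> B -> C and
 * the list head sitting before A, serving a packet from B moves the head to
 * just after B, so the next list_for_each_entry() pass visits C, then A,
 * then B - a round-robin rotation without touching the nodes themselves.
 */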
1195 
1196 /*
1197  * This function checks if 11n aggregation is possible.
1198  */
1199 static int
1200 mwifiex_is_11n_aggragation_possible(struct mwifiex_private *priv,
1201 				    struct mwifiex_ra_list_tbl *ptr,
1202 				    int max_buf_size)
1203 {
1204 	int count = 0, total_size = 0;
1205 	struct sk_buff *skb, *tmp;
1206 	int max_amsdu_size;
1207 
1208 	if (priv->bss_role == MWIFIEX_BSS_ROLE_UAP && priv->ap_11n_enabled &&
1209 	    ptr->is_11n_enabled)
1210 		max_amsdu_size = min_t(int, ptr->max_amsdu, max_buf_size);
1211 	else
1212 		max_amsdu_size = max_buf_size;
1213 
1214 	skb_queue_walk_safe(&ptr->skb_head, skb, tmp) {
1215 		total_size += skb->len;
1216 		if (total_size >= max_amsdu_size)
1217 			break;
1218 		if (++count >= MIN_NUM_AMSDU)
1219 			return true;
1220 	}
1221 
1222 	return false;
1223 }
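/* The walk above answers "is an A-MSDU worth building right now?": it
 * returns true only if at least MIN_NUM_AMSDU queued packets fit before the
 * accumulated size reaches max_amsdu_size; a single large frame close to
 * the buffer limit therefore does not qualify.
 */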
1224 
1225 /*
1226  * This function sends a single packet to firmware for transmission.
1227  */
1228 static void
1229 mwifiex_send_single_packet(struct mwifiex_private *priv,
1230 			   struct mwifiex_ra_list_tbl *ptr, int ptr_index)
1231 			   __releases(&priv->wmm.ra_list_spinlock)
1232 {
1233 	struct sk_buff *skb, *skb_next;
1234 	struct mwifiex_tx_param tx_param;
1235 	struct mwifiex_adapter *adapter = priv->adapter;
1236 	struct mwifiex_txinfo *tx_info;
1237 
1238 	if (skb_queue_empty(&ptr->skb_head)) {
1239 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1240 		mwifiex_dbg(adapter, DATA, "data: nothing to send\n");
1241 		return;
1242 	}
1243 
1244 	skb = skb_dequeue(&ptr->skb_head);
1245 
1246 	tx_info = MWIFIEX_SKB_TXCB(skb);
1247 	mwifiex_dbg(adapter, DATA,
1248 		    "data: dequeuing the packet %p %p\n", ptr, skb);
1249 
1250 	ptr->total_pkt_count--;
1251 
1252 	if (!skb_queue_empty(&ptr->skb_head))
1253 		skb_next = skb_peek(&ptr->skb_head);
1254 	else
1255 		skb_next = NULL;
1256 
1257 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1258 
1259 	tx_param.next_pkt_len = ((skb_next) ? skb_next->len +
1260 				sizeof(struct txpd) : 0);
1261 
1262 	if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1263 		/* Queue the packet back onto the RA list */
1264 		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1265 
1266 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1267 			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1268 			mwifiex_write_data_complete(adapter, skb, 0, -1);
1269 			return;
1270 		}
1271 
1272 		skb_queue_tail(&ptr->skb_head, skb);
1273 
1274 		ptr->total_pkt_count++;
1275 		ptr->ba_pkt_count++;
1276 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1277 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1278 	} else {
1279 		mwifiex_rotate_priolists(priv, ptr, ptr_index);
1280 		atomic_dec(&priv->wmm.tx_pkts_queued);
1281 	}
1282 }
1283 
1284 /*
1285  * This function checks if the first packet in the given RA list
1286  * is already processed or not.
1287  */
1288 static int
1289 mwifiex_is_ptr_processed(struct mwifiex_private *priv,
1290 			 struct mwifiex_ra_list_tbl *ptr)
1291 {
1292 	struct sk_buff *skb;
1293 	struct mwifiex_txinfo *tx_info;
1294 
1295 	if (skb_queue_empty(&ptr->skb_head))
1296 		return false;
1297 
1298 	skb = skb_peek(&ptr->skb_head);
1299 
1300 	tx_info = MWIFIEX_SKB_TXCB(skb);
1301 	if (tx_info->flags & MWIFIEX_BUF_FLAG_REQUEUED_PKT)
1302 		return true;
1303 
1304 	return false;
1305 }
1306 
1307 /*
1308  * This function sends a single processed packet to firmware for
1309  * transmission.
1310  */
1311 static void
1312 mwifiex_send_processed_packet(struct mwifiex_private *priv,
1313 			      struct mwifiex_ra_list_tbl *ptr, int ptr_index)
1314 				__releases(&priv->wmm.ra_list_spinlock)
1315 {
1316 	struct mwifiex_tx_param tx_param;
1317 	struct mwifiex_adapter *adapter = priv->adapter;
1318 	int ret = -1;
1319 	struct sk_buff *skb, *skb_next;
1320 	struct mwifiex_txinfo *tx_info;
1321 
1322 	if (skb_queue_empty(&ptr->skb_head)) {
1323 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1324 		return;
1325 	}
1326 
1327 	skb = skb_dequeue(&ptr->skb_head);
1328 
1329 	if (adapter->data_sent || adapter->tx_lock_flag) {
1330 		ptr->total_pkt_count--;
1331 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1332 		skb_queue_tail(&adapter->tx_data_q, skb);
1333 		atomic_dec(&priv->wmm.tx_pkts_queued);
1334 		atomic_inc(&adapter->tx_queued);
1335 		return;
1336 	}
1337 
1338 	if (!skb_queue_empty(&ptr->skb_head))
1339 		skb_next = skb_peek(&ptr->skb_head);
1340 	else
1341 		skb_next = NULL;
1342 
1343 	tx_info = MWIFIEX_SKB_TXCB(skb);
1344 
1345 	spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1346 
1347 	tx_param.next_pkt_len =
1348 		((skb_next) ? skb_next->len +
1349 		 sizeof(struct txpd) : 0);
1350 	if (adapter->iface_type == MWIFIEX_USB) {
1351 		ret = adapter->if_ops.host_to_card(adapter, priv->usb_port,
1352 						   skb, &tx_param);
1353 	} else {
1354 		ret = adapter->if_ops.host_to_card(adapter, MWIFIEX_TYPE_DATA,
1355 						   skb, &tx_param);
1356 	}
1357 
1358 	switch (ret) {
1359 	case -EBUSY:
1360 		mwifiex_dbg(adapter, ERROR, "data: -EBUSY is returned\n");
1361 		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1362 
1363 		if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1364 			spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1365 			mwifiex_write_data_complete(adapter, skb, 0, -1);
1366 			return;
1367 		}
1368 
1369 		skb_queue_tail(&ptr->skb_head, skb);
1370 
1371 		tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1372 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1373 		break;
1374 	case -1:
1375 		mwifiex_dbg(adapter, ERROR, "host_to_card failed: %#x\n", ret);
1376 		adapter->dbg.num_tx_host_to_card_failure++;
1377 		mwifiex_write_data_complete(adapter, skb, 0, ret);
1378 		break;
1379 	case -EINPROGRESS:
1380 		break;
1381 	case 0:
1382 		mwifiex_write_data_complete(adapter, skb, 0, ret);
1383 		break;
1384 	default:
1385 		break;
1386 	}
1387 	if (ret != -EBUSY) {
1388 		mwifiex_rotate_priolists(priv, ptr, ptr_index);
1389 		atomic_dec(&priv->wmm.tx_pkts_queued);
1390 		spin_lock_bh(&priv->wmm.ra_list_spinlock);
1391 		ptr->total_pkt_count--;
1392 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1393 	}
1394 }
1395 
1396 /*
1397  * This function dequeues a packet from the highest priority list
1398  * and transmits it.
1399  */
1400 static int
1401 mwifiex_dequeue_tx_packet(struct mwifiex_adapter *adapter)
1402 {
1403 	struct mwifiex_ra_list_tbl *ptr;
1404 	struct mwifiex_private *priv = NULL;
1405 	int ptr_index = 0;
1406 	u8 ra[ETH_ALEN];
1407 	int tid_del = 0, tid = 0;
1408 
1409 	ptr = mwifiex_wmm_get_highest_priolist_ptr(adapter, &priv, &ptr_index);
1410 	if (!ptr)
1411 		return -1;
1412 
1413 	tid = mwifiex_get_tid(ptr);
1414 
1415 	mwifiex_dbg(adapter, DATA, "data: tid=%d\n", tid);
1416 
1417 	spin_lock_bh(&priv->wmm.ra_list_spinlock);
1418 	if (!mwifiex_is_ralist_valid(priv, ptr, ptr_index)) {
1419 		spin_unlock_bh(&priv->wmm.ra_list_spinlock);
1420 		return -1;
1421 	}
1422 
1423 	if (mwifiex_is_ptr_processed(priv, ptr)) {
1424 		mwifiex_send_processed_packet(priv, ptr, ptr_index);
1425 		/* ra_list_spinlock has been freed in
1426 		   mwifiex_send_processed_packet() */
1427 		return 0;
1428 	}
1429 
1430 	if (!ptr->is_11n_enabled ||
1431 		ptr->ba_status ||
1432 		priv->wps.session_enable) {
1433 		if (ptr->is_11n_enabled &&
1434 			ptr->ba_status &&
1435 			ptr->amsdu_in_ampdu &&
1436 			mwifiex_is_amsdu_allowed(priv, tid) &&
1437 			mwifiex_is_11n_aggragation_possible(priv, ptr,
1438 							adapter->tx_buf_size))
1439 			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
1440 			/* ra_list_spinlock has been freed in
1441 			 * mwifiex_11n_aggregate_pkt()
1442 			 */
1443 		else
1444 			mwifiex_send_single_packet(priv, ptr, ptr_index);
1445 			/* ra_list_spinlock has been freed in
1446 			 * mwifiex_send_single_packet()
1447 			 */
1448 	} else {
1449 		if (mwifiex_is_ampdu_allowed(priv, ptr, tid) &&
1450 		    ptr->ba_pkt_count > ptr->ba_packet_thr) {
1451 			if (mwifiex_space_avail_for_new_ba_stream(adapter)) {
1452 				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1453 						      BA_SETUP_INPROGRESS);
1454 				mwifiex_send_addba(priv, tid, ptr->ra);
1455 			} else if (mwifiex_find_stream_to_delete
1456 				   (priv, tid, &tid_del, ra)) {
1457 				mwifiex_create_ba_tbl(priv, ptr->ra, tid,
1458 						      BA_SETUP_INPROGRESS);
1459 				mwifiex_send_delba(priv, tid_del, ra, 1);
1460 			}
1461 		}
1462 		if (mwifiex_is_amsdu_allowed(priv, tid) &&
1463 		    mwifiex_is_11n_aggragation_possible(priv, ptr,
1464 							adapter->tx_buf_size))
1465 			mwifiex_11n_aggregate_pkt(priv, ptr, ptr_index);
1466 			/* ra_list_spinlock has been freed in
1467 			   mwifiex_11n_aggregate_pkt() */
1468 		else
1469 			mwifiex_send_single_packet(priv, ptr, ptr_index);
1470 			/* ra_list_spinlock has been freed in
1471 			   mwifiex_send_single_packet() */
1472 	}
1473 	return 0;
1474 }
1475 
1476 void mwifiex_process_bypass_tx(struct mwifiex_adapter *adapter)
1477 {
1478 	struct mwifiex_tx_param tx_param;
1479 	struct sk_buff *skb;
1480 	struct mwifiex_txinfo *tx_info;
1481 	struct mwifiex_private *priv;
1482 	int i;
1483 
1484 	if (adapter->data_sent || adapter->tx_lock_flag)
1485 		return;
1486 
1487 	for (i = 0; i < adapter->priv_num; ++i) {
1488 		priv = adapter->priv[i];
1489 
1490 		if (adapter->if_ops.is_port_ready &&
1491 		    !adapter->if_ops.is_port_ready(priv))
1492 			continue;
1493 
1494 		if (skb_queue_empty(&priv->bypass_txq))
1495 			continue;
1496 
1497 		skb = skb_dequeue(&priv->bypass_txq);
1498 		tx_info = MWIFIEX_SKB_TXCB(skb);
1499 
1500 		/* no aggregation for bypass packets */
1501 		tx_param.next_pkt_len = 0;
1502 
1503 		if (mwifiex_process_tx(priv, skb, &tx_param) == -EBUSY) {
1504 			skb_queue_head(&priv->bypass_txq, skb);
1505 			tx_info->flags |= MWIFIEX_BUF_FLAG_REQUEUED_PKT;
1506 		} else {
1507 			atomic_dec(&adapter->bypass_tx_pending);
1508 		}
1509 	}
1510 }
1511 
1512 /*
1513  * This function transmits the highest priority packet awaiting in the
1514  * WMM Queues.
1515  */
1516 void
1517 mwifiex_wmm_process_tx(struct mwifiex_adapter *adapter)
1518 {
1519 	do {
1520 		if (mwifiex_dequeue_tx_packet(adapter))
1521 			break;
1522 		if (adapter->iface_type != MWIFIEX_SDIO) {
1523 			if (adapter->data_sent ||
1524 			    adapter->tx_lock_flag)
1525 				break;
1526 		} else {
1527 			if (atomic_read(&adapter->tx_queued) >=
1528 			    MWIFIEX_MAX_PKTS_TXQ)
1529 				break;
1530 		}
1531 	} while (!mwifiex_wmm_lists_empty(adapter));
1532 }
1533