xref: /linux/drivers/net/wireless/ath/ath10k/mac.c (revision 1fc31357ad194fb98691f3d122bcd47e59239e83)
1 /*
2  * Copyright (c) 2005-2011 Atheros Communications Inc.
3  * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
4  *
5  * Permission to use, copy, modify, and/or distribute this software for any
6  * purpose with or without fee is hereby granted, provided that the above
7  * copyright notice and this permission notice appear in all copies.
8  *
9  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
10  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
11  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
12  * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
13  * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
14  * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
15  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
16  */
17 
18 #include "mac.h"
19 
20 #include <net/mac80211.h>
21 #include <linux/etherdevice.h>
22 #include <linux/acpi.h>
23 
24 #include "hif.h"
25 #include "core.h"
26 #include "debug.h"
27 #include "wmi.h"
28 #include "htt.h"
29 #include "txrx.h"
30 #include "testmode.h"
32 #include "wmi-tlv.h"
33 #include "wmi-ops.h"
34 #include "wow.h"
35 
36 /*********/
37 /* Rates */
38 /*********/
39 
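/* mac80211 expresses struct ieee80211_rate.bitrate in units of 100 kbps,
 * so 10 below means 1 Mbps and 540 means 54 Mbps. hw_value (and
 * hw_value_short for short-preamble CCK) carry the corresponding
 * ATH10K_HW_RATE_* codes handed to firmware.
 */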
40 static struct ieee80211_rate ath10k_rates[] = {
41 	{ .bitrate = 10,
42 	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
43 	{ .bitrate = 20,
44 	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
45 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
46 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
47 	{ .bitrate = 55,
48 	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
49 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
50 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
51 	{ .bitrate = 110,
52 	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
53 	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
54 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
55 
56 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
57 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
58 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
59 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
60 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
61 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
62 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
63 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
64 };
65 
66 static struct ieee80211_rate ath10k_rates_rev2[] = {
67 	{ .bitrate = 10,
68 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
69 	{ .bitrate = 20,
70 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
71 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
72 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
73 	{ .bitrate = 55,
74 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
75 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
76 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
77 	{ .bitrate = 110,
78 	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
79 	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
80 	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
81 
82 	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
83 	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
84 	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
85 	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
86 	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
87 	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
88 	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
89 	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
90 };
91 
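/* The first four entries of the rate tables are the 2.4 GHz-only CCK rates.
 * The 5 GHz ("a") rate set therefore starts at ATH10K_MAC_FIRST_OFDM_RATE_IDX,
 * while the 2.4 GHz ("g") rate set uses the whole table.
 */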
92 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
93 
94 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
95 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
96 			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
97 #define ath10k_g_rates (ath10k_rates + 0)
98 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
99 
100 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
101 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
102 
103 static bool ath10k_mac_bitrate_is_cck(int bitrate)
104 {
105 	switch (bitrate) {
106 	case 10:
107 	case 20:
108 	case 55:
109 	case 110:
110 		return true;
111 	}
112 
113 	return false;
114 }
115 
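/* Convert a bitrate in 100 kbps units into the rate value used towards
 * firmware: the rate in 500 kbps units, with BIT(7) set for CCK rates
 * (e.g. 55 -> 11 | BIT(7) = 0x8b, 540 -> 108 = 0x6c). Treating BIT(7) as a
 * CCK marker reflects how this helper derives it; the precise firmware-side
 * interpretation is an assumption, not documented here.
 */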
116 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
117 {
118 	return DIV_ROUND_UP(bitrate, 5) |
119 	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
120 }
121 
122 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
123 			     u8 hw_rate, bool cck)
124 {
125 	const struct ieee80211_rate *rate;
126 	int i;
127 
128 	for (i = 0; i < sband->n_bitrates; i++) {
129 		rate = &sband->bitrates[i];
130 
131 		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
132 			continue;
133 
134 		if (rate->hw_value == hw_rate)
135 			return i;
136 		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
137 			 rate->hw_value_short == hw_rate)
138 			return i;
139 	}
140 
141 	return 0;
142 }
143 
144 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
145 			     u32 bitrate)
146 {
147 	int i;
148 
149 	for (i = 0; i < sband->n_bitrates; i++)
150 		if (sband->bitrates[i].bitrate == bitrate)
151 			return i;
152 
153 	return 0;
154 }
155 
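/* The VHT MCS map packs 2 bits per spatial stream. This helper extracts the
 * 2-bit field for the given nss and turns it into a bitmask of supported MCS
 * indices, e.g. IEEE80211_VHT_MCS_SUPPORT_0_9 -> BIT(10) - 1 (MCS 0..9).
 * An unsupported stream yields 0.
 */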
156 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
157 {
158 	switch ((mcs_map >> (2 * nss)) & 0x3) {
159 	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
160 	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
161 	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
162 	}
163 	return 0;
164 }
165 
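/* Return the highest spatial stream count for which the user-supplied HT MCS
 * mask still has any rate enabled, defaulting to 1 if the mask is empty. The
 * VHT counterpart below does the same for the per-NSS VHT MCS mask.
 */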
166 static u32
167 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
168 {
169 	int nss;
170 
171 	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
172 		if (ht_mcs_mask[nss])
173 			return nss + 1;
174 
175 	return 1;
176 }
177 
178 static u32
179 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
180 {
181 	int nss;
182 
183 	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
184 		if (vht_mcs_mask[nss])
185 			return nss + 1;
186 
187 	return 1;
188 }
189 
190 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
191 {
192 	enum wmi_host_platform_type platform_type;
193 	int ret;
194 
195 	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
196 		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
197 	else
198 		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
199 
200 	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
201 
202 	if (ret && ret != -EOPNOTSUPP) {
203 		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
204 		return ret;
205 	}
206 
207 	return 0;
208 }
209 
210 /**********/
211 /* Crypto */
212 /**********/
213 
214 static int ath10k_send_key(struct ath10k_vif *arvif,
215 			   struct ieee80211_key_conf *key,
216 			   enum set_key_cmd cmd,
217 			   const u8 *macaddr, u32 flags)
218 {
219 	struct ath10k *ar = arvif->ar;
220 	struct wmi_vdev_install_key_arg arg = {
221 		.vdev_id = arvif->vdev_id,
222 		.key_idx = key->keyidx,
223 		.key_len = key->keylen,
224 		.key_data = key->key,
225 		.key_flags = flags,
226 		.macaddr = macaddr,
227 	};
228 
229 	lockdep_assert_held(&arvif->ar->conf_mutex);
230 
231 	switch (key->cipher) {
232 	case WLAN_CIPHER_SUITE_CCMP:
233 		arg.key_cipher = WMI_CIPHER_AES_CCM;
234 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
235 		break;
236 	case WLAN_CIPHER_SUITE_TKIP:
237 		arg.key_cipher = WMI_CIPHER_TKIP;
238 		arg.key_txmic_len = 8;
239 		arg.key_rxmic_len = 8;
240 		break;
241 	case WLAN_CIPHER_SUITE_WEP40:
242 	case WLAN_CIPHER_SUITE_WEP104:
243 		arg.key_cipher = WMI_CIPHER_WEP;
244 		break;
245 	case WLAN_CIPHER_SUITE_AES_CMAC:
246 		WARN_ON(1);
247 		return -EINVAL;
248 	default:
249 		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
250 		return -EOPNOTSUPP;
251 	}
252 
253 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
254 		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
255 
256 	if (cmd == DISABLE_KEY) {
257 		arg.key_cipher = WMI_CIPHER_NONE;
258 		arg.key_data = NULL;
259 	}
260 
261 	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
262 }
263 
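/* Install (or, for DISABLE_KEY, remove) a key through WMI and wait for the
 * firmware acknowledgement that completes ar->install_key_done. Returns 1
 * without touching firmware when hardware crypto is disabled for this vif,
 * and -ETIMEDOUT if no response arrives within 3 seconds.
 */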
264 static int ath10k_install_key(struct ath10k_vif *arvif,
265 			      struct ieee80211_key_conf *key,
266 			      enum set_key_cmd cmd,
267 			      const u8 *macaddr, u32 flags)
268 {
269 	struct ath10k *ar = arvif->ar;
270 	int ret;
271 	unsigned long time_left;
272 
273 	lockdep_assert_held(&ar->conf_mutex);
274 
275 	reinit_completion(&ar->install_key_done);
276 
277 	if (arvif->nohwcrypt)
278 		return 1;
279 
280 	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
281 	if (ret)
282 		return ret;
283 
284 	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
285 	if (time_left == 0)
286 		return -ETIMEDOUT;
287 
288 	return 0;
289 }
290 
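/* Push all configured WEP keys for the given peer into firmware. In AP mode
 * each key is installed as a pairwise key, with WMI_KEY_TX_USAGE added for
 * the default TX key; in IBSS mode every key is installed both as pairwise
 * and as group key. See the comment further down for the IBSS def_keyid
 * workaround.
 */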
291 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
292 					const u8 *addr)
293 {
294 	struct ath10k *ar = arvif->ar;
295 	struct ath10k_peer *peer;
296 	int ret;
297 	int i;
298 	u32 flags;
299 
300 	lockdep_assert_held(&ar->conf_mutex);
301 
302 	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
303 		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
304 		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
305 		return -EINVAL;
306 
307 	spin_lock_bh(&ar->data_lock);
308 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
309 	spin_unlock_bh(&ar->data_lock);
310 
311 	if (!peer)
312 		return -ENOENT;
313 
314 	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
315 		if (arvif->wep_keys[i] == NULL)
316 			continue;
317 
318 		switch (arvif->vif->type) {
319 		case NL80211_IFTYPE_AP:
320 			flags = WMI_KEY_PAIRWISE;
321 
322 			if (arvif->def_wep_key_idx == i)
323 				flags |= WMI_KEY_TX_USAGE;
324 
325 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
326 						 SET_KEY, addr, flags);
327 			if (ret < 0)
328 				return ret;
329 			break;
330 		case NL80211_IFTYPE_ADHOC:
331 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
332 						 SET_KEY, addr,
333 						 WMI_KEY_PAIRWISE);
334 			if (ret < 0)
335 				return ret;
336 
337 			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
338 						 SET_KEY, addr, WMI_KEY_GROUP);
339 			if (ret < 0)
340 				return ret;
341 			break;
342 		default:
343 			WARN_ON(1);
344 			return -EINVAL;
345 		}
346 
347 		spin_lock_bh(&ar->data_lock);
348 		peer->keys[i] = arvif->wep_keys[i];
349 		spin_unlock_bh(&ar->data_lock);
350 	}
351 
352 	/* In some cases (notably with static WEP IBSS with multiple keys)
353 	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
354 	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
355 	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
356 	 * effective so use that.
357 	 *
358 	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
359 	 */
360 	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
361 		return 0;
362 
363 	if (arvif->def_wep_key_idx == -1)
364 		return 0;
365 
366 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
367 					arvif->vdev_id,
368 					arvif->ar->wmi.vdev_param->def_keyid,
369 					arvif->def_wep_key_idx);
370 	if (ret) {
371 		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
372 			    arvif->vdev_id, ret);
373 		return ret;
374 	}
375 
376 	return 0;
377 }
378 
379 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
380 				  const u8 *addr)
381 {
382 	struct ath10k *ar = arvif->ar;
383 	struct ath10k_peer *peer;
384 	int first_errno = 0;
385 	int ret;
386 	int i;
387 	u32 flags = 0;
388 
389 	lockdep_assert_held(&ar->conf_mutex);
390 
391 	spin_lock_bh(&ar->data_lock);
392 	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
393 	spin_unlock_bh(&ar->data_lock);
394 
395 	if (!peer)
396 		return -ENOENT;
397 
398 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
399 		if (peer->keys[i] == NULL)
400 			continue;
401 
402 		/* key flags are not required to delete the key */
403 		ret = ath10k_install_key(arvif, peer->keys[i],
404 					 DISABLE_KEY, addr, flags);
405 		if (ret < 0 && first_errno == 0)
406 			first_errno = ret;
407 
408 		if (ret < 0)
409 			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
410 				    i, ret);
411 
412 		spin_lock_bh(&ar->data_lock);
413 		peer->keys[i] = NULL;
414 		spin_unlock_bh(&ar->data_lock);
415 	}
416 
417 	return first_errno;
418 }
419 
420 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
421 				    u8 keyidx)
422 {
423 	struct ath10k_peer *peer;
424 	int i;
425 
426 	lockdep_assert_held(&ar->data_lock);
427 
428 	/* We don't know which vdev this peer belongs to,
429 	 * since WMI doesn't give us that information.
430 	 *
431 	 * FIXME: multi-bss needs to be handled.
432 	 */
433 	peer = ath10k_peer_find(ar, 0, addr);
434 	if (!peer)
435 		return false;
436 
437 	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
438 		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
439 			return true;
440 	}
441 
442 	return false;
443 }
444 
445 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
446 				 struct ieee80211_key_conf *key)
447 {
448 	struct ath10k *ar = arvif->ar;
449 	struct ath10k_peer *peer;
450 	u8 addr[ETH_ALEN];
451 	int first_errno = 0;
452 	int ret;
453 	int i;
454 	u32 flags = 0;
455 
456 	lockdep_assert_held(&ar->conf_mutex);
457 
458 	for (;;) {
459 		/* ath10k_install_key() sleeps waiting for firmware, so we can't
460 		 * hold data_lock the whole time; remove the keys incrementally. */
461 		spin_lock_bh(&ar->data_lock);
462 		i = 0;
463 		list_for_each_entry(peer, &ar->peers, list) {
464 			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
465 				if (peer->keys[i] == key) {
466 					ether_addr_copy(addr, peer->addr);
467 					peer->keys[i] = NULL;
468 					break;
469 				}
470 			}
471 
472 			if (i < ARRAY_SIZE(peer->keys))
473 				break;
474 		}
475 		spin_unlock_bh(&ar->data_lock);
476 
477 		if (i == ARRAY_SIZE(peer->keys))
478 			break;
479 		/* key flags are not required to delete the key */
480 		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
481 		if (ret < 0 && first_errno == 0)
482 			first_errno = ret;
483 
484 		if (ret)
485 			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
486 				    addr, ret);
487 	}
488 
489 	return first_errno;
490 }
491 
492 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
493 					 struct ieee80211_key_conf *key)
494 {
495 	struct ath10k *ar = arvif->ar;
496 	struct ath10k_peer *peer;
497 	int ret;
498 
499 	lockdep_assert_held(&ar->conf_mutex);
500 
501 	list_for_each_entry(peer, &ar->peers, list) {
502 		if (ether_addr_equal(peer->addr, arvif->vif->addr))
503 			continue;
504 
505 		if (ether_addr_equal(peer->addr, arvif->bssid))
506 			continue;
507 
508 		if (peer->keys[key->keyidx] == key)
509 			continue;
510 
511 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
512 			   arvif->vdev_id, key->keyidx);
513 
514 		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
515 		if (ret) {
516 			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
517 				    arvif->vdev_id, peer->addr, ret);
518 			return ret;
519 		}
520 	}
521 
522 	return 0;
523 }
524 
525 /*********************/
526 /* General utilities */
527 /*********************/
528 
529 static inline enum wmi_phy_mode
530 chan_to_phymode(const struct cfg80211_chan_def *chandef)
531 {
532 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
533 
534 	switch (chandef->chan->band) {
535 	case NL80211_BAND_2GHZ:
536 		switch (chandef->width) {
537 		case NL80211_CHAN_WIDTH_20_NOHT:
538 			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
539 				phymode = MODE_11B;
540 			else
541 				phymode = MODE_11G;
542 			break;
543 		case NL80211_CHAN_WIDTH_20:
544 			phymode = MODE_11NG_HT20;
545 			break;
546 		case NL80211_CHAN_WIDTH_40:
547 			phymode = MODE_11NG_HT40;
548 			break;
549 		case NL80211_CHAN_WIDTH_5:
550 		case NL80211_CHAN_WIDTH_10:
551 		case NL80211_CHAN_WIDTH_80:
552 		case NL80211_CHAN_WIDTH_80P80:
553 		case NL80211_CHAN_WIDTH_160:
554 			phymode = MODE_UNKNOWN;
555 			break;
556 		}
557 		break;
558 	case NL80211_BAND_5GHZ:
559 		switch (chandef->width) {
560 		case NL80211_CHAN_WIDTH_20_NOHT:
561 			phymode = MODE_11A;
562 			break;
563 		case NL80211_CHAN_WIDTH_20:
564 			phymode = MODE_11NA_HT20;
565 			break;
566 		case NL80211_CHAN_WIDTH_40:
567 			phymode = MODE_11NA_HT40;
568 			break;
569 		case NL80211_CHAN_WIDTH_80:
570 			phymode = MODE_11AC_VHT80;
571 			break;
572 		case NL80211_CHAN_WIDTH_5:
573 		case NL80211_CHAN_WIDTH_10:
574 		case NL80211_CHAN_WIDTH_80P80:
575 		case NL80211_CHAN_WIDTH_160:
576 			phymode = MODE_UNKNOWN;
577 			break;
578 		}
579 		break;
580 	default:
581 		break;
582 	}
583 
584 	WARN_ON(phymode == MODE_UNKNOWN);
585 	return phymode;
586 }
587 
588 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
589 {
590 /*
591  * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
592  *   0 for no restriction
593  *   1 for 1/4 us
594  *   2 for 1/2 us
595  *   3 for 1 us
596  *   4 for 2 us
597  *   5 for 4 us
598  *   6 for 8 us
599  *   7 for 16 us
600  */
601 	switch (mpdudensity) {
602 	case 0:
603 		return 0;
604 	case 1:
605 	case 2:
606 	case 3:
607 	/* Our lower layer calculations limit our precision to
608 	 * 1 microsecond */
609 		return 1;
610 	case 4:
611 		return 2;
612 	case 5:
613 		return 4;
614 	case 6:
615 		return 8;
616 	case 7:
617 		return 16;
618 	default:
619 		return 0;
620 	}
621 }
622 
623 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
624 			struct cfg80211_chan_def *def)
625 {
626 	struct ieee80211_chanctx_conf *conf;
627 
628 	rcu_read_lock();
629 	conf = rcu_dereference(vif->chanctx_conf);
630 	if (!conf) {
631 		rcu_read_unlock();
632 		return -ENOENT;
633 	}
634 
635 	*def = conf->def;
636 	rcu_read_unlock();
637 
638 	return 0;
639 }
640 
641 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
642 					 struct ieee80211_chanctx_conf *conf,
643 					 void *data)
644 {
645 	int *num = data;
646 
647 	(*num)++;
648 }
649 
650 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
651 {
652 	int num = 0;
653 
654 	ieee80211_iter_chan_contexts_atomic(ar->hw,
655 					    ath10k_mac_num_chanctxs_iter,
656 					    &num);
657 
658 	return num;
659 }
660 
661 static void
662 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
663 				struct ieee80211_chanctx_conf *conf,
664 				void *data)
665 {
666 	struct cfg80211_chan_def **def = data;
667 
668 	*def = &conf->def;
669 }
670 
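/* Create a firmware peer entry. Each vdev implicitly consumes one peer slot
 * too, so the capacity check counts ar->num_peers plus one entry per arvif
 * against ar->max_num_peers. The function only returns success after the
 * firmware has reported the new peer (ath10k_wait_for_peer_created), at which
 * point the local peer object is annotated with its vif and sta.
 */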
671 static int ath10k_peer_create(struct ath10k *ar,
672 			      struct ieee80211_vif *vif,
673 			      struct ieee80211_sta *sta,
674 			      u32 vdev_id,
675 			      const u8 *addr,
676 			      enum wmi_peer_type peer_type)
677 {
678 	struct ath10k_vif *arvif;
679 	struct ath10k_peer *peer;
680 	int num_peers = 0;
681 	int ret;
682 
683 	lockdep_assert_held(&ar->conf_mutex);
684 
685 	num_peers = ar->num_peers;
686 
687 	/* Each vdev consumes a peer entry as well */
688 	list_for_each_entry(arvif, &ar->arvifs, list)
689 		num_peers++;
690 
691 	if (num_peers >= ar->max_num_peers)
692 		return -ENOBUFS;
693 
694 	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
695 	if (ret) {
696 		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
697 			    addr, vdev_id, ret);
698 		return ret;
699 	}
700 
701 	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
702 	if (ret) {
703 		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
704 			    addr, vdev_id, ret);
705 		return ret;
706 	}
707 
708 	spin_lock_bh(&ar->data_lock);
709 
710 	peer = ath10k_peer_find(ar, vdev_id, addr);
711 	if (!peer) {
712 		spin_unlock_bh(&ar->data_lock);
713 		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
714 			    addr, vdev_id);
715 		ath10k_wmi_peer_delete(ar, vdev_id, addr);
716 		return -ENOENT;
717 	}
718 
719 	peer->vif = vif;
720 	peer->sta = sta;
721 
722 	spin_unlock_bh(&ar->data_lock);
723 
724 	ar->num_peers++;
725 
726 	return 0;
727 }
728 
729 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
730 {
731 	struct ath10k *ar = arvif->ar;
732 	u32 param;
733 	int ret;
734 
735 	param = ar->wmi.pdev_param->sta_kickout_th;
736 	ret = ath10k_wmi_pdev_set_param(ar, param,
737 					ATH10K_KICKOUT_THRESHOLD);
738 	if (ret) {
739 		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
740 			    arvif->vdev_id, ret);
741 		return ret;
742 	}
743 
744 	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
745 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
746 					ATH10K_KEEPALIVE_MIN_IDLE);
747 	if (ret) {
748 		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
749 			    arvif->vdev_id, ret);
750 		return ret;
751 	}
752 
753 	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
754 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
755 					ATH10K_KEEPALIVE_MAX_IDLE);
756 	if (ret) {
757 		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
758 			    arvif->vdev_id, ret);
759 		return ret;
760 	}
761 
762 	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
763 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
764 					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
765 	if (ret) {
766 		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
767 			    arvif->vdev_id, ret);
768 		return ret;
769 	}
770 
771 	return 0;
772 }
773 
774 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
775 {
776 	struct ath10k *ar = arvif->ar;
777 	u32 vdev_param;
778 
779 	vdev_param = ar->wmi.vdev_param->rts_threshold;
780 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
781 }
782 
783 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
784 {
785 	int ret;
786 
787 	lockdep_assert_held(&ar->conf_mutex);
788 
789 	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
790 	if (ret)
791 		return ret;
792 
793 	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
794 	if (ret)
795 		return ret;
796 
797 	ar->num_peers--;
798 
799 	return 0;
800 }
801 
802 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
803 {
804 	struct ath10k_peer *peer, *tmp;
805 	int peer_id;
806 	int i;
807 
808 	lockdep_assert_held(&ar->conf_mutex);
809 
810 	spin_lock_bh(&ar->data_lock);
811 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
812 		if (peer->vdev_id != vdev_id)
813 			continue;
814 
815 		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
816 			    peer->addr, vdev_id);
817 
818 		for_each_set_bit(peer_id, peer->peer_ids,
819 				 ATH10K_MAX_NUM_PEER_IDS) {
820 			ar->peer_map[peer_id] = NULL;
821 		}
822 
823 		/* Double check that peer is properly un-referenced from
824 		 * the peer_map
825 		 */
826 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
827 			if (ar->peer_map[i] == peer) {
828 				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
829 					    peer->addr, peer, i);
830 				ar->peer_map[i] = NULL;
831 			}
832 		}
833 
834 		list_del(&peer->list);
835 		kfree(peer);
836 		ar->num_peers--;
837 	}
838 	spin_unlock_bh(&ar->data_lock);
839 }
840 
841 static void ath10k_peer_cleanup_all(struct ath10k *ar)
842 {
843 	struct ath10k_peer *peer, *tmp;
844 	int i;
845 
846 	lockdep_assert_held(&ar->conf_mutex);
847 
848 	spin_lock_bh(&ar->data_lock);
849 	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
850 		list_del(&peer->list);
851 		kfree(peer);
852 	}
853 
854 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
855 		ar->peer_map[i] = NULL;
856 
857 	spin_unlock_bh(&ar->data_lock);
858 
859 	ar->num_peers = 0;
860 	ar->num_stations = 0;
861 }
862 
863 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
864 				       struct ieee80211_sta *sta,
865 				       enum wmi_tdls_peer_state state)
866 {
867 	int ret;
868 	struct wmi_tdls_peer_update_cmd_arg arg = {};
869 	struct wmi_tdls_peer_capab_arg cap = {};
870 	struct wmi_channel_arg chan_arg = {};
871 
872 	lockdep_assert_held(&ar->conf_mutex);
873 
874 	arg.vdev_id = vdev_id;
875 	arg.peer_state = state;
876 	ether_addr_copy(arg.addr, sta->addr);
877 
878 	cap.peer_max_sp = sta->max_sp;
879 	cap.peer_uapsd_queues = sta->uapsd_queues;
880 
881 	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
882 	    !sta->tdls_initiator)
883 		cap.is_peer_responder = 1;
884 
885 	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
886 	if (ret) {
887 		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
888 			    arg.addr, vdev_id, ret);
889 		return ret;
890 	}
891 
892 	return 0;
893 }
894 
895 /************************/
896 /* Interface management */
897 /************************/
898 
899 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
900 {
901 	struct ath10k *ar = arvif->ar;
902 
903 	lockdep_assert_held(&ar->data_lock);
904 
905 	if (!arvif->beacon)
906 		return;
907 
908 	if (!arvif->beacon_buf)
909 		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
910 				 arvif->beacon->len, DMA_TO_DEVICE);
911 
912 	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
913 		    arvif->beacon_state != ATH10K_BEACON_SENT))
914 		return;
915 
916 	dev_kfree_skb_any(arvif->beacon);
917 
918 	arvif->beacon = NULL;
919 	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
920 }
921 
922 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
923 {
924 	struct ath10k *ar = arvif->ar;
925 
926 	lockdep_assert_held(&ar->data_lock);
927 
928 	ath10k_mac_vif_beacon_free(arvif);
929 
930 	if (arvif->beacon_buf) {
931 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
932 				  arvif->beacon_buf, arvif->beacon_paddr);
933 		arvif->beacon_buf = NULL;
934 	}
935 }
936 
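/* Wait for the firmware response to a preceding vdev start/restart/stop
 * request. Callers reinit ar->vdev_setup_done before issuing the WMI command;
 * the completion is signalled from the corresponding WMI event handler. Bails
 * out with -ESHUTDOWN while a firmware crash is being handled.
 */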
937 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
938 {
939 	unsigned long time_left;
940 
941 	lockdep_assert_held(&ar->conf_mutex);
942 
943 	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
944 		return -ESHUTDOWN;
945 
946 	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
947 						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
948 	if (time_left == 0)
949 		return -ETIMEDOUT;
950 
951 	return 0;
952 }
953 
954 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
955 {
956 	struct cfg80211_chan_def *chandef = NULL;
957 	struct ieee80211_channel *channel = NULL;
958 	struct wmi_vdev_start_request_arg arg = {};
959 	int ret = 0;
960 
961 	lockdep_assert_held(&ar->conf_mutex);
962 
963 	ieee80211_iter_chan_contexts_atomic(ar->hw,
964 					    ath10k_mac_get_any_chandef_iter,
965 					    &chandef);
966 	if (WARN_ON_ONCE(!chandef))
967 		return -ENOENT;
968 
969 	channel = chandef->chan;
970 
971 	arg.vdev_id = vdev_id;
972 	arg.channel.freq = channel->center_freq;
973 	arg.channel.band_center_freq1 = chandef->center_freq1;
974 
975 	/* TODO: set this up dynamically; what if we
976 	 * don't have any vifs? */
977 	arg.channel.mode = chan_to_phymode(chandef);
978 	arg.channel.chan_radar =
979 			!!(channel->flags & IEEE80211_CHAN_RADAR);
980 
981 	arg.channel.min_power = 0;
982 	arg.channel.max_power = channel->max_power * 2;
983 	arg.channel.max_reg_power = channel->max_reg_power * 2;
984 	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;
985 
986 	reinit_completion(&ar->vdev_setup_done);
987 
988 	ret = ath10k_wmi_vdev_start(ar, &arg);
989 	if (ret) {
990 		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
991 			    vdev_id, ret);
992 		return ret;
993 	}
994 
995 	ret = ath10k_vdev_setup_sync(ar);
996 	if (ret) {
997 		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
998 			    vdev_id, ret);
999 		return ret;
1000 	}
1001 
1002 	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1003 	if (ret) {
1004 		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1005 			    vdev_id, ret);
1006 		goto vdev_stop;
1007 	}
1008 
1009 	ar->monitor_vdev_id = vdev_id;
1010 
1011 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1012 		   ar->monitor_vdev_id);
1013 	return 0;
1014 
1015 vdev_stop:
1016 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1017 	if (ret)
1018 		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1019 			    ar->monitor_vdev_id, ret);
1020 
1021 	return ret;
1022 }
1023 
1024 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1025 {
1026 	int ret = 0;
1027 
1028 	lockdep_assert_held(&ar->conf_mutex);
1029 
1030 	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1031 	if (ret)
1032 		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1033 			    ar->monitor_vdev_id, ret);
1034 
1035 	reinit_completion(&ar->vdev_setup_done);
1036 
1037 	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1038 	if (ret)
1039 		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1040 			    ar->monitor_vdev_id, ret);
1041 
1042 	ret = ath10k_vdev_setup_sync(ar);
1043 	if (ret)
1044 		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1045 			    ar->monitor_vdev_id, ret);
1046 
1047 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1048 		   ar->monitor_vdev_id);
1049 	return ret;
1050 }
1051 
1052 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1053 {
1054 	int bit, ret = 0;
1055 
1056 	lockdep_assert_held(&ar->conf_mutex);
1057 
1058 	if (ar->free_vdev_map == 0) {
1059 		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1060 		return -ENOMEM;
1061 	}
1062 
1063 	bit = __ffs64(ar->free_vdev_map);
1064 
1065 	ar->monitor_vdev_id = bit;
1066 
1067 	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1068 				     WMI_VDEV_TYPE_MONITOR,
1069 				     0, ar->mac_addr);
1070 	if (ret) {
1071 		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1072 			    ar->monitor_vdev_id, ret);
1073 		return ret;
1074 	}
1075 
1076 	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1077 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1078 		   ar->monitor_vdev_id);
1079 
1080 	return 0;
1081 }
1082 
1083 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1084 {
1085 	int ret = 0;
1086 
1087 	lockdep_assert_held(&ar->conf_mutex);
1088 
1089 	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1090 	if (ret) {
1091 		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1092 			    ar->monitor_vdev_id, ret);
1093 		return ret;
1094 	}
1095 
1096 	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1097 
1098 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1099 		   ar->monitor_vdev_id);
1100 	return ret;
1101 }
1102 
1103 static int ath10k_monitor_start(struct ath10k *ar)
1104 {
1105 	int ret;
1106 
1107 	lockdep_assert_held(&ar->conf_mutex);
1108 
1109 	ret = ath10k_monitor_vdev_create(ar);
1110 	if (ret) {
1111 		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1112 		return ret;
1113 	}
1114 
1115 	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1116 	if (ret) {
1117 		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1118 		ath10k_monitor_vdev_delete(ar);
1119 		return ret;
1120 	}
1121 
1122 	ar->monitor_started = true;
1123 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1124 
1125 	return 0;
1126 }
1127 
1128 static int ath10k_monitor_stop(struct ath10k *ar)
1129 {
1130 	int ret;
1131 
1132 	lockdep_assert_held(&ar->conf_mutex);
1133 
1134 	ret = ath10k_monitor_vdev_stop(ar);
1135 	if (ret) {
1136 		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1137 		return ret;
1138 	}
1139 
1140 	ret = ath10k_monitor_vdev_delete(ar);
1141 	if (ret) {
1142 		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1143 		return ret;
1144 	}
1145 
1146 	ar->monitor_started = false;
1147 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1148 
1149 	return 0;
1150 }
1151 
1152 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1153 {
1154 	int num_ctx;
1155 
1156 	/* At least one chanctx is required to derive a channel to start
1157 	 * monitor vdev on.
1158 	 */
1159 	num_ctx = ath10k_mac_num_chanctxs(ar);
1160 	if (num_ctx == 0)
1161 		return false;
1162 
1163 	/* If there's already an existing special monitor interface then don't
1164 	 * bother creating another monitor vdev.
1165 	 */
1166 	if (ar->monitor_arvif)
1167 		return false;
1168 
1169 	return ar->monitor ||
1170 	       ar->filter_flags & FIF_OTHER_BSS ||
1171 	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1172 }
1173 
1174 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1175 {
1176 	int num_ctx;
1177 
1178 	num_ctx = ath10k_mac_num_chanctxs(ar);
1179 
1180 	/* FIXME: Current interface combinations and cfg80211/mac80211 code
1181 	 * shouldn't allow this but make sure to prevent handling the following
1182 	 * case anyway since multi-channel DFS hasn't been tested at all.
1183 	 */
1184 	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1185 		return false;
1186 
1187 	return true;
1188 }
1189 
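/* Reconcile monitor vdev state with the current configuration: a monitor
 * vdev is needed when monitor mode, FIF_OTHER_BSS filtering or CAC is active
 * (and no dedicated monitor interface already exists), but is not allowed
 * while CAC runs with more than one channel context. Start or stop the
 * special monitor vdev accordingly.
 */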
1190 static int ath10k_monitor_recalc(struct ath10k *ar)
1191 {
1192 	bool needed;
1193 	bool allowed;
1194 	int ret;
1195 
1196 	lockdep_assert_held(&ar->conf_mutex);
1197 
1198 	needed = ath10k_mac_monitor_vdev_is_needed(ar);
1199 	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1200 
1201 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1202 		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
1203 		   ar->monitor_started, needed, allowed);
1204 
1205 	if (WARN_ON(needed && !allowed)) {
1206 		if (ar->monitor_started) {
1207 			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1208 
1209 			ret = ath10k_monitor_stop(ar);
1210 			if (ret)
1211 				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1212 					    ret);
1213 				/* not serious */
1214 		}
1215 
1216 		return -EPERM;
1217 	}
1218 
1219 	if (needed == ar->monitor_started)
1220 		return 0;
1221 
1222 	if (needed)
1223 		return ath10k_monitor_start(ar);
1224 	else
1225 		return ath10k_monitor_stop(ar);
1226 }
1227 
1228 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1229 {
1230 	struct ath10k *ar = arvif->ar;
1231 	u32 vdev_param, rts_cts = 0;
1232 
1233 	lockdep_assert_held(&ar->conf_mutex);
1234 
1235 	vdev_param = ar->wmi.vdev_param->enable_rtscts;
1236 
1237 	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1238 
1239 	if (arvif->num_legacy_stations > 0)
1240 		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1241 			      WMI_RTSCTS_PROFILE);
1242 	else
1243 		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1244 			      WMI_RTSCTS_PROFILE);
1245 
1246 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1247 					 rts_cts);
1248 }
1249 
1250 static int ath10k_start_cac(struct ath10k *ar)
1251 {
1252 	int ret;
1253 
1254 	lockdep_assert_held(&ar->conf_mutex);
1255 
1256 	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1257 
1258 	ret = ath10k_monitor_recalc(ar);
1259 	if (ret) {
1260 		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1261 		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1262 		return ret;
1263 	}
1264 
1265 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1266 		   ar->monitor_vdev_id);
1267 
1268 	return 0;
1269 }
1270 
1271 static int ath10k_stop_cac(struct ath10k *ar)
1272 {
1273 	lockdep_assert_held(&ar->conf_mutex);
1274 
1275 	/* CAC is not running - do nothing */
1276 	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1277 		return 0;
1278 
1279 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1280 	ath10k_monitor_stop(ar);
1281 
1282 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1283 
1284 	return 0;
1285 }
1286 
1287 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1288 				      struct ieee80211_chanctx_conf *conf,
1289 				      void *data)
1290 {
1291 	bool *ret = data;
1292 
1293 	if (!*ret && conf->radar_enabled)
1294 		*ret = true;
1295 }
1296 
1297 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1298 {
1299 	bool has_radar = false;
1300 
1301 	ieee80211_iter_chan_contexts_atomic(ar->hw,
1302 					    ath10k_mac_has_radar_iter,
1303 					    &has_radar);
1304 
1305 	return has_radar;
1306 }
1307 
1308 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1309 {
1310 	int ret;
1311 
1312 	lockdep_assert_held(&ar->conf_mutex);
1313 
1314 	ath10k_stop_cac(ar);
1315 
1316 	if (!ath10k_mac_has_radar_enabled(ar))
1317 		return;
1318 
1319 	if (ar->num_started_vdevs > 0)
1320 		return;
1321 
1322 	ret = ath10k_start_cac(ar);
1323 	if (ret) {
1324 		/*
1325 		 * Not possible to start CAC on current channel so starting
1326 		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
1327 		 * by indicating that radar was detected.
1328 		 */
1329 		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1330 		ieee80211_radar_detected(ar->hw);
1331 	}
1332 }
1333 
1334 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1335 {
1336 	struct ath10k *ar = arvif->ar;
1337 	int ret;
1338 
1339 	lockdep_assert_held(&ar->conf_mutex);
1340 
1341 	reinit_completion(&ar->vdev_setup_done);
1342 
1343 	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1344 	if (ret) {
1345 		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1346 			    arvif->vdev_id, ret);
1347 		return ret;
1348 	}
1349 
1350 	ret = ath10k_vdev_setup_sync(ar);
1351 	if (ret) {
1352 		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1353 			    arvif->vdev_id, ret);
1354 		return ret;
1355 	}
1356 
1357 	WARN_ON(ar->num_started_vdevs == 0);
1358 
1359 	if (ar->num_started_vdevs != 0) {
1360 		ar->num_started_vdevs--;
1361 		ath10k_recalc_radar_detection(ar);
1362 	}
1363 
1364 	return ret;
1365 }
1366 
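/* Build a wmi_vdev_start_request_arg from the vif state and the target
 * chandef and issue either a vdev start or restart, then synchronize with
 * the firmware response. The power limits are scaled by two, i.e. apparently
 * passed to firmware in half-dBm units.
 */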
1367 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1368 				     const struct cfg80211_chan_def *chandef,
1369 				     bool restart)
1370 {
1371 	struct ath10k *ar = arvif->ar;
1372 	struct wmi_vdev_start_request_arg arg = {};
1373 	int ret = 0;
1374 
1375 	lockdep_assert_held(&ar->conf_mutex);
1376 
1377 	reinit_completion(&ar->vdev_setup_done);
1378 
1379 	arg.vdev_id = arvif->vdev_id;
1380 	arg.dtim_period = arvif->dtim_period;
1381 	arg.bcn_intval = arvif->beacon_interval;
1382 
1383 	arg.channel.freq = chandef->chan->center_freq;
1384 	arg.channel.band_center_freq1 = chandef->center_freq1;
1385 	arg.channel.mode = chan_to_phymode(chandef);
1386 
1387 	arg.channel.min_power = 0;
1388 	arg.channel.max_power = chandef->chan->max_power * 2;
1389 	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1390 	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;
1391 
1392 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1393 		arg.ssid = arvif->u.ap.ssid;
1394 		arg.ssid_len = arvif->u.ap.ssid_len;
1395 		arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1396 
1397 		/* For now allow DFS for AP mode */
1398 		arg.channel.chan_radar =
1399 			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1400 	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1401 		arg.ssid = arvif->vif->bss_conf.ssid;
1402 		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
1403 	}
1404 
1405 	ath10k_dbg(ar, ATH10K_DBG_MAC,
1406 		   "mac vdev %d start center_freq %d phymode %s\n",
1407 		   arg.vdev_id, arg.channel.freq,
1408 		   ath10k_wmi_phymode_str(arg.channel.mode));
1409 
1410 	if (restart)
1411 		ret = ath10k_wmi_vdev_restart(ar, &arg);
1412 	else
1413 		ret = ath10k_wmi_vdev_start(ar, &arg);
1414 
1415 	if (ret) {
1416 		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1417 			    arg.vdev_id, ret);
1418 		return ret;
1419 	}
1420 
1421 	ret = ath10k_vdev_setup_sync(ar);
1422 	if (ret) {
1423 		ath10k_warn(ar,
1424 			    "failed to synchronize setup for vdev %i restart %d: %d\n",
1425 			    arg.vdev_id, restart, ret);
1426 		return ret;
1427 	}
1428 
1429 	ar->num_started_vdevs++;
1430 	ath10k_recalc_radar_detection(ar);
1431 
1432 	return ret;
1433 }
1434 
1435 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1436 			     const struct cfg80211_chan_def *def)
1437 {
1438 	return ath10k_vdev_start_restart(arvif, def, false);
1439 }
1440 
1441 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1442 			       const struct cfg80211_chan_def *def)
1443 {
1444 	return ath10k_vdev_start_restart(arvif, def, true);
1445 }
1446 
1447 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1448 				       struct sk_buff *bcn)
1449 {
1450 	struct ath10k *ar = arvif->ar;
1451 	struct ieee80211_mgmt *mgmt;
1452 	const u8 *p2p_ie;
1453 	int ret;
1454 
1455 	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1456 		return 0;
1457 
1458 	mgmt = (void *)bcn->data;
1459 	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1460 					 mgmt->u.beacon.variable,
1461 					 bcn->len - (mgmt->u.beacon.variable -
1462 						     bcn->data));
1463 	if (!p2p_ie)
1464 		return -ENOENT;
1465 
1466 	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1467 	if (ret) {
1468 		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1469 			    arvif->vdev_id, ret);
1470 		return ret;
1471 	}
1472 
1473 	return 0;
1474 }
1475 
1476 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1477 				       u8 oui_type, size_t ie_offset)
1478 {
1479 	size_t len;
1480 	const u8 *next;
1481 	const u8 *end;
1482 	u8 *ie;
1483 
1484 	if (WARN_ON(skb->len < ie_offset))
1485 		return -EINVAL;
1486 
1487 	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1488 					   skb->data + ie_offset,
1489 					   skb->len - ie_offset);
1490 	if (!ie)
1491 		return -ENOENT;
1492 
1493 	len = ie[1] + 2;
1494 	end = skb->data + skb->len;
1495 	next = ie + len;
1496 
1497 	if (WARN_ON(next > end))
1498 		return -EINVAL;
1499 
1500 	memmove(ie, next, end - next);
1501 	skb_trim(skb, skb->len - len);
1502 
1503 	return 0;
1504 }
1505 
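/* Download a beacon template to firmware (only meaningful with
 * WMI_SERVICE_BEACON_OFFLOAD). The TIM offset reported by mac80211 is passed
 * along, presumably so firmware can update the TIM element itself, and any
 * P2P IE is stripped from the template since firmware re-inserts the one
 * configured via ath10k_mac_setup_bcn_p2p_ie().
 */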
1506 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1507 {
1508 	struct ath10k *ar = arvif->ar;
1509 	struct ieee80211_hw *hw = ar->hw;
1510 	struct ieee80211_vif *vif = arvif->vif;
1511 	struct ieee80211_mutable_offsets offs = {};
1512 	struct sk_buff *bcn;
1513 	int ret;
1514 
1515 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1516 		return 0;
1517 
1518 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1519 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1520 		return 0;
1521 
1522 	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
1523 	if (!bcn) {
1524 		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1525 		return -EPERM;
1526 	}
1527 
1528 	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1529 	if (ret) {
1530 		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1531 		kfree_skb(bcn);
1532 		return ret;
1533 	}
1534 
1535 	/* P2P IE is inserted by firmware automatically (as configured above)
1536 	 * so remove it from the base beacon template to avoid duplicate P2P
1537 	 * IEs in beacon frames.
1538 	 */
1539 	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1540 				    offsetof(struct ieee80211_mgmt,
1541 					     u.beacon.variable));
1542 
1543 	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1544 				  0, NULL, 0);
1545 	kfree_skb(bcn);
1546 
1547 	if (ret) {
1548 		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1549 			    ret);
1550 		return ret;
1551 	}
1552 
1553 	return 0;
1554 }
1555 
1556 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1557 {
1558 	struct ath10k *ar = arvif->ar;
1559 	struct ieee80211_hw *hw = ar->hw;
1560 	struct ieee80211_vif *vif = arvif->vif;
1561 	struct sk_buff *prb;
1562 	int ret;
1563 
1564 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1565 		return 0;
1566 
1567 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1568 		return 0;
1569 
1570 	prb = ieee80211_proberesp_get(hw, vif);
1571 	if (!prb) {
1572 		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1573 		return -EPERM;
1574 	}
1575 
1576 	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1577 	kfree_skb(prb);
1578 
1579 	if (ret) {
1580 		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1581 			    ret);
1582 		return ret;
1583 	}
1584 
1585 	return 0;
1586 }
1587 
1588 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1589 {
1590 	struct ath10k *ar = arvif->ar;
1591 	struct cfg80211_chan_def def;
1592 	int ret;
1593 
1594 	/* When the vdev is originally started during assign_vif_chanctx() some
1595 	 * information is missing, notably the SSID. Firmware revisions with beacon
1596 	 * offloading require the SSID to be provided during vdev (re)start to
1597 	 * handle hidden SSID properly.
1598 	 *
1599 	 * Vdev restart must be done after vdev has been both started and
1600 	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1601 	 * deliver vdev restart response event causing timeouts during vdev
1602 	 * syncing in ath10k.
1603 	 *
1604 	 * Note: The vdev down/up and template reinstallation could be skipped
1605 	 * since only wmi-tlv firmware are known to have beacon offload and
1606 	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1607 	 * response delivery. It's probably more robust to keep it as is.
1608 	 */
1609 	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1610 		return 0;
1611 
1612 	if (WARN_ON(!arvif->is_started))
1613 		return -EINVAL;
1614 
1615 	if (WARN_ON(!arvif->is_up))
1616 		return -EINVAL;
1617 
1618 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1619 		return -EINVAL;
1620 
1621 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1622 	if (ret) {
1623 		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1624 			    arvif->vdev_id, ret);
1625 		return ret;
1626 	}
1627 
1628 	/* Vdev down reset beacon & presp templates. Reinstall them. Otherwise
1629 	 * firmware will crash upon vdev up.
1630 	 */
1631 
1632 	ret = ath10k_mac_setup_bcn_tmpl(arvif);
1633 	if (ret) {
1634 		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1635 		return ret;
1636 	}
1637 
1638 	ret = ath10k_mac_setup_prb_tmpl(arvif);
1639 	if (ret) {
1640 		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1641 		return ret;
1642 	}
1643 
1644 	ret = ath10k_vdev_restart(arvif, &def);
1645 	if (ret) {
1646 		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1647 			    arvif->vdev_id, ret);
1648 		return ret;
1649 	}
1650 
1651 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1652 				 arvif->bssid);
1653 	if (ret) {
1654 		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1655 			    arvif->vdev_id, ret);
1656 		return ret;
1657 	}
1658 
1659 	return 0;
1660 }
1661 
1662 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1663 				     struct ieee80211_bss_conf *info)
1664 {
1665 	struct ath10k *ar = arvif->ar;
1666 	int ret = 0;
1667 
1668 	lockdep_assert_held(&arvif->ar->conf_mutex);
1669 
1670 	if (!info->enable_beacon) {
1671 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1672 		if (ret)
1673 			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1674 				    arvif->vdev_id, ret);
1675 
1676 		arvif->is_up = false;
1677 
1678 		spin_lock_bh(&arvif->ar->data_lock);
1679 		ath10k_mac_vif_beacon_free(arvif);
1680 		spin_unlock_bh(&arvif->ar->data_lock);
1681 
1682 		return;
1683 	}
1684 
1685 	arvif->tx_seq_no = 0x1000;
1686 
1687 	arvif->aid = 0;
1688 	ether_addr_copy(arvif->bssid, info->bssid);
1689 
1690 	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1691 				 arvif->bssid);
1692 	if (ret) {
1693 		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1694 			    arvif->vdev_id, ret);
1695 		return;
1696 	}
1697 
1698 	arvif->is_up = true;
1699 
1700 	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1701 	if (ret) {
1702 		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1703 			    arvif->vdev_id, ret);
1704 		return;
1705 	}
1706 
1707 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1708 }
1709 
1710 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1711 				struct ieee80211_bss_conf *info,
1712 				const u8 self_peer[ETH_ALEN])
1713 {
1714 	struct ath10k *ar = arvif->ar;
1715 	u32 vdev_param;
1716 	int ret = 0;
1717 
1718 	lockdep_assert_held(&arvif->ar->conf_mutex);
1719 
1720 	if (!info->ibss_joined) {
1721 		if (is_zero_ether_addr(arvif->bssid))
1722 			return;
1723 
1724 		eth_zero_addr(arvif->bssid);
1725 
1726 		return;
1727 	}
1728 
1729 	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1730 	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1731 					ATH10K_DEFAULT_ATIM);
1732 	if (ret)
1733 		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1734 			    arvif->vdev_id, ret);
1735 }
1736 
1737 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1738 {
1739 	struct ath10k *ar = arvif->ar;
1740 	u32 param;
1741 	u32 value;
1742 	int ret;
1743 
1744 	lockdep_assert_held(&arvif->ar->conf_mutex);
1745 
1746 	if (arvif->u.sta.uapsd)
1747 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1748 	else
1749 		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1750 
1751 	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1752 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1753 	if (ret) {
1754 		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1755 			    value, arvif->vdev_id, ret);
1756 		return ret;
1757 	}
1758 
1759 	return 0;
1760 }
1761 
1762 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1763 {
1764 	struct ath10k *ar = arvif->ar;
1765 	u32 param;
1766 	u32 value;
1767 	int ret;
1768 
1769 	lockdep_assert_held(&arvif->ar->conf_mutex);
1770 
1771 	if (arvif->u.sta.uapsd)
1772 		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1773 	else
1774 		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1775 
1776 	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1777 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1778 					  param, value);
1779 	if (ret) {
1780 		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1781 			    value, arvif->vdev_id, ret);
1782 		return ret;
1783 	}
1784 
1785 	return 0;
1786 }
1787 
1788 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1789 {
1790 	struct ath10k_vif *arvif;
1791 	int num = 0;
1792 
1793 	lockdep_assert_held(&ar->conf_mutex);
1794 
1795 	list_for_each_entry(arvif, &ar->arvifs, list)
1796 		if (arvif->is_started)
1797 			num++;
1798 
1799 	return num;
1800 }
1801 
1802 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1803 {
1804 	struct ath10k *ar = arvif->ar;
1805 	struct ieee80211_vif *vif = arvif->vif;
1806 	struct ieee80211_conf *conf = &ar->hw->conf;
1807 	enum wmi_sta_powersave_param param;
1808 	enum wmi_sta_ps_mode psmode;
1809 	int ret;
1810 	int ps_timeout;
1811 	bool enable_ps;
1812 
1813 	lockdep_assert_held(&arvif->ar->conf_mutex);
1814 
1815 	if (arvif->vif->type != NL80211_IFTYPE_STATION)
1816 		return 0;
1817 
1818 	enable_ps = arvif->ps;
1819 
1820 	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1821 	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1822 		      ar->running_fw->fw_file.fw_features)) {
1823 		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1824 			    arvif->vdev_id);
1825 		enable_ps = false;
1826 	}
1827 
1828 	if (!arvif->is_started) {
1829 		/* mac80211 can update vif powersave state while disconnected.
1830 		 * Firmware doesn't behave nicely and consumes more power than
1831 		 * necessary if PS is disabled on a non-started vdev. Hence
1832 		 * force-enable PS for non-running vdevs.
1833 		 */
1834 		psmode = WMI_STA_PS_MODE_ENABLED;
1835 	} else if (enable_ps) {
1836 		psmode = WMI_STA_PS_MODE_ENABLED;
1837 		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1838 
1839 		ps_timeout = conf->dynamic_ps_timeout;
1840 		if (ps_timeout == 0) {
1841 			/* Firmware doesn't like 0 */
1842 			ps_timeout = ieee80211_tu_to_usec(
1843 				vif->bss_conf.beacon_int) / 1000;
1844 		}
1845 
1846 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1847 						  ps_timeout);
1848 		if (ret) {
1849 			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1850 				    arvif->vdev_id, ret);
1851 			return ret;
1852 		}
1853 	} else {
1854 		psmode = WMI_STA_PS_MODE_DISABLED;
1855 	}
1856 
1857 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1858 		   arvif->vdev_id, psmode ? "enable" : "disable");
1859 
1860 	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1861 	if (ret) {
1862 		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1863 			    psmode, arvif->vdev_id, ret);
1864 		return ret;
1865 	}
1866 
1867 	return 0;
1868 }
1869 
1870 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1871 {
1872 	struct ath10k *ar = arvif->ar;
1873 	struct wmi_sta_keepalive_arg arg = {};
1874 	int ret;
1875 
1876 	lockdep_assert_held(&arvif->ar->conf_mutex);
1877 
1878 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1879 		return 0;
1880 
1881 	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1882 		return 0;
1883 
1884 	/* Some firmware revisions have a bug and ignore the `enabled` field.
1885 	 * Instead use the interval to disable the keepalive.
1886 	 */
1887 	arg.vdev_id = arvif->vdev_id;
1888 	arg.enabled = 1;
1889 	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
1890 	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
1891 
1892 	ret = ath10k_wmi_sta_keepalive(ar, &arg);
1893 	if (ret) {
1894 		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
1895 			    arvif->vdev_id, ret);
1896 		return ret;
1897 	}
1898 
1899 	return 0;
1900 }
1901 
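/* Channel switch countdown for beacon-offloaded AP vdevs: advance the CSA
 * counter via mac80211 and re-install the beacon and probe response templates
 * so the offloaded frames carry the updated counter, then finalize the switch
 * with ieee80211_csa_finish() once mac80211 reports the countdown complete.
 */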
1902 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
1903 {
1904 	struct ath10k *ar = arvif->ar;
1905 	struct ieee80211_vif *vif = arvif->vif;
1906 	int ret;
1907 
1908 	lockdep_assert_held(&arvif->ar->conf_mutex);
1909 
1910 	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
1911 		return;
1912 
1913 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1914 		return;
1915 
1916 	if (!vif->csa_active)
1917 		return;
1918 
1919 	if (!arvif->is_up)
1920 		return;
1921 
1922 	if (!ieee80211_csa_is_complete(vif)) {
1923 		ieee80211_csa_update_counter(vif);
1924 
1925 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
1926 		if (ret)
1927 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
1928 				    ret);
1929 
1930 		ret = ath10k_mac_setup_prb_tmpl(arvif);
1931 		if (ret)
1932 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
1933 				    ret);
1934 	} else {
1935 		ieee80211_csa_finish(vif);
1936 	}
1937 }
1938 
1939 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
1940 {
1941 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
1942 						ap_csa_work);
1943 	struct ath10k *ar = arvif->ar;
1944 
1945 	mutex_lock(&ar->conf_mutex);
1946 	ath10k_mac_vif_ap_csa_count_down(arvif);
1947 	mutex_unlock(&ar->conf_mutex);
1948 }
1949 
1950 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
1951 					  struct ieee80211_vif *vif)
1952 {
1953 	struct sk_buff *skb = data;
1954 	struct ieee80211_mgmt *mgmt = (void *)skb->data;
1955 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1956 
1957 	if (vif->type != NL80211_IFTYPE_STATION)
1958 		return;
1959 
1960 	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
1961 		return;
1962 
1963 	cancel_delayed_work(&arvif->connection_loss_work);
1964 }
1965 
1966 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
1967 {
1968 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
1969 						   IEEE80211_IFACE_ITER_NORMAL,
1970 						   ath10k_mac_handle_beacon_iter,
1971 						   skb);
1972 }
1973 
1974 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
1975 					       struct ieee80211_vif *vif)
1976 {
1977 	u32 *vdev_id = data;
1978 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
1979 	struct ath10k *ar = arvif->ar;
1980 	struct ieee80211_hw *hw = ar->hw;
1981 
1982 	if (arvif->vdev_id != *vdev_id)
1983 		return;
1984 
1985 	if (!arvif->is_up)
1986 		return;
1987 
1988 	ieee80211_beacon_loss(vif);
1989 
1990 	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
1991 	 * (done by mac80211) succeeds but beacons do not resume then it
1992 	 * doesn't make sense to continue operation. Queue connection loss work
1993 	 * which can be cancelled when beacon is received.
1994 	 */
1995 	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
1996 				     ATH10K_CONNECTION_LOSS_HZ);
1997 }
1998 
1999 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2000 {
2001 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
2002 						   IEEE80211_IFACE_ITER_NORMAL,
2003 						   ath10k_mac_handle_beacon_miss_iter,
2004 						   &vdev_id);
2005 }
2006 
2007 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2008 {
2009 	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2010 						connection_loss_work.work);
2011 	struct ieee80211_vif *vif = arvif->vif;
2012 
2013 	if (!arvif->is_up)
2014 		return;
2015 
2016 	ieee80211_connection_loss(vif);
2017 }
2018 
2019 /**********************/
2020 /* Station management */
2021 /**********************/
2022 
2023 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2024 					     struct ieee80211_vif *vif)
2025 {
2026 	/* Some firmware revisions have unstable STA powersave when the listen
2027 	 * interval is set too high (e.g. 5). The symptom is that the firmware
2028 	 * doesn't generate NullFunc frames properly even if buffered frames have
2029 	 * been indicated in the Beacon TIM. The firmware would seldom wake up to
2030 	 * pull buffered frames; pinging the device from the AP often fails.
2031 	 *
2032 	 * As a workaround set it to 1.
2033 	 */
2034 	if (vif->type == NL80211_IFTYPE_STATION)
2035 		return 1;
2036 
2037 	return ar->hw->conf.listen_interval;
2038 }
2039 
2040 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2041 				      struct ieee80211_vif *vif,
2042 				      struct ieee80211_sta *sta,
2043 				      struct wmi_peer_assoc_complete_arg *arg)
2044 {
2045 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2046 	u32 aid;
2047 
2048 	lockdep_assert_held(&ar->conf_mutex);
2049 
2050 	if (vif->type == NL80211_IFTYPE_STATION)
2051 		aid = vif->bss_conf.aid;
2052 	else
2053 		aid = sta->aid;
2054 
2055 	ether_addr_copy(arg->addr, sta->addr);
2056 	arg->vdev_id = arvif->vdev_id;
2057 	arg->peer_aid = aid;
2058 	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2059 	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2060 	arg->peer_num_spatial_streams = 1;
2061 	arg->peer_caps = vif->bss_conf.assoc_capability;
2062 }
2063 
2064 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2065 				       struct ieee80211_vif *vif,
2066 				       struct ieee80211_sta *sta,
2067 				       struct wmi_peer_assoc_complete_arg *arg)
2068 {
2069 	struct ieee80211_bss_conf *info = &vif->bss_conf;
2070 	struct cfg80211_chan_def def;
2071 	struct cfg80211_bss *bss;
2072 	const u8 *rsnie = NULL;
2073 	const u8 *wpaie = NULL;
2074 
2075 	lockdep_assert_held(&ar->conf_mutex);
2076 
2077 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2078 		return;
2079 
2080 	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
2081 			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2082 	if (bss) {
2083 		const struct cfg80211_bss_ies *ies;
2084 
2085 		rcu_read_lock();
2086 		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2087 
2088 		ies = rcu_dereference(bss->ies);
2089 
2090 		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2091 						WLAN_OUI_TYPE_MICROSOFT_WPA,
2092 						ies->data,
2093 						ies->len);
2094 		rcu_read_unlock();
2095 		cfg80211_put_bss(ar->hw->wiphy, bss);
2096 	}
2097 
2098 	/* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
2099 	if (rsnie || wpaie) {
2100 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2101 		arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2102 	}
2103 
2104 	if (wpaie) {
2105 		ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2106 		arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2107 	}
2108 
2109 	if (sta->mfp &&
2110 	    test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2111 		     ar->running_fw->fw_file.fw_features)) {
2112 		arg->peer_flags |= ar->wmi.peer_flags->pmf;
2113 	}
2114 }
2115 
2116 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2117 				      struct ieee80211_vif *vif,
2118 				      struct ieee80211_sta *sta,
2119 				      struct wmi_peer_assoc_complete_arg *arg)
2120 {
2121 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2122 	struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2123 	struct cfg80211_chan_def def;
2124 	const struct ieee80211_supported_band *sband;
2125 	const struct ieee80211_rate *rates;
2126 	enum nl80211_band band;
2127 	u32 ratemask;
2128 	u8 rate;
2129 	int i;
2130 
2131 	lockdep_assert_held(&ar->conf_mutex);
2132 
2133 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2134 		return;
2135 
2136 	band = def.chan->band;
2137 	sband = ar->hw->wiphy->bands[band];
2138 	ratemask = sta->supp_rates[band];
2139 	ratemask &= arvif->bitrate_mask.control[band].legacy;
2140 	rates = sband->bitrates;
2141 
2142 	rateset->num_rates = 0;
2143 
2144 	for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2145 		if (!(ratemask & 1))
2146 			continue;
2147 
2148 		rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2149 		rateset->rates[rateset->num_rates] = rate;
2150 		rateset->num_rates++;
2151 	}
2152 }
2153 
2154 static bool
2155 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2156 {
2157 	int nss;
2158 
2159 	for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2160 		if (ht_mcs_mask[nss])
2161 			return false;
2162 
2163 	return true;
2164 }
2165 
2166 static bool
2167 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2168 {
2169 	int nss;
2170 
2171 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2172 		if (vht_mcs_mask[nss])
2173 			return false;
2174 
2175 	return true;
2176 }
2177 
2178 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2179 				   struct ieee80211_vif *vif,
2180 				   struct ieee80211_sta *sta,
2181 				   struct wmi_peer_assoc_complete_arg *arg)
2182 {
2183 	const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap;
2184 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2185 	struct cfg80211_chan_def def;
2186 	enum nl80211_band band;
2187 	const u8 *ht_mcs_mask;
2188 	const u16 *vht_mcs_mask;
2189 	int i, n;
2190 	u8 max_nss;
2191 	u32 stbc;
2192 
2193 	lockdep_assert_held(&ar->conf_mutex);
2194 
2195 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2196 		return;
2197 
2198 	if (!ht_cap->ht_supported)
2199 		return;
2200 
2201 	band = def.chan->band;
2202 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2203 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2204 
2205 	if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2206 	    ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2207 		return;
2208 
2209 	arg->peer_flags |= ar->wmi.peer_flags->ht;
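	/* Maximum A-MPDU length in bytes: 2^(13 + ampdu_factor) - 1, where 13
	 * is IEEE80211_HT_MAX_AMPDU_FACTOR (i.e. 8191 bytes for factor 0 up
	 * to 65535 bytes for factor 3).
	 */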
2210 	arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2211 				    ht_cap->ampdu_factor)) - 1;
2212 
2213 	arg->peer_mpdu_density =
2214 		ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2215 
2216 	arg->peer_ht_caps = ht_cap->cap;
2217 	arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2218 
2219 	if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2220 		arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2221 
2222 	if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) {
2223 		arg->peer_flags |= ar->wmi.peer_flags->bw40;
2224 		arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2225 	}
2226 
2227 	if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2228 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2229 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2230 
2231 		if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2232 			arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2233 	}
2234 
2235 	if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2236 		arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2237 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2238 	}
2239 
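	/* The RX STBC subfield of the HT capability info holds the number of
	 * spatial streams (1-3) the peer can receive STBC with. Extract it
	 * and re-encode it at WMI_RC_RX_STBC_FLAG_S in the rate caps.
	 */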
2240 	if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2241 		stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2242 		stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2243 		stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2244 		arg->peer_rate_caps |= stbc;
2245 		arg->peer_flags |= ar->wmi.peer_flags->stbc;
2246 	}
2247 
2248 	if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2249 		arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2250 	else if (ht_cap->mcs.rx_mask[1])
2251 		arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2252 
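	/* ht_cap->mcs.rx_mask is a bitmap of supported HT MCS indexes (byte
	 * i / 8, bit i % 8). For the equal-modulation MCSes 0-31, MCS 0-7 use
	 * one spatial stream, MCS 8-15 use two and so on, which is where
	 * max_nss = (i / 8) + 1 comes from.
	 */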
2253 	for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2254 		if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2255 		    (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2256 			max_nss = (i / 8) + 1;
2257 			arg->peer_ht_rates.rates[n++] = i;
2258 		}
2259 
2260 	/*
2261 	 * This is a workaround for HT-enabled STAs which break the spec
2262 	 * and have no HT capabilities RX mask (no HT RX MCS map).
2263 	 *
2264 	 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2265 	 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2266 	 *
2267 	 * Firmware asserts if such situation occurs.
2268 	 * Firmware asserts if such a situation occurs.
2269 	if (n == 0) {
2270 		arg->peer_ht_rates.num_rates = 8;
2271 		for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2272 			arg->peer_ht_rates.rates[i] = i;
2273 	} else {
2274 		arg->peer_ht_rates.num_rates = n;
2275 		arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss);
2276 	}
2277 
2278 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2279 		   arg->addr,
2280 		   arg->peer_ht_rates.num_rates,
2281 		   arg->peer_num_spatial_streams);
2282 }
2283 
2284 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2285 				    struct ath10k_vif *arvif,
2286 				    struct ieee80211_sta *sta)
2287 {
2288 	u32 uapsd = 0;
2289 	u32 max_sp = 0;
2290 	int ret = 0;
2291 
2292 	lockdep_assert_held(&ar->conf_mutex);
2293 
2294 	if (sta->wme && sta->uapsd_queues) {
2295 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2296 			   sta->uapsd_queues, sta->max_sp);
2297 
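		/* Map mac80211's per-AC U-APSD bits onto the corresponding
		 * WMI delivery/trigger enable flags (AC0 = BE ... AC3 = VO).
		 */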
2298 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2299 			uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2300 				 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2301 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2302 			uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2303 				 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2304 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2305 			uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2306 				 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2307 		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2308 			uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2309 				 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2310 
2311 		if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2312 			max_sp = sta->max_sp;
2313 
2314 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2315 						 sta->addr,
2316 						 WMI_AP_PS_PEER_PARAM_UAPSD,
2317 						 uapsd);
2318 		if (ret) {
2319 			ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2320 				    arvif->vdev_id, ret);
2321 			return ret;
2322 		}
2323 
2324 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2325 						 sta->addr,
2326 						 WMI_AP_PS_PEER_PARAM_MAX_SP,
2327 						 max_sp);
2328 		if (ret) {
2329 			ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2330 				    arvif->vdev_id, ret);
2331 			return ret;
2332 		}
2333 
2334 		/* TODO: set this up based on the STA listen interval and beacon
2335 		 * interval. Currently we don't know sta->listen_interval - a
2336 		 * mac80211 patch is required. Use 10 seconds for now.
2337 		 */
2338 		ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2339 						 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2340 						 10);
2341 		if (ret) {
2342 			ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2343 				    arvif->vdev_id, ret);
2344 			return ret;
2345 		}
2346 	}
2347 
2348 	return 0;
2349 }
2350 
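/* The VHT MCS map packs one 2-bit value per NSS: 0 = MCS 0-7, 1 = MCS 0-8,
 * 2 = MCS 0-9, 3 = NSS not supported. This helper clamps the tx MCS map
 * reported by the peer against the user-configured bitrate mask per NSS.
 * Intermediate limits (e.g. MCS 0-5) are rejected earlier by
 * ath10k_mac_can_set_bitrate_mask(), hence the WARN_ON() for those cases.
 *
 * Worked example: if the peer supports MCS 0-9 for NSS 1 but the mask limits
 * NSS 1 to MCS 0-7 (0x00ff), the AND yields 0x00ff, fls() - 1 = 7, and the
 * 2-bit field for NSS 1 is rewritten to IEEE80211_VHT_MCS_SUPPORT_0_7.
 */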
2351 static u16
2352 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2353 			      const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2354 {
2355 	int idx_limit;
2356 	int nss;
2357 	u16 mcs_map;
2358 	u16 mcs;
2359 
2360 	for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2361 		mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2362 			  vht_mcs_limit[nss];
2363 
2364 		if (mcs_map)
2365 			idx_limit = fls(mcs_map) - 1;
2366 		else
2367 			idx_limit = -1;
2368 
2369 		switch (idx_limit) {
2370 		case 0: /* fall through */
2371 		case 1: /* fall through */
2372 		case 2: /* fall through */
2373 		case 3: /* fall through */
2374 		case 4: /* fall through */
2375 		case 5: /* fall through */
2376 		case 6: /* fall through */
2377 		default:
2378 			/* see ath10k_mac_can_set_bitrate_mask() */
2379 			WARN_ON(1);
2380 			/* fall through */
2381 		case -1:
2382 			mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2383 			break;
2384 		case 7:
2385 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2386 			break;
2387 		case 8:
2388 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2389 			break;
2390 		case 9:
2391 			mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2392 			break;
2393 		}
2394 
2395 		tx_mcs_set &= ~(0x3 << (nss * 2));
2396 		tx_mcs_set |= mcs << (nss * 2);
2397 	}
2398 
2399 	return tx_mcs_set;
2400 }
2401 
2402 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2403 				    struct ieee80211_vif *vif,
2404 				    struct ieee80211_sta *sta,
2405 				    struct wmi_peer_assoc_complete_arg *arg)
2406 {
2407 	const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap;
2408 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2409 	struct cfg80211_chan_def def;
2410 	enum nl80211_band band;
2411 	const u16 *vht_mcs_mask;
2412 	u8 ampdu_factor;
2413 
2414 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2415 		return;
2416 
2417 	if (!vht_cap->vht_supported)
2418 		return;
2419 
2420 	band = def.chan->band;
2421 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2422 
2423 	if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2424 		return;
2425 
2426 	arg->peer_flags |= ar->wmi.peer_flags->vht;
2427 
2428 	if (def.chan->band == NL80211_BAND_2GHZ)
2429 		arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2430 
2431 	arg->peer_vht_caps = vht_cap->cap;
2432 
2433 	ampdu_factor = (vht_cap->cap &
2434 			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2435 		       IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2436 
2437 	/* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2438 	 * zero in VHT IE. Using it would result in degraded throughput.
2439 	 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2440 	 * it if VHT max_mpdu is smaller. */
2441 	arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2442 				 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2443 					ampdu_factor)) - 1);
2444 
2445 	if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2446 		arg->peer_flags |= ar->wmi.peer_flags->bw80;
2447 
2448 	arg->peer_vht_rates.rx_max_rate =
2449 		__le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2450 	arg->peer_vht_rates.rx_mcs_set =
2451 		__le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2452 	arg->peer_vht_rates.tx_max_rate =
2453 		__le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2454 	arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2455 		__le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2456 
2457 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n",
2458 		   sta->addr, arg->peer_max_mpdu, arg->peer_flags);
2459 }
2460 
2461 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2462 				    struct ieee80211_vif *vif,
2463 				    struct ieee80211_sta *sta,
2464 				    struct wmi_peer_assoc_complete_arg *arg)
2465 {
2466 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2467 
2468 	switch (arvif->vdev_type) {
2469 	case WMI_VDEV_TYPE_AP:
2470 		if (sta->wme)
2471 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2472 
2473 		if (sta->wme && sta->uapsd_queues) {
2474 			arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2475 			arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2476 		}
2477 		break;
2478 	case WMI_VDEV_TYPE_STA:
2479 		if (vif->bss_conf.qos)
2480 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2481 		break;
2482 	case WMI_VDEV_TYPE_IBSS:
2483 		if (sta->wme)
2484 			arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2485 		break;
2486 	default:
2487 		break;
2488 	}
2489 
2490 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2491 		   sta->addr, !!(arg->peer_flags &
2492 		   arvif->ar->wmi.peer_flags->qos));
2493 }
2494 
2495 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2496 {
2497 	return sta->supp_rates[NL80211_BAND_2GHZ] >>
2498 	       ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2499 }
2500 
2501 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2502 					struct ieee80211_vif *vif,
2503 					struct ieee80211_sta *sta,
2504 					struct wmi_peer_assoc_complete_arg *arg)
2505 {
2506 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2507 	struct cfg80211_chan_def def;
2508 	enum nl80211_band band;
2509 	const u8 *ht_mcs_mask;
2510 	const u16 *vht_mcs_mask;
2511 	enum wmi_phy_mode phymode = MODE_UNKNOWN;
2512 
2513 	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2514 		return;
2515 
2516 	band = def.chan->band;
2517 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2518 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2519 
2520 	switch (band) {
2521 	case NL80211_BAND_2GHZ:
2522 		if (sta->vht_cap.vht_supported &&
2523 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2524 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2525 				phymode = MODE_11AC_VHT40;
2526 			else
2527 				phymode = MODE_11AC_VHT20;
2528 		} else if (sta->ht_cap.ht_supported &&
2529 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2530 			if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2531 				phymode = MODE_11NG_HT40;
2532 			else
2533 				phymode = MODE_11NG_HT20;
2534 		} else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2535 			phymode = MODE_11G;
2536 		} else {
2537 			phymode = MODE_11B;
2538 		}
2539 
2540 		break;
2541 	case NL80211_BAND_5GHZ:
2542 		/*
2543 		 * Check VHT first.
2544 		 */
2545 		if (sta->vht_cap.vht_supported &&
2546 		    !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2547 			if (sta->bandwidth == IEEE80211_STA_RX_BW_80)
2548 				phymode = MODE_11AC_VHT80;
2549 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_40)
2550 				phymode = MODE_11AC_VHT40;
2551 			else if (sta->bandwidth == IEEE80211_STA_RX_BW_20)
2552 				phymode = MODE_11AC_VHT20;
2553 		} else if (sta->ht_cap.ht_supported &&
2554 			   !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2555 			if (sta->bandwidth >= IEEE80211_STA_RX_BW_40)
2556 				phymode = MODE_11NA_HT40;
2557 			else
2558 				phymode = MODE_11NA_HT20;
2559 		} else {
2560 			phymode = MODE_11A;
2561 		}
2562 
2563 		break;
2564 	default:
2565 		break;
2566 	}
2567 
2568 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2569 		   sta->addr, ath10k_wmi_phymode_str(phymode));
2570 
2571 	arg->peer_phymode = phymode;
2572 	WARN_ON(phymode == MODE_UNKNOWN);
2573 }
2574 
2575 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2576 				     struct ieee80211_vif *vif,
2577 				     struct ieee80211_sta *sta,
2578 				     struct wmi_peer_assoc_complete_arg *arg)
2579 {
2580 	lockdep_assert_held(&ar->conf_mutex);
2581 
2582 	memset(arg, 0, sizeof(*arg));
2583 
2584 	ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2585 	ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2586 	ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2587 	ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2588 	ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2589 	ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2590 	ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2591 
2592 	return 0;
2593 }
2594 
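/* Translate the 2-bit SM Power Save field from the HT capability info into
 * WMI SMPS states. Both "disabled" and the reserved encoding map to
 * WMI_PEER_SMPS_PS_NONE, i.e. no SM power save restrictions for the peer.
 */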
2595 static const u32 ath10k_smps_map[] = {
2596 	[WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2597 	[WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2598 	[WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2599 	[WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2600 };
2601 
2602 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2603 				  const u8 *addr,
2604 				  const struct ieee80211_sta_ht_cap *ht_cap)
2605 {
2606 	int smps;
2607 
2608 	if (!ht_cap->ht_supported)
2609 		return 0;
2610 
2611 	smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2612 	smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
2613 
2614 	if (smps >= ARRAY_SIZE(ath10k_smps_map))
2615 		return -EINVAL;
2616 
2617 	return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2618 					 WMI_PEER_SMPS_STATE,
2619 					 ath10k_smps_map[smps]);
2620 }
2621 
2622 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2623 				      struct ieee80211_vif *vif,
2624 				      struct ieee80211_sta_vht_cap vht_cap)
2625 {
2626 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2627 	int ret;
2628 	u32 param;
2629 	u32 value;
2630 
2631 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2632 		return 0;
2633 
2634 	if (!(ar->vht_cap_info &
2635 	      (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2636 	       IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2637 	       IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2638 	       IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2639 		return 0;
2640 
2641 	param = ar->wmi.vdev_param->txbf;
2642 	value = 0;
2643 
2644 	if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2645 		return 0;
2646 
2647 	/* The crossed logic below is intentional: if the remote STA can act
2648 	 * as a beamformer then we enable being a beamformee, and vice versa.
2649 	 */
2650 
2651 	if (ar->vht_cap_info &
2652 	    (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2653 	     IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2654 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2655 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2656 
2657 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2658 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2659 	}
2660 
2661 	if (ar->vht_cap_info &
2662 	    (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2663 	     IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2664 		if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2665 			value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2666 
2667 		if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2668 			value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2669 	}
2670 
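	/* MU beamforming requires the corresponding SU capability as a
	 * baseline (a MU beamformee/beamformer is also an SU one), so make
	 * sure the SU bits are set whenever the MU bits are.
	 */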
2671 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2672 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2673 
2674 	if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2675 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2676 
2677 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2678 	if (ret) {
2679 		ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2680 			    value, ret);
2681 		return ret;
2682 	}
2683 
2684 	return 0;
2685 }
2686 
2687 /* can be called only in mac80211 callbacks due to `key_count` usage */
2688 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
2689 			     struct ieee80211_vif *vif,
2690 			     struct ieee80211_bss_conf *bss_conf)
2691 {
2692 	struct ath10k *ar = hw->priv;
2693 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2694 	struct ieee80211_sta_ht_cap ht_cap;
2695 	struct ieee80211_sta_vht_cap vht_cap;
2696 	struct wmi_peer_assoc_complete_arg peer_arg;
2697 	struct ieee80211_sta *ap_sta;
2698 	int ret;
2699 
2700 	lockdep_assert_held(&ar->conf_mutex);
2701 
2702 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
2703 		   arvif->vdev_id, arvif->bssid, arvif->aid);
2704 
2705 	rcu_read_lock();
2706 
2707 	ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
2708 	if (!ap_sta) {
2709 		ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
2710 			    bss_conf->bssid, arvif->vdev_id);
2711 		rcu_read_unlock();
2712 		return;
2713 	}
2714 
2715 	/* ap_sta must only be accessed within the RCU section, which must be
2716 	 * left before calling ath10k_setup_peer_smps() as it might sleep. */
2717 	ht_cap = ap_sta->ht_cap;
2718 	vht_cap = ap_sta->vht_cap;
2719 
2720 	ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
2721 	if (ret) {
2722 		ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
2723 			    bss_conf->bssid, arvif->vdev_id, ret);
2724 		rcu_read_unlock();
2725 		return;
2726 	}
2727 
2728 	rcu_read_unlock();
2729 
2730 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2731 	if (ret) {
2732 		ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
2733 			    bss_conf->bssid, arvif->vdev_id, ret);
2734 		return;
2735 	}
2736 
2737 	ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
2738 	if (ret) {
2739 		ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
2740 			    arvif->vdev_id, ret);
2741 		return;
2742 	}
2743 
2744 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2745 	if (ret) {
2746 		ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
2747 			    arvif->vdev_id, bss_conf->bssid, ret);
2748 		return;
2749 	}
2750 
2751 	ath10k_dbg(ar, ATH10K_DBG_MAC,
2752 		   "mac vdev %d up (associated) bssid %pM aid %d\n",
2753 		   arvif->vdev_id, bss_conf->bssid, bss_conf->aid);
2754 
2755 	WARN_ON(arvif->is_up);
2756 
2757 	arvif->aid = bss_conf->aid;
2758 	ether_addr_copy(arvif->bssid, bss_conf->bssid);
2759 
2760 	ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
2761 	if (ret) {
2762 		ath10k_warn(ar, "failed to set vdev %d up: %d\n",
2763 			    arvif->vdev_id, ret);
2764 		return;
2765 	}
2766 
2767 	arvif->is_up = true;
2768 
2769 	/* Workaround: Some firmware revisions (tested with qca6174
2770 	 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
2771 	 * poked with a peer param command.
2772 	 */
2773 	ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
2774 					WMI_PEER_DUMMY_VAR, 1);
2775 	if (ret) {
2776 		ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
2777 			    arvif->bssid, arvif->vdev_id, ret);
2778 		return;
2779 	}
2780 }
2781 
2782 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
2783 				struct ieee80211_vif *vif)
2784 {
2785 	struct ath10k *ar = hw->priv;
2786 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2787 	struct ieee80211_sta_vht_cap vht_cap = {};
2788 	int ret;
2789 
2790 	lockdep_assert_held(&ar->conf_mutex);
2791 
2792 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
2793 		   arvif->vdev_id, arvif->bssid);
2794 
2795 	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
2796 	if (ret)
2797 		ath10k_warn(ar, "failed to down vdev %i: %d\n",
2798 			    arvif->vdev_id, ret);
2799 
2800 	arvif->def_wep_key_idx = -1;
2801 
2802 	ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
2803 	if (ret) {
2804 		ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
2805 			    arvif->vdev_id, ret);
2806 		return;
2807 	}
2808 
2809 	arvif->is_up = false;
2810 
2811 	cancel_delayed_work_sync(&arvif->connection_loss_work);
2812 }
2813 
2814 static int ath10k_station_assoc(struct ath10k *ar,
2815 				struct ieee80211_vif *vif,
2816 				struct ieee80211_sta *sta,
2817 				bool reassoc)
2818 {
2819 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2820 	struct wmi_peer_assoc_complete_arg peer_arg;
2821 	int ret = 0;
2822 
2823 	lockdep_assert_held(&ar->conf_mutex);
2824 
2825 	ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
2826 	if (ret) {
2827 		ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
2828 			    sta->addr, arvif->vdev_id, ret);
2829 		return ret;
2830 	}
2831 
2832 	ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
2833 	if (ret) {
2834 		ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
2835 			    sta->addr, arvif->vdev_id, ret);
2836 		return ret;
2837 	}
2838 
2839 	/* Re-assoc is run only to update the supported rates for a given
2840 	 * station. It doesn't make sense to reconfigure the peer completely.
2841 	 */
2842 	if (!reassoc) {
2843 		ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
2844 					     &sta->ht_cap);
2845 		if (ret) {
2846 			ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
2847 				    arvif->vdev_id, ret);
2848 			return ret;
2849 		}
2850 
2851 		ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
2852 		if (ret) {
2853 			ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
2854 				    sta->addr, arvif->vdev_id, ret);
2855 			return ret;
2856 		}
2857 
2858 		if (!sta->wme) {
2859 			arvif->num_legacy_stations++;
2860 			ret  = ath10k_recalc_rtscts_prot(arvif);
2861 			if (ret) {
2862 				ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2863 					    arvif->vdev_id, ret);
2864 				return ret;
2865 			}
2866 		}
2867 
2868 		/* Plumb cached keys only for static WEP */
2869 		if (arvif->def_wep_key_idx != -1) {
2870 			ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
2871 			if (ret) {
2872 				ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
2873 					    arvif->vdev_id, ret);
2874 				return ret;
2875 			}
2876 		}
2877 	}
2878 
2879 	return ret;
2880 }
2881 
2882 static int ath10k_station_disassoc(struct ath10k *ar,
2883 				   struct ieee80211_vif *vif,
2884 				   struct ieee80211_sta *sta)
2885 {
2886 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
2887 	int ret = 0;
2888 
2889 	lockdep_assert_held(&ar->conf_mutex);
2890 
2891 	if (!sta->wme) {
2892 		arvif->num_legacy_stations--;
2893 		ret = ath10k_recalc_rtscts_prot(arvif);
2894 		if (ret) {
2895 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
2896 				    arvif->vdev_id, ret);
2897 			return ret;
2898 		}
2899 	}
2900 
2901 	ret = ath10k_clear_peer_keys(arvif, sta->addr);
2902 	if (ret) {
2903 		ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
2904 			    arvif->vdev_id, ret);
2905 		return ret;
2906 	}
2907 
2908 	return ret;
2909 }
2910 
2911 /**************/
2912 /* Regulatory */
2913 /**************/
2914 
2915 static int ath10k_update_channel_list(struct ath10k *ar)
2916 {
2917 	struct ieee80211_hw *hw = ar->hw;
2918 	struct ieee80211_supported_band **bands;
2919 	enum nl80211_band band;
2920 	struct ieee80211_channel *channel;
2921 	struct wmi_scan_chan_list_arg arg = {0};
2922 	struct wmi_channel_arg *ch;
2923 	bool passive;
2924 	int len;
2925 	int ret;
2926 	int i;
2927 
2928 	lockdep_assert_held(&ar->conf_mutex);
2929 
2930 	bands = hw->wiphy->bands;
2931 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2932 		if (!bands[band])
2933 			continue;
2934 
2935 		for (i = 0; i < bands[band]->n_channels; i++) {
2936 			if (bands[band]->channels[i].flags &
2937 			    IEEE80211_CHAN_DISABLED)
2938 				continue;
2939 
2940 			arg.n_channels++;
2941 		}
2942 	}
2943 
2944 	len = sizeof(struct wmi_channel_arg) * arg.n_channels;
2945 	arg.channels = kzalloc(len, GFP_KERNEL);
2946 	if (!arg.channels)
2947 		return -ENOMEM;
2948 
2949 	ch = arg.channels;
2950 	for (band = 0; band < NUM_NL80211_BANDS; band++) {
2951 		if (!bands[band])
2952 			continue;
2953 
2954 		for (i = 0; i < bands[band]->n_channels; i++) {
2955 			channel = &bands[band]->channels[i];
2956 
2957 			if (channel->flags & IEEE80211_CHAN_DISABLED)
2958 				continue;
2959 
2960 			ch->allow_ht = true;
2961 
2962 			/* FIXME: when should we really allow VHT? */
2963 			ch->allow_vht = true;
2964 
2965 			ch->allow_ibss =
2966 				!(channel->flags & IEEE80211_CHAN_NO_IR);
2967 
2968 			ch->ht40plus =
2969 				!(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
2970 
2971 			ch->chan_radar =
2972 				!!(channel->flags & IEEE80211_CHAN_RADAR);
2973 
2974 			passive = channel->flags & IEEE80211_CHAN_NO_IR;
2975 			ch->passive = passive;
2976 
2977 			ch->freq = channel->center_freq;
2978 			ch->band_center_freq1 = channel->center_freq;
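			/* The power values below are doubled, presumably
			 * because the WMI channel arguments are expressed in
			 * 0.5 dBm units while mac80211 reports whole dBm.
			 */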
2979 			ch->min_power = 0;
2980 			ch->max_power = channel->max_power * 2;
2981 			ch->max_reg_power = channel->max_reg_power * 2;
2982 			ch->max_antenna_gain = channel->max_antenna_gain * 2;
2983 			ch->reg_class_id = 0; /* FIXME */
2984 
2985 			/* FIXME: why use only legacy modes, why not any
2986 			 * HT/VHT modes? Would that even make any
2987 			 * difference? */
2988 			if (channel->band == NL80211_BAND_2GHZ)
2989 				ch->mode = MODE_11G;
2990 			else
2991 				ch->mode = MODE_11A;
2992 
2993 			if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
2994 				continue;
2995 
2996 			ath10k_dbg(ar, ATH10K_DBG_WMI,
2997 				   "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
2998 				   ch - arg.channels, arg.n_channels,
2999 				   ch->freq, ch->max_power, ch->max_reg_power,
3000 				   ch->max_antenna_gain, ch->mode);
3001 
3002 			ch++;
3003 		}
3004 	}
3005 
3006 	ret = ath10k_wmi_scan_chan_list(ar, &arg);
3007 	kfree(arg.channels);
3008 
3009 	return ret;
3010 }
3011 
3012 static enum wmi_dfs_region
3013 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3014 {
3015 	switch (dfs_region) {
3016 	case NL80211_DFS_UNSET:
3017 		return WMI_UNINIT_DFS_DOMAIN;
3018 	case NL80211_DFS_FCC:
3019 		return WMI_FCC_DFS_DOMAIN;
3020 	case NL80211_DFS_ETSI:
3021 		return WMI_ETSI_DFS_DOMAIN;
3022 	case NL80211_DFS_JP:
3023 		return WMI_MKK4_DFS_DOMAIN;
3024 	}
3025 	return WMI_UNINIT_DFS_DOMAIN;
3026 }
3027 
3028 static void ath10k_regd_update(struct ath10k *ar)
3029 {
3030 	struct reg_dmn_pair_mapping *regpair;
3031 	int ret;
3032 	enum wmi_dfs_region wmi_dfs_reg;
3033 	enum nl80211_dfs_regions nl_dfs_reg;
3034 
3035 	lockdep_assert_held(&ar->conf_mutex);
3036 
3037 	ret = ath10k_update_channel_list(ar);
3038 	if (ret)
3039 		ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3040 
3041 	regpair = ar->ath_common.regulatory.regpair;
3042 
3043 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3044 		nl_dfs_reg = ar->dfs_detector->region;
3045 		wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3046 	} else {
3047 		wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3048 	}
3049 
3050 	/* The target allows setting up a per-band regdomain but ath_common
3051 	 * provides only a combined one. */
3052 	ret = ath10k_wmi_pdev_set_regdomain(ar,
3053 					    regpair->reg_domain,
3054 					    regpair->reg_domain, /* 2ghz */
3055 					    regpair->reg_domain, /* 5ghz */
3056 					    regpair->reg_2ghz_ctl,
3057 					    regpair->reg_5ghz_ctl,
3058 					    wmi_dfs_reg);
3059 	if (ret)
3060 		ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3061 }
3062 
3063 static void ath10k_reg_notifier(struct wiphy *wiphy,
3064 				struct regulatory_request *request)
3065 {
3066 	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3067 	struct ath10k *ar = hw->priv;
3068 	bool result;
3069 
3070 	ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3071 
3072 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3073 		ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3074 			   request->dfs_region);
3075 		result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3076 							  request->dfs_region);
3077 		if (!result)
3078 			ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3079 				    request->dfs_region);
3080 	}
3081 
3082 	mutex_lock(&ar->conf_mutex);
3083 	if (ar->state == ATH10K_STATE_ON)
3084 		ath10k_regd_update(ar);
3085 	mutex_unlock(&ar->conf_mutex);
3086 }
3087 
3088 /***************/
3089 /* TX handlers */
3090 /***************/
3091 
3092 enum ath10k_mac_tx_path {
3093 	ATH10K_MAC_TX_HTT,
3094 	ATH10K_MAC_TX_HTT_MGMT,
3095 	ATH10K_MAC_TX_WMI_MGMT,
3096 	ATH10K_MAC_TX_UNKNOWN,
3097 };
3098 
3099 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3100 {
3101 	lockdep_assert_held(&ar->htt.tx_lock);
3102 
3103 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3104 	ar->tx_paused |= BIT(reason);
3105 	ieee80211_stop_queues(ar->hw);
3106 }
3107 
3108 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3109 				      struct ieee80211_vif *vif)
3110 {
3111 	struct ath10k *ar = data;
3112 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3113 
3114 	if (arvif->tx_paused)
3115 		return;
3116 
3117 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3118 }
3119 
3120 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3121 {
3122 	lockdep_assert_held(&ar->htt.tx_lock);
3123 
3124 	WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3125 	ar->tx_paused &= ~BIT(reason);
3126 
3127 	if (ar->tx_paused)
3128 		return;
3129 
3130 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3131 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3132 						   ath10k_mac_tx_unlock_iter,
3133 						   ar);
3134 
3135 	ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3136 }
3137 
3138 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3139 {
3140 	struct ath10k *ar = arvif->ar;
3141 
3142 	lockdep_assert_held(&ar->htt.tx_lock);
3143 
3144 	WARN_ON(reason >= BITS_PER_LONG);
3145 	arvif->tx_paused |= BIT(reason);
3146 	ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3147 }
3148 
3149 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3150 {
3151 	struct ath10k *ar = arvif->ar;
3152 
3153 	lockdep_assert_held(&ar->htt.tx_lock);
3154 
3155 	WARN_ON(reason >= BITS_PER_LONG);
3156 	arvif->tx_paused &= ~BIT(reason);
3157 
3158 	if (ar->tx_paused)
3159 		return;
3160 
3161 	if (arvif->tx_paused)
3162 		return;
3163 
3164 	ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3165 }
3166 
3167 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3168 					   enum wmi_tlv_tx_pause_id pause_id,
3169 					   enum wmi_tlv_tx_pause_action action)
3170 {
3171 	struct ath10k *ar = arvif->ar;
3172 
3173 	lockdep_assert_held(&ar->htt.tx_lock);
3174 
3175 	switch (action) {
3176 	case WMI_TLV_TX_PAUSE_ACTION_STOP:
3177 		ath10k_mac_vif_tx_lock(arvif, pause_id);
3178 		break;
3179 	case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3180 		ath10k_mac_vif_tx_unlock(arvif, pause_id);
3181 		break;
3182 	default:
3183 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
3184 			   "received unknown tx pause action %d on vdev %i, ignoring\n",
3185 			   action, arvif->vdev_id);
3186 		break;
3187 	}
3188 }
3189 
3190 struct ath10k_mac_tx_pause {
3191 	u32 vdev_id;
3192 	enum wmi_tlv_tx_pause_id pause_id;
3193 	enum wmi_tlv_tx_pause_action action;
3194 };
3195 
3196 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3197 					    struct ieee80211_vif *vif)
3198 {
3199 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3200 	struct ath10k_mac_tx_pause *arg = data;
3201 
3202 	if (arvif->vdev_id != arg->vdev_id)
3203 		return;
3204 
3205 	ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3206 }
3207 
3208 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3209 				     enum wmi_tlv_tx_pause_id pause_id,
3210 				     enum wmi_tlv_tx_pause_action action)
3211 {
3212 	struct ath10k_mac_tx_pause arg = {
3213 		.vdev_id = vdev_id,
3214 		.pause_id = pause_id,
3215 		.action = action,
3216 	};
3217 
3218 	spin_lock_bh(&ar->htt.tx_lock);
3219 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
3220 						   IEEE80211_IFACE_ITER_RESUME_ALL,
3221 						   ath10k_mac_handle_tx_pause_iter,
3222 						   &arg);
3223 	spin_unlock_bh(&ar->htt.tx_lock);
3224 }
3225 
3226 static enum ath10k_hw_txrx_mode
3227 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3228 			   struct ieee80211_vif *vif,
3229 			   struct ieee80211_sta *sta,
3230 			   struct sk_buff *skb)
3231 {
3232 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3233 	__le16 fc = hdr->frame_control;
3234 
3235 	if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3236 		return ATH10K_HW_TXRX_RAW;
3237 
3238 	if (ieee80211_is_mgmt(fc))
3239 		return ATH10K_HW_TXRX_MGMT;
3240 
3241 	/* Workaround:
3242 	 *
3243 	 * NullFunc frames are mostly used to check whether a client or AP is
3244 	 * still reachable and responsive. This implies tx status reports must
3245 	 * be accurate - otherwise either mac80211 or userspace (e.g. hostapd)
3246 	 * can conclude that the other end has disappeared and tear down the
3247 	 * BSS connection, or it may never disconnect from the BSS/client
3248 	 * (which is what actually happens).
3249 	 *
3250 	 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3251 	 * NullFunc frames to the driver. However there's an HTT Mgmt Tx command
3252 	 * which seems to deliver correct tx reports for NullFunc frames. The
3253 	 * downside of using it is that it ignores client powersave state, so it
3254 	 * can end up disconnecting sleeping clients in AP mode. It should be
3255 	 * fine for STA mode though, because APs don't sleep.
3256 	 */
3257 	if (ar->htt.target_version_major < 3 &&
3258 	    (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3259 	    !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3260 		      ar->running_fw->fw_file.fw_features))
3261 		return ATH10K_HW_TXRX_MGMT;
3262 
3263 	/* Workaround:
3264 	 *
3265 	 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3266 	 * the NativeWifi txmode - they select the AP key instead of the peer
3267 	 * key. The Ethernet txmode seems to work, so use it for TDLS peers.
3268 	 *
3269 	 * FIXME: Check if raw mode works with TDLS.
3270 	 */
3271 	if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3272 		return ATH10K_HW_TXRX_ETHERNET;
3273 
3274 	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
3275 		return ATH10K_HW_TXRX_RAW;
3276 
3277 	return ATH10K_HW_TXRX_NATIVE_WIFI;
3278 }
3279 
3280 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3281 				     struct sk_buff *skb)
3282 {
3283 	const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3284 	const struct ieee80211_hdr *hdr = (void *)skb->data;
3285 	const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3286 			 IEEE80211_TX_CTL_INJECTED;
3287 
3288 	if (!ieee80211_has_protected(hdr->frame_control))
3289 		return false;
3290 
3291 	if ((info->flags & mask) == mask)
3292 		return false;
3293 
3294 	if (vif)
3295 		return !ath10k_vif_to_arvif(vif)->nohwcrypt;
3296 
3297 	return true;
3298 }
3299 
3300 /* HTT Tx uses the Native Wifi tx mode, which expects 802.11 frames without
3301  * the QoS Control field in the header.
3302  */
3303 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3304 {
3305 	struct ieee80211_hdr *hdr = (void *)skb->data;
3306 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3307 	u8 *qos_ctl;
3308 
3309 	if (!ieee80211_is_data_qos(hdr->frame_control))
3310 		return;
3311 
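	/* Strip the QoS Control field: move the part of the 802.11 header
	 * that precedes it forward by IEEE80211_QOS_CTL_LEN bytes (over the
	 * QoS Control field itself) and then pull the now-stale leading
	 * bytes, leaving a contiguous non-QoS header at skb->data.
	 */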
3312 	qos_ctl = ieee80211_get_qos_ctl(hdr);
3313 	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3314 		skb->data, (void *)qos_ctl - (void *)skb->data);
3315 	skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3316 
3317 	/* Some firmware revisions don't handle sending QoS NullFunc well.
3318 	 * These frames are mainly used for CQM purposes so it doesn't really
3319 	 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3320 	 */
3321 	hdr = (void *)skb->data;
3322 	if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3323 		cb->flags &= ~ATH10K_SKB_F_QOS;
3324 
3325 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3326 }
3327 
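/* For the Ethernet txmode the firmware expects 802.3 frames: replace the
 * 802.11 header plus the RFC 1042 (LLC/SNAP) encapsulation with a plain
 * Ethernet header carrying the original DA/SA and the SNAP ethertype.
 */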
3328 static void ath10k_tx_h_8023(struct sk_buff *skb)
3329 {
3330 	struct ieee80211_hdr *hdr;
3331 	struct rfc1042_hdr *rfc1042;
3332 	struct ethhdr *eth;
3333 	size_t hdrlen;
3334 	u8 da[ETH_ALEN];
3335 	u8 sa[ETH_ALEN];
3336 	__be16 type;
3337 
3338 	hdr = (void *)skb->data;
3339 	hdrlen = ieee80211_hdrlen(hdr->frame_control);
3340 	rfc1042 = (void *)skb->data + hdrlen;
3341 
3342 	ether_addr_copy(da, ieee80211_get_DA(hdr));
3343 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
3344 	type = rfc1042->snap_type;
3345 
3346 	skb_pull(skb, hdrlen + sizeof(*rfc1042));
3347 	skb_push(skb, sizeof(*eth));
3348 
3349 	eth = (void *)skb->data;
3350 	ether_addr_copy(eth->h_dest, da);
3351 	ether_addr_copy(eth->h_source, sa);
3352 	eth->h_proto = type;
3353 }
3354 
3355 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3356 				       struct ieee80211_vif *vif,
3357 				       struct sk_buff *skb)
3358 {
3359 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3360 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
3361 
3362 	/* This is only the case for a P2P GO */
3363 	if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3364 		return;
3365 
3366 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3367 		spin_lock_bh(&ar->data_lock);
3368 		if (arvif->u.ap.noa_data)
3369 			if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3370 					      GFP_ATOMIC))
3371 				memcpy(skb_put(skb, arvif->u.ap.noa_len),
3372 				       arvif->u.ap.noa_data,
3373 				       arvif->u.ap.noa_len);
3374 		spin_unlock_bh(&ar->data_lock);
3375 	}
3376 }
3377 
3378 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3379 				    struct ieee80211_vif *vif,
3380 				    struct ieee80211_txq *txq,
3381 				    struct sk_buff *skb)
3382 {
3383 	struct ieee80211_hdr *hdr = (void *)skb->data;
3384 	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3385 
3386 	cb->flags = 0;
3387 	if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3388 		cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3389 
3390 	if (ieee80211_is_mgmt(hdr->frame_control))
3391 		cb->flags |= ATH10K_SKB_F_MGMT;
3392 
3393 	if (ieee80211_is_data_qos(hdr->frame_control))
3394 		cb->flags |= ATH10K_SKB_F_QOS;
3395 
3396 	cb->vif = vif;
3397 	cb->txq = txq;
3398 }
3399 
3400 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3401 {
3402 	/* FIXME: Not really sure since when the behaviour changed. At some
3403 	 * point new firmware stopped requiring creation of peer entries for
3404 	 * offchannel tx (and actually creating them causes issues with wmi-htc
3405 	 * tx credit replenishment and reliability). Assuming it's at least 3.4
3406 	 * because that's when the `freq` field was added to the TX_FRM command.
3407 	 */
3408 	return (ar->htt.target_version_major >= 3 &&
3409 		ar->htt.target_version_minor >= 4 &&
3410 		ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3411 }
3412 
3413 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3414 {
3415 	struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3416 	int ret = 0;
3417 
3418 	spin_lock_bh(&ar->data_lock);
3419 
3420 	if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) {
3421 		ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3422 		ret = -ENOSPC;
3423 		goto unlock;
3424 	}
3425 
3426 	__skb_queue_tail(q, skb);
3427 	ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3428 
3429 unlock:
3430 	spin_unlock_bh(&ar->data_lock);
3431 
3432 	return ret;
3433 }
3434 
3435 static enum ath10k_mac_tx_path
3436 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3437 			   struct sk_buff *skb,
3438 			   enum ath10k_hw_txrx_mode txmode)
3439 {
3440 	switch (txmode) {
3441 	case ATH10K_HW_TXRX_RAW:
3442 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3443 	case ATH10K_HW_TXRX_ETHERNET:
3444 		return ATH10K_MAC_TX_HTT;
3445 	case ATH10K_HW_TXRX_MGMT:
3446 		if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3447 			     ar->running_fw->fw_file.fw_features))
3448 			return ATH10K_MAC_TX_WMI_MGMT;
3449 		else if (ar->htt.target_version_major >= 3)
3450 			return ATH10K_MAC_TX_HTT;
3451 		else
3452 			return ATH10K_MAC_TX_HTT_MGMT;
3453 	}
3454 
3455 	return ATH10K_MAC_TX_UNKNOWN;
3456 }
3457 
3458 static int ath10k_mac_tx_submit(struct ath10k *ar,
3459 				enum ath10k_hw_txrx_mode txmode,
3460 				enum ath10k_mac_tx_path txpath,
3461 				struct sk_buff *skb)
3462 {
3463 	struct ath10k_htt *htt = &ar->htt;
3464 	int ret = -EINVAL;
3465 
3466 	switch (txpath) {
3467 	case ATH10K_MAC_TX_HTT:
3468 		ret = ath10k_htt_tx(htt, txmode, skb);
3469 		break;
3470 	case ATH10K_MAC_TX_HTT_MGMT:
3471 		ret = ath10k_htt_mgmt_tx(htt, skb);
3472 		break;
3473 	case ATH10K_MAC_TX_WMI_MGMT:
3474 		ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
3475 		break;
3476 	case ATH10K_MAC_TX_UNKNOWN:
3477 		WARN_ON_ONCE(1);
3478 		ret = -EINVAL;
3479 		break;
3480 	}
3481 
3482 	if (ret) {
3483 		ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
3484 			    ret);
3485 		ieee80211_free_txskb(ar->hw, skb);
3486 	}
3487 
3488 	return ret;
3489 }
3490 
3491 /* This function consumes the sk_buff regardless of return value as far as
3492  * caller is concerned so no freeing is necessary afterwards.
3493  */
3494 static int ath10k_mac_tx(struct ath10k *ar,
3495 			 struct ieee80211_vif *vif,
3496 			 struct ieee80211_sta *sta,
3497 			 enum ath10k_hw_txrx_mode txmode,
3498 			 enum ath10k_mac_tx_path txpath,
3499 			 struct sk_buff *skb)
3500 {
3501 	struct ieee80211_hw *hw = ar->hw;
3502 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3503 	int ret;
3504 
3505 	/* We should disable CCK rates due to P2P */
3506 	if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
3507 		ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
3508 
3509 	switch (txmode) {
3510 	case ATH10K_HW_TXRX_MGMT:
3511 	case ATH10K_HW_TXRX_NATIVE_WIFI:
3512 		ath10k_tx_h_nwifi(hw, skb);
3513 		ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
3514 		ath10k_tx_h_seq_no(vif, skb);
3515 		break;
3516 	case ATH10K_HW_TXRX_ETHERNET:
3517 		ath10k_tx_h_8023(skb);
3518 		break;
3519 	case ATH10K_HW_TXRX_RAW:
3520 		if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
3521 			WARN_ON_ONCE(1);
3522 			ieee80211_free_txskb(hw, skb);
3523 			return -ENOTSUPP;
3524 		}
3525 	}
3526 
3527 	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
3528 		if (!ath10k_mac_tx_frm_has_freq(ar)) {
3529 			ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n",
3530 				   skb);
3531 
3532 			skb_queue_tail(&ar->offchan_tx_queue, skb);
3533 			ieee80211_queue_work(hw, &ar->offchan_tx_work);
3534 			return 0;
3535 		}
3536 	}
3537 
3538 	ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
3539 	if (ret) {
3540 		ath10k_warn(ar, "failed to submit frame: %d\n", ret);
3541 		return ret;
3542 	}
3543 
3544 	return 0;
3545 }
3546 
3547 void ath10k_offchan_tx_purge(struct ath10k *ar)
3548 {
3549 	struct sk_buff *skb;
3550 
3551 	for (;;) {
3552 		skb = skb_dequeue(&ar->offchan_tx_queue);
3553 		if (!skb)
3554 			break;
3555 
3556 		ieee80211_free_txskb(ar->hw, skb);
3557 	}
3558 }
3559 
3560 void ath10k_offchan_tx_work(struct work_struct *work)
3561 {
3562 	struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
3563 	struct ath10k_peer *peer;
3564 	struct ath10k_vif *arvif;
3565 	enum ath10k_hw_txrx_mode txmode;
3566 	enum ath10k_mac_tx_path txpath;
3567 	struct ieee80211_hdr *hdr;
3568 	struct ieee80211_vif *vif;
3569 	struct ieee80211_sta *sta;
3570 	struct sk_buff *skb;
3571 	const u8 *peer_addr;
3572 	int vdev_id;
3573 	int ret;
3574 	unsigned long time_left;
3575 	bool tmp_peer_created = false;
3576 
3577 	/* FW requirement: We must create a peer before FW will send out
3578 	 * an offchannel frame. Otherwise the frame will be stuck and
3579 	 * never transmitted. We delete the peer upon tx completion.
3580 	 * It is unlikely that a peer for offchannel tx will already be
3581 	 * present. However it may happen in some rare cases, so account for
3582 	 * that; otherwise we might remove a legitimate peer and break things. */
3583 
3584 	for (;;) {
3585 		skb = skb_dequeue(&ar->offchan_tx_queue);
3586 		if (!skb)
3587 			break;
3588 
3589 		mutex_lock(&ar->conf_mutex);
3590 
3591 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n",
3592 			   skb);
3593 
3594 		hdr = (struct ieee80211_hdr *)skb->data;
3595 		peer_addr = ieee80211_get_DA(hdr);
3596 
3597 		spin_lock_bh(&ar->data_lock);
3598 		vdev_id = ar->scan.vdev_id;
3599 		peer = ath10k_peer_find(ar, vdev_id, peer_addr);
3600 		spin_unlock_bh(&ar->data_lock);
3601 
3602 		if (peer)
3603 			/* FIXME: should this use ath10k_warn()? */
3604 			ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n",
3605 				   peer_addr, vdev_id);
3606 
3607 		if (!peer) {
3608 			ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
3609 						 peer_addr,
3610 						 WMI_PEER_TYPE_DEFAULT);
3611 			if (ret)
3612 				ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
3613 					    peer_addr, vdev_id, ret);
3614 			tmp_peer_created = (ret == 0);
3615 		}
3616 
3617 		spin_lock_bh(&ar->data_lock);
3618 		reinit_completion(&ar->offchan_tx_completed);
3619 		ar->offchan_tx_skb = skb;
3620 		spin_unlock_bh(&ar->data_lock);
3621 
3622 		/* It's safe to access vif and sta - conf_mutex guarantees that
3623 		 * sta_state() and remove_interface() are serialized with
3624 		 * respect to this offchannel worker.
3625 		 */
3626 		arvif = ath10k_get_arvif(ar, vdev_id);
3627 		if (arvif) {
3628 			vif = arvif->vif;
3629 			sta = ieee80211_find_sta(vif, peer_addr);
3630 		} else {
3631 			vif = NULL;
3632 			sta = NULL;
3633 		}
3634 
3635 		txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3636 		txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3637 
3638 		ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3639 		if (ret) {
3640 			ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
3641 				    ret);
3642 			/* not serious */
3643 		}
3644 
3645 		time_left =
3646 		wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
3647 		if (time_left == 0)
3648 			ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n",
3649 				    skb);
3650 
3651 		if (!peer && tmp_peer_created) {
3652 			ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
3653 			if (ret)
3654 				ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
3655 					    peer_addr, vdev_id, ret);
3656 		}
3657 
3658 		mutex_unlock(&ar->conf_mutex);
3659 	}
3660 }
3661 
3662 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
3663 {
3664 	struct sk_buff *skb;
3665 
3666 	for (;;) {
3667 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3668 		if (!skb)
3669 			break;
3670 
3671 		ieee80211_free_txskb(ar->hw, skb);
3672 	}
3673 }
3674 
3675 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
3676 {
3677 	struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
3678 	struct sk_buff *skb;
3679 	int ret;
3680 
3681 	for (;;) {
3682 		skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
3683 		if (!skb)
3684 			break;
3685 
3686 		ret = ath10k_wmi_mgmt_tx(ar, skb);
3687 		if (ret) {
3688 			ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
3689 				    ret);
3690 			ieee80211_free_txskb(ar->hw, skb);
3691 		}
3692 	}
3693 }
3694 
3695 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
3696 {
3697 	struct ath10k_txq *artxq;
3698 
3699 	if (!txq)
3700 		return;
3701 
3702 	artxq = (void *)txq->drv_priv;
3703 	INIT_LIST_HEAD(&artxq->list);
3704 }
3705 
3706 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
3707 {
3708 	struct ath10k_txq *artxq;
3709 	struct ath10k_skb_cb *cb;
3710 	struct sk_buff *msdu;
3711 	int msdu_id;
3712 
3713 	if (!txq)
3714 		return;
3715 
3716 	artxq = (void *)txq->drv_priv;
3717 	spin_lock_bh(&ar->txqs_lock);
3718 	if (!list_empty(&artxq->list))
3719 		list_del_init(&artxq->list);
3720 	spin_unlock_bh(&ar->txqs_lock);
3721 
3722 	spin_lock_bh(&ar->htt.tx_lock);
3723 	idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
3724 		cb = ATH10K_SKB_CB(msdu);
3725 		if (cb->txq == txq)
3726 			cb->txq = NULL;
3727 	}
3728 	spin_unlock_bh(&ar->htt.tx_lock);
3729 }
3730 
3731 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
3732 					    u16 peer_id,
3733 					    u8 tid)
3734 {
3735 	struct ath10k_peer *peer;
3736 
3737 	lockdep_assert_held(&ar->data_lock);
3738 
3739 	peer = ar->peer_map[peer_id];
3740 	if (!peer)
3741 		return NULL;
3742 
3743 	if (peer->sta)
3744 		return peer->sta->txq[tid];
3745 	else if (peer->vif)
3746 		return peer->vif->txq;
3747 	else
3748 		return NULL;
3749 }
3750 
3751 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
3752 				   struct ieee80211_txq *txq)
3753 {
3754 	struct ath10k *ar = hw->priv;
3755 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3756 
3757 	/* No need to get locks */
3758 
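	/* When the tx queue state machine is in plain push mode frames can
	 * always be pushed. Otherwise only push while the number of pending
	 * tx frames stays below the device-wide push allowance and the
	 * per-txq count of frames queued to the firmware stays below its own
	 * allowance. These lock-free checks are intentionally best-effort.
	 */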
3759 	if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
3760 		return true;
3761 
3762 	if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
3763 		return true;
3764 
3765 	if (artxq->num_fw_queued < artxq->num_push_allowed)
3766 		return true;
3767 
3768 	return false;
3769 }
3770 
3771 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
3772 			   struct ieee80211_txq *txq)
3773 {
3774 	struct ath10k *ar = hw->priv;
3775 	struct ath10k_htt *htt = &ar->htt;
3776 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
3777 	struct ieee80211_vif *vif = txq->vif;
3778 	struct ieee80211_sta *sta = txq->sta;
3779 	enum ath10k_hw_txrx_mode txmode;
3780 	enum ath10k_mac_tx_path txpath;
3781 	struct sk_buff *skb;
3782 	struct ieee80211_hdr *hdr;
3783 	size_t skb_len;
3784 	bool is_mgmt, is_presp;
3785 	int ret;
3786 
3787 	spin_lock_bh(&ar->htt.tx_lock);
3788 	ret = ath10k_htt_tx_inc_pending(htt);
3789 	spin_unlock_bh(&ar->htt.tx_lock);
3790 
3791 	if (ret)
3792 		return ret;
3793 
3794 	skb = ieee80211_tx_dequeue(hw, txq);
3795 	if (!skb) {
3796 		spin_lock_bh(&ar->htt.tx_lock);
3797 		ath10k_htt_tx_dec_pending(htt);
3798 		spin_unlock_bh(&ar->htt.tx_lock);
3799 
3800 		return -ENOENT;
3801 	}
3802 
3803 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
3804 
3805 	skb_len = skb->len;
3806 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
3807 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
3808 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
3809 
3810 	if (is_mgmt) {
3811 		hdr = (struct ieee80211_hdr *)skb->data;
3812 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
3813 
3814 		spin_lock_bh(&ar->htt.tx_lock);
3815 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
3816 
3817 		if (ret) {
3818 			ath10k_htt_tx_dec_pending(htt);
3819 			spin_unlock_bh(&ar->htt.tx_lock);
3820 			return ret;
3821 		}
3822 		spin_unlock_bh(&ar->htt.tx_lock);
3823 	}
3824 
3825 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
3826 	if (unlikely(ret)) {
3827 		ath10k_warn(ar, "failed to push frame: %d\n", ret);
3828 
3829 		spin_lock_bh(&ar->htt.tx_lock);
3830 		ath10k_htt_tx_dec_pending(htt);
3831 		if (is_mgmt)
3832 			ath10k_htt_tx_mgmt_dec_pending(htt);
3833 		spin_unlock_bh(&ar->htt.tx_lock);
3834 
3835 		return ret;
3836 	}
3837 
3838 	spin_lock_bh(&ar->htt.tx_lock);
3839 	artxq->num_fw_queued++;
3840 	spin_unlock_bh(&ar->htt.tx_lock);
3841 
3842 	return skb_len;
3843 }
3844 
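/* Walk ar->txqs round-robin and push up to 16 frames from each queue while
 * firmware has room. Queues that ran dry (-ENOENT) are dropped from the
 * list, the rest are rotated to the tail for the next pass.
 */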
3845 void ath10k_mac_tx_push_pending(struct ath10k *ar)
3846 {
3847 	struct ieee80211_hw *hw = ar->hw;
3848 	struct ieee80211_txq *txq;
3849 	struct ath10k_txq *artxq;
3850 	struct ath10k_txq *last;
3851 	int ret;
3852 	int max;
3853 
3854 	if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
3855 		return;
3856 
3857 	spin_lock_bh(&ar->txqs_lock);
3858 	rcu_read_lock();
3859 
3860 	last = list_last_entry(&ar->txqs, struct ath10k_txq, list);
3861 	while (!list_empty(&ar->txqs)) {
3862 		artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
3863 		txq = container_of((void *)artxq, struct ieee80211_txq,
3864 				   drv_priv);
3865 
3866 		/* Prevent an aggressive sta/tid from taking over the tx queue */
3867 		max = 16;
3868 		ret = 0;
3869 		while (ath10k_mac_tx_can_push(hw, txq) && max--) {
3870 			ret = ath10k_mac_tx_push_txq(hw, txq);
3871 			if (ret < 0)
3872 				break;
3873 		}
3874 
3875 		list_del_init(&artxq->list);
3876 		if (ret != -ENOENT)
3877 			list_add_tail(&artxq->list, &ar->txqs);
3878 
3879 		ath10k_htt_tx_txq_update(hw, txq);
3880 
3881 		if (artxq == last || (ret < 0 && ret != -ENOENT))
3882 			break;
3883 	}
3884 
3885 	rcu_read_unlock();
3886 	spin_unlock_bh(&ar->txqs_lock);
3887 }
3888 
3889 /************/
3890 /* Scanning */
3891 /************/
3892 
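/* Complete a scan or remain-on-channel request, notify mac80211 and reset
 * the scan state machine back to IDLE. Must be called with ar->data_lock
 * held; ath10k_scan_finish() below is the locked wrapper.
 */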
3893 void __ath10k_scan_finish(struct ath10k *ar)
3894 {
3895 	lockdep_assert_held(&ar->data_lock);
3896 
3897 	switch (ar->scan.state) {
3898 	case ATH10K_SCAN_IDLE:
3899 		break;
3900 	case ATH10K_SCAN_RUNNING:
3901 	case ATH10K_SCAN_ABORTING:
3902 		if (!ar->scan.is_roc) {
3903 			struct cfg80211_scan_info info = {
3904 				.aborted = (ar->scan.state ==
3905 					    ATH10K_SCAN_ABORTING),
3906 			};
3907 
3908 			ieee80211_scan_completed(ar->hw, &info);
3909 		} else if (ar->scan.roc_notify) {
3910 			ieee80211_remain_on_channel_expired(ar->hw);
3911 		}
3912 		/* fall through */
3913 	case ATH10K_SCAN_STARTING:
3914 		ar->scan.state = ATH10K_SCAN_IDLE;
3915 		ar->scan_channel = NULL;
3916 		ar->scan.roc_freq = 0;
3917 		ath10k_offchan_tx_purge(ar);
3918 		cancel_delayed_work(&ar->scan.timeout);
3919 		complete(&ar->scan.completed);
3920 		break;
3921 	}
3922 }
3923 
3924 void ath10k_scan_finish(struct ath10k *ar)
3925 {
3926 	spin_lock_bh(&ar->data_lock);
3927 	__ath10k_scan_finish(ar);
3928 	spin_unlock_bh(&ar->data_lock);
3929 }
3930 
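/* Request firmware to stop the ongoing scan and wait up to three seconds
 * for the completion event. The scan state is forced back to IDLE even if
 * the event never arrives (see the comment at the out label).
 */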
3931 static int ath10k_scan_stop(struct ath10k *ar)
3932 {
3933 	struct wmi_stop_scan_arg arg = {
3934 		.req_id = 1, /* FIXME */
3935 		.req_type = WMI_SCAN_STOP_ONE,
3936 		.u.scan_id = ATH10K_SCAN_ID,
3937 	};
3938 	int ret;
3939 
3940 	lockdep_assert_held(&ar->conf_mutex);
3941 
3942 	ret = ath10k_wmi_stop_scan(ar, &arg);
3943 	if (ret) {
3944 		ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
3945 		goto out;
3946 	}
3947 
3948 	ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
3949 	if (ret == 0) {
3950 		ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
3951 		ret = -ETIMEDOUT;
3952 	} else if (ret > 0) {
3953 		ret = 0;
3954 	}
3955 
3956 out:
3957 	/* Scan state should be updated upon scan completion, but in case
3958 	 * firmware fails to deliver the event (for whatever reason) it is
3959 	 * desirable to clean up the scan state anyway. Firmware may have just
3960 	 * dropped the scan completion event delivery due to the transport pipe
3961 	 * being overflowed with data and/or it may recover on its own before
3962 	 * the next scan request is submitted.
3963 	 */
3964 	spin_lock_bh(&ar->data_lock);
3965 	if (ar->scan.state != ATH10K_SCAN_IDLE)
3966 		__ath10k_scan_finish(ar);
3967 	spin_unlock_bh(&ar->data_lock);
3968 
3969 	return ret;
3970 }
3971 
3972 static void ath10k_scan_abort(struct ath10k *ar)
3973 {
3974 	int ret;
3975 
3976 	lockdep_assert_held(&ar->conf_mutex);
3977 
3978 	spin_lock_bh(&ar->data_lock);
3979 
3980 	switch (ar->scan.state) {
3981 	case ATH10K_SCAN_IDLE:
3982 		/* This can happen if the timeout worker kicked in and requested
3983 		 * an abort while the scan completion was being processed.
3984 		 */
3985 		break;
3986 	case ATH10K_SCAN_STARTING:
3987 	case ATH10K_SCAN_ABORTING:
3988 		ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
3989 			    ath10k_scan_state_str(ar->scan.state),
3990 			    ar->scan.state);
3991 		break;
3992 	case ATH10K_SCAN_RUNNING:
3993 		ar->scan.state = ATH10K_SCAN_ABORTING;
3994 		spin_unlock_bh(&ar->data_lock);
3995 
3996 		ret = ath10k_scan_stop(ar);
3997 		if (ret)
3998 			ath10k_warn(ar, "failed to abort scan: %d\n", ret);
3999 
4000 		spin_lock_bh(&ar->data_lock);
4001 		break;
4002 	}
4003 
4004 	spin_unlock_bh(&ar->data_lock);
4005 }
4006 
4007 void ath10k_scan_timeout_work(struct work_struct *work)
4008 {
4009 	struct ath10k *ar = container_of(work, struct ath10k,
4010 					 scan.timeout.work);
4011 
4012 	mutex_lock(&ar->conf_mutex);
4013 	ath10k_scan_abort(ar);
4014 	mutex_unlock(&ar->conf_mutex);
4015 }
4016 
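/* Issue a WMI start scan command and wait up to one second for the "scan
 * started" completion. Times out with -ETIMEDOUT (stopping the scan again)
 * or returns -EINVAL if the scan state already fell back to IDLE.
 */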
4017 static int ath10k_start_scan(struct ath10k *ar,
4018 			     const struct wmi_start_scan_arg *arg)
4019 {
4020 	int ret;
4021 
4022 	lockdep_assert_held(&ar->conf_mutex);
4023 
4024 	ret = ath10k_wmi_start_scan(ar, arg);
4025 	if (ret)
4026 		return ret;
4027 
4028 	ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4029 	if (ret == 0) {
4030 		ret = ath10k_scan_stop(ar);
4031 		if (ret)
4032 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4033 
4034 		return -ETIMEDOUT;
4035 	}
4036 
4037 	/* If we failed to start the scan, return error code at
4038 	 * this point.  This is probably due to some issue in the
4039 	 * firmware, but no need to wedge the driver due to that...
4040 	 */
4041 	spin_lock_bh(&ar->data_lock);
4042 	if (ar->scan.state == ATH10K_SCAN_IDLE) {
4043 		spin_unlock_bh(&ar->data_lock);
4044 		return -EINVAL;
4045 	}
4046 	spin_unlock_bh(&ar->data_lock);
4047 
4048 	return 0;
4049 }
4050 
4051 /**********************/
4052 /* mac80211 callbacks */
4053 /**********************/
4054 
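/* mac80211 .tx callback: classify the frame (HTT vs. WMI, data vs. mgmt),
 * account for it in the HTT pending counters where needed and hand it over
 * to ath10k_mac_tx(). The frame is dropped if the accounting fails.
 */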
4055 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4056 			     struct ieee80211_tx_control *control,
4057 			     struct sk_buff *skb)
4058 {
4059 	struct ath10k *ar = hw->priv;
4060 	struct ath10k_htt *htt = &ar->htt;
4061 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4062 	struct ieee80211_vif *vif = info->control.vif;
4063 	struct ieee80211_sta *sta = control->sta;
4064 	struct ieee80211_txq *txq = NULL;
4065 	struct ieee80211_hdr *hdr = (void *)skb->data;
4066 	enum ath10k_hw_txrx_mode txmode;
4067 	enum ath10k_mac_tx_path txpath;
4068 	bool is_htt;
4069 	bool is_mgmt;
4070 	bool is_presp;
4071 	int ret;
4072 
4073 	ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb);
4074 
4075 	txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4076 	txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4077 	is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4078 		  txpath == ATH10K_MAC_TX_HTT_MGMT);
4079 	is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4080 
4081 	if (is_htt) {
4082 		spin_lock_bh(&ar->htt.tx_lock);
4083 		is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4084 
4085 		ret = ath10k_htt_tx_inc_pending(htt);
4086 		if (ret) {
4087 			ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4088 				    ret);
4089 			spin_unlock_bh(&ar->htt.tx_lock);
4090 			ieee80211_free_txskb(ar->hw, skb);
4091 			return;
4092 		}
4093 
4094 		ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4095 		if (ret) {
4096 			ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4097 				   ret);
4098 			ath10k_htt_tx_dec_pending(htt);
4099 			spin_unlock_bh(&ar->htt.tx_lock);
4100 			ieee80211_free_txskb(ar->hw, skb);
4101 			return;
4102 		}
4103 		spin_unlock_bh(&ar->htt.tx_lock);
4104 	}
4105 
4106 	ret = ath10k_mac_tx(ar, vif, sta, txmode, txpath, skb);
4107 	if (ret) {
4108 		ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4109 		if (is_htt) {
4110 			spin_lock_bh(&ar->htt.tx_lock);
4111 			ath10k_htt_tx_dec_pending(htt);
4112 			if (is_mgmt)
4113 				ath10k_htt_tx_mgmt_dec_pending(htt);
4114 			spin_unlock_bh(&ar->htt.tx_lock);
4115 		}
4116 		return;
4117 	}
4118 }
4119 
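/* mac80211 .wake_tx_queue callback: queue the woken txq on ar->txqs, push
 * up to 16 frames from the head of that list right away and let HTT know
 * about the updated queue depths.
 */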
4120 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4121 					struct ieee80211_txq *txq)
4122 {
4123 	struct ath10k *ar = hw->priv;
4124 	struct ath10k_txq *artxq = (void *)txq->drv_priv;
4125 	struct ieee80211_txq *f_txq;
4126 	struct ath10k_txq *f_artxq;
4127 	int ret = 0;
4128 	int max = 16;
4129 
4130 	spin_lock_bh(&ar->txqs_lock);
4131 	if (list_empty(&artxq->list))
4132 		list_add_tail(&artxq->list, &ar->txqs);
4133 
4134 	f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list);
4135 	f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv);
4136 	list_del_init(&f_artxq->list);
4137 
4138 	while (ath10k_mac_tx_can_push(hw, f_txq) && max--) {
4139 		ret = ath10k_mac_tx_push_txq(hw, f_txq);
4140 		if (ret)
4141 			break;
4142 	}
4143 	if (ret != -ENOENT)
4144 		list_add_tail(&f_artxq->list, &ar->txqs);
4145 	spin_unlock_bh(&ar->txqs_lock);
4146 
4147 	ath10k_htt_tx_txq_update(hw, f_txq);
4148 	ath10k_htt_tx_txq_update(hw, txq);
4149 }
4150 
4151 /* Must not be called with conf_mutex held as workers can use that also. */
4152 void ath10k_drain_tx(struct ath10k *ar)
4153 {
4154 	/* make sure rcu-protected mac80211 tx path itself is drained */
4155 	synchronize_net();
4156 
4157 	ath10k_offchan_tx_purge(ar);
4158 	ath10k_mgmt_over_wmi_tx_purge(ar);
4159 
4160 	cancel_work_sync(&ar->offchan_tx_work);
4161 	cancel_work_sync(&ar->wmi_mgmt_tx_work);
4162 }
4163 
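/* Bring the device down: stop monitor and scan operation, flush peers,
 * stop the core and power down the HIF layer. Requires conf_mutex.
 */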
4164 void ath10k_halt(struct ath10k *ar)
4165 {
4166 	struct ath10k_vif *arvif;
4167 
4168 	lockdep_assert_held(&ar->conf_mutex);
4169 
4170 	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4171 	ar->filter_flags = 0;
4172 	ar->monitor = false;
4173 	ar->monitor_arvif = NULL;
4174 
4175 	if (ar->monitor_started)
4176 		ath10k_monitor_stop(ar);
4177 
4178 	ar->monitor_started = false;
4179 	ar->tx_paused = 0;
4180 
4181 	ath10k_scan_finish(ar);
4182 	ath10k_peer_cleanup_all(ar);
4183 	ath10k_core_stop(ar);
4184 	ath10k_hif_power_down(ar);
4185 
4186 	spin_lock_bh(&ar->data_lock);
4187 	list_for_each_entry(arvif, &ar->arvifs, list)
4188 		ath10k_mac_vif_beacon_cleanup(arvif);
4189 	spin_unlock_bh(&ar->data_lock);
4190 }
4191 
4192 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4193 {
4194 	struct ath10k *ar = hw->priv;
4195 
4196 	mutex_lock(&ar->conf_mutex);
4197 
4198 	*tx_ant = ar->cfg_tx_chainmask;
4199 	*rx_ant = ar->cfg_rx_chainmask;
4200 
4201 	mutex_unlock(&ar->conf_mutex);
4202 
4203 	return 0;
4204 }
4205 
4206 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4207 {
4208 	/* It is not clear that allowing gaps in the chainmask
4209 	 * is helpful.  It probably will not do what the user
4210 	 * is hoping for, so warn in that case.
4211 	 */
4212 	if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4213 		return;
4214 
4215 	ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x.  Suggested values: 15, 7, 3, 1 or 0.\n",
4216 		    dbg, cm);
4217 }
4218 
4219 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4220 {
4221 	int nsts = ar->vht_cap_info;
4222 
4223 	nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4224 	nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4225 
4226 	/* If firmware does not deliver to the host the number of space-time
4227 	 * streams supported, assume it supports up to 4 BF STS and return
4228 	 * the VHT CAP encoding of that, i.e. nsts - 1 = 3.
4229 	 */
4230 	if (nsts == 0)
4231 		return 3;
4232 
4233 	return nsts;
4234 }
4235 
4236 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4237 {
4238 	int sound_dim = ar->vht_cap_info;
4239 
4240 	sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4241 	sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4242 
4243 	/* If the sounding dimension is not advertised by the firmware,
4244 	 * let's use a default value of 1
4245 	 */
4246 	if (sound_dim == 0)
4247 		return 1;
4248 
4249 	return sound_dim;
4250 }
4251 
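/* Build the VHT capabilities advertised to mac80211 from the firmware
 * reported vht_cap_info, filling in the beamformee STS and sounding
 * dimension fields and an MCS map limited to the configured tx chainmask.
 */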
4252 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4253 {
4254 	struct ieee80211_sta_vht_cap vht_cap = {0};
4255 	u16 mcs_map;
4256 	u32 val;
4257 	int i;
4258 
4259 	vht_cap.vht_supported = 1;
4260 	vht_cap.cap = ar->vht_cap_info;
4261 
4262 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4263 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4264 		val = ath10k_mac_get_vht_cap_bf_sts(ar);
4265 		val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4266 		val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4267 
4268 		vht_cap.cap |= val;
4269 	}
4270 
4271 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4272 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4273 		val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4274 		val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4275 		val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4276 
4277 		vht_cap.cap |= val;
4278 	}
4279 
4280 	mcs_map = 0;
4281 	for (i = 0; i < 8; i++) {
4282 		if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4283 			mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4284 		else
4285 			mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4286 	}
4287 
4288 	if (ar->cfg_tx_chainmask <= 1)
4289 		vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4290 
4291 	vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4292 	vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4293 
4294 	return vht_cap;
4295 }
4296 
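/* Translate the WMI_HT_CAP_* bits reported by firmware into the HT
 * capabilities handed to mac80211, with an RX MCS mask derived from the
 * configured rx chainmask.
 */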
4297 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4298 {
4299 	int i;
4300 	struct ieee80211_sta_ht_cap ht_cap = {0};
4301 
4302 	if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4303 		return ht_cap;
4304 
4305 	ht_cap.ht_supported = 1;
4306 	ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4307 	ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4308 	ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4309 	ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4310 	ht_cap.cap |=
4311 		WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4312 
4313 	if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4314 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4315 
4316 	if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4317 		ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4318 
4319 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4320 		u32 smps;
4321 
4322 		smps   = WLAN_HT_CAP_SM_PS_DYNAMIC;
4323 		smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4324 
4325 		ht_cap.cap |= smps;
4326 	}
4327 
4328 	if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4329 		ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4330 
4331 	if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4332 		u32 stbc;
4333 
4334 		stbc   = ar->ht_cap_info;
4335 		stbc  &= WMI_HT_CAP_RX_STBC;
4336 		stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4337 		stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4338 		stbc  &= IEEE80211_HT_CAP_RX_STBC;
4339 
4340 		ht_cap.cap |= stbc;
4341 	}
4342 
4343 	if (ar->ht_cap_info & WMI_HT_CAP_LDPC)
4344 		ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4345 
4346 	if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4347 		ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4348 
4349 	/* max AMSDU is implicitly taken from vht_cap_info */
4350 	if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4351 		ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4352 
4353 	for (i = 0; i < ar->num_rf_chains; i++) {
4354 		if (ar->cfg_rx_chainmask & BIT(i))
4355 			ht_cap.mcs.rx_mask[i] = 0xFF;
4356 	}
4357 
4358 	ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4359 
4360 	return ht_cap;
4361 }
4362 
4363 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4364 {
4365 	struct ieee80211_supported_band *band;
4366 	struct ieee80211_sta_vht_cap vht_cap;
4367 	struct ieee80211_sta_ht_cap ht_cap;
4368 
4369 	ht_cap = ath10k_get_ht_cap(ar);
4370 	vht_cap = ath10k_create_vht_cap(ar);
4371 
4372 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4373 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4374 		band->ht_cap = ht_cap;
4375 	}
4376 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
4377 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
4378 		band->ht_cap = ht_cap;
4379 		band->vht_cap = vht_cap;
4380 	}
4381 }
4382 
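/* Apply new tx/rx chainmasks: the values are cached unconditionally but
 * pushed to firmware via pdev parameters only while the device is running,
 * and the HT/VHT capabilities are rebuilt to match.
 */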
4383 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
4384 {
4385 	int ret;
4386 
4387 	lockdep_assert_held(&ar->conf_mutex);
4388 
4389 	ath10k_check_chain_mask(ar, tx_ant, "tx");
4390 	ath10k_check_chain_mask(ar, rx_ant, "rx");
4391 
4392 	ar->cfg_tx_chainmask = tx_ant;
4393 	ar->cfg_rx_chainmask = rx_ant;
4394 
4395 	if ((ar->state != ATH10K_STATE_ON) &&
4396 	    (ar->state != ATH10K_STATE_RESTARTED))
4397 		return 0;
4398 
4399 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
4400 					tx_ant);
4401 	if (ret) {
4402 		ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4403 			    ret, tx_ant);
4404 		return ret;
4405 	}
4406 
4407 	ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4408 					rx_ant);
4409 	if (ret) {
4410 		ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4411 			    ret, rx_ant);
4412 		return ret;
4413 	}
4414 
4415 	/* Reload HT/VHT capability */
4416 	ath10k_mac_setup_ht_vht_cap(ar);
4417 
4418 	return 0;
4419 }
4420 
4421 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4422 {
4423 	struct ath10k *ar = hw->priv;
4424 	int ret;
4425 
4426 	mutex_lock(&ar->conf_mutex);
4427 	ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4428 	mutex_unlock(&ar->conf_mutex);
4429 	return ret;
4430 }
4431 
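/* mac80211 .start callback: power up the HIF, boot the core firmware and
 * apply the default pdev configuration (PMF QoS, dynamic BW, ANI, chain
 * masks, ARP AC override, etc.) before regdomain and thermal setup.
 */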
4432 static int ath10k_start(struct ieee80211_hw *hw)
4433 {
4434 	struct ath10k *ar = hw->priv;
4435 	u32 param;
4436 	int ret = 0;
4437 
4438 	/*
4439 	 * This makes sense only when restarting hw. It is harmless to call
4440 	 * unconditionally. This is necessary to make sure no HTT/WMI tx
4441 	 * commands will be submitted while restarting.
4442 	 */
4443 	ath10k_drain_tx(ar);
4444 
4445 	mutex_lock(&ar->conf_mutex);
4446 
4447 	switch (ar->state) {
4448 	case ATH10K_STATE_OFF:
4449 		ar->state = ATH10K_STATE_ON;
4450 		break;
4451 	case ATH10K_STATE_RESTARTING:
4452 		ath10k_halt(ar);
4453 		ar->state = ATH10K_STATE_RESTARTED;
4454 		break;
4455 	case ATH10K_STATE_ON:
4456 	case ATH10K_STATE_RESTARTED:
4457 	case ATH10K_STATE_WEDGED:
4458 		WARN_ON(1);
4459 		ret = -EINVAL;
4460 		goto err;
4461 	case ATH10K_STATE_UTF:
4462 		ret = -EBUSY;
4463 		goto err;
4464 	}
4465 
4466 	ret = ath10k_hif_power_up(ar);
4467 	if (ret) {
4468 		ath10k_err(ar, "Could not init hif: %d\n", ret);
4469 		goto err_off;
4470 	}
4471 
4472 	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4473 				&ar->normal_mode_fw);
4474 	if (ret) {
4475 		ath10k_err(ar, "Could not init core: %d\n", ret);
4476 		goto err_power_down;
4477 	}
4478 
4479 	param = ar->wmi.pdev_param->pmf_qos;
4480 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4481 	if (ret) {
4482 		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4483 		goto err_core_stop;
4484 	}
4485 
4486 	param = ar->wmi.pdev_param->dynamic_bw;
4487 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4488 	if (ret) {
4489 		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4490 		goto err_core_stop;
4491 	}
4492 
4493 	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4494 		ret = ath10k_wmi_adaptive_qcs(ar, true);
4495 		if (ret) {
4496 			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4497 				    ret);
4498 			goto err_core_stop;
4499 		}
4500 	}
4501 
4502 	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4503 		param = ar->wmi.pdev_param->burst_enable;
4504 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4505 		if (ret) {
4506 			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4507 			goto err_core_stop;
4508 		}
4509 	}
4510 
4511 	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4512 
4513 	/*
4514 	 * By default FW sets the ARP frame access category to voice (6). In
4515 	 * that case the ARP exchange does not work properly for a UAPSD
4516 	 * enabled AP. ARP requests which arrive with access category 0 are
4517 	 * processed by the network stack and sent back with access category 0,
4518 	 * but FW changes the access category to 6. Setting the ARP frame
4519 	 * access category to best effort (0) solves this problem.
4520 	 */
4521 
4522 	param = ar->wmi.pdev_param->arp_ac_override;
4523 	ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4524 	if (ret) {
4525 		ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
4526 			    ret);
4527 		goto err_core_stop;
4528 	}
4529 
4530 	if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
4531 		     ar->running_fw->fw_file.fw_features)) {
4532 		ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
4533 							  WMI_CCA_DETECT_LEVEL_AUTO,
4534 							  WMI_CCA_DETECT_MARGIN_AUTO);
4535 		if (ret) {
4536 			ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
4537 				    ret);
4538 			goto err_core_stop;
4539 		}
4540 	}
4541 
4542 	param = ar->wmi.pdev_param->ani_enable;
4543 	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4544 	if (ret) {
4545 		ath10k_warn(ar, "failed to enable ani by default: %d\n",
4546 			    ret);
4547 		goto err_core_stop;
4548 	}
4549 
4550 	ar->ani_enabled = true;
4551 
4552 	if (ath10k_peer_stats_enabled(ar)) {
4553 		param = ar->wmi.pdev_param->peer_stats_update_period;
4554 		ret = ath10k_wmi_pdev_set_param(ar, param,
4555 						PEER_DEFAULT_STATS_UPDATE_PERIOD);
4556 		if (ret) {
4557 			ath10k_warn(ar,
4558 				    "failed to set peer stats period : %d\n",
4559 				    ret);
4560 			goto err_core_stop;
4561 		}
4562 	}
4563 
4564 	param = ar->wmi.pdev_param->enable_btcoex;
4565 	if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
4566 	    test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
4567 		     ar->running_fw->fw_file.fw_features)) {
4568 		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4569 		if (ret) {
4570 			ath10k_warn(ar,
4571 				    "failed to set btcoex param: %d\n", ret);
4572 			goto err_core_stop;
4573 		}
4574 		clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
4575 	}
4576 
4577 	ar->num_started_vdevs = 0;
4578 	ath10k_regd_update(ar);
4579 
4580 	ath10k_spectral_start(ar);
4581 	ath10k_thermal_set_throttling(ar);
4582 
4583 	mutex_unlock(&ar->conf_mutex);
4584 	return 0;
4585 
4586 err_core_stop:
4587 	ath10k_core_stop(ar);
4588 
4589 err_power_down:
4590 	ath10k_hif_power_down(ar);
4591 
4592 err_off:
4593 	ar->state = ATH10K_STATE_OFF;
4594 
4595 err:
4596 	mutex_unlock(&ar->conf_mutex);
4597 	return ret;
4598 }
4599 
4600 static void ath10k_stop(struct ieee80211_hw *hw)
4601 {
4602 	struct ath10k *ar = hw->priv;
4603 
4604 	ath10k_drain_tx(ar);
4605 
4606 	mutex_lock(&ar->conf_mutex);
4607 	if (ar->state != ATH10K_STATE_OFF) {
4608 		ath10k_halt(ar);
4609 		ar->state = ATH10K_STATE_OFF;
4610 	}
4611 	mutex_unlock(&ar->conf_mutex);
4612 
4613 	cancel_delayed_work_sync(&ar->scan.timeout);
4614 	cancel_work_sync(&ar->restart_work);
4615 }
4616 
4617 static int ath10k_config_ps(struct ath10k *ar)
4618 {
4619 	struct ath10k_vif *arvif;
4620 	int ret = 0;
4621 
4622 	lockdep_assert_held(&ar->conf_mutex);
4623 
4624 	list_for_each_entry(arvif, &ar->arvifs, list) {
4625 		ret = ath10k_mac_vif_setup_ps(arvif);
4626 		if (ret) {
4627 			ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
4628 			break;
4629 		}
4630 	}
4631 
4632 	return ret;
4633 }
4634 
4635 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
4636 {
4637 	int ret;
4638 	u32 param;
4639 
4640 	lockdep_assert_held(&ar->conf_mutex);
4641 
4642 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower);
4643 
4644 	param = ar->wmi.pdev_param->txpower_limit2g;
4645 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4646 	if (ret) {
4647 		ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
4648 			    txpower, ret);
4649 		return ret;
4650 	}
4651 
4652 	param = ar->wmi.pdev_param->txpower_limit5g;
4653 	ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2);
4654 	if (ret) {
4655 		ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
4656 			    txpower, ret);
4657 		return ret;
4658 	}
4659 
4660 	return 0;
4661 }
4662 
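/* Recalculate the pdev tx power limit as the minimum txpower requested
 * across all vifs and program it (as txpower * 2) through the 2g and 5g
 * txpower limit pdev parameters.
 */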
4663 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
4664 {
4665 	struct ath10k_vif *arvif;
4666 	int ret, txpower = -1;
4667 
4668 	lockdep_assert_held(&ar->conf_mutex);
4669 
4670 	list_for_each_entry(arvif, &ar->arvifs, list) {
4671 		WARN_ON(arvif->txpower < 0);
4672 
4673 		if (txpower == -1)
4674 			txpower = arvif->txpower;
4675 		else
4676 			txpower = min(txpower, arvif->txpower);
4677 	}
4678 
4679 	if (WARN_ON(txpower == -1))
4680 		return -EINVAL;
4681 
4682 	ret = ath10k_mac_txpower_setup(ar, txpower);
4683 	if (ret) {
4684 		ath10k_warn(ar, "failed to setup tx power %d: %d\n",
4685 			    txpower, ret);
4686 		return ret;
4687 	}
4688 
4689 	return 0;
4690 }
4691 
4692 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
4693 {
4694 	struct ath10k *ar = hw->priv;
4695 	struct ieee80211_conf *conf = &hw->conf;
4696 	int ret = 0;
4697 
4698 	mutex_lock(&ar->conf_mutex);
4699 
4700 	if (changed & IEEE80211_CONF_CHANGE_PS)
4701 		ath10k_config_ps(ar);
4702 
4703 	if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
4704 		ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
4705 		ret = ath10k_monitor_recalc(ar);
4706 		if (ret)
4707 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
4708 	}
4709 
4710 	mutex_unlock(&ar->conf_mutex);
4711 	return ret;
4712 }
4713 
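/* Derive the number of spatial streams from a chainmask: 0xf -> 4,
 * 0x7 -> 3, 0x3 -> 2; anything else, including masks with gaps, falls back
 * to 1.
 */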
4714 static u32 get_nss_from_chainmask(u16 chain_mask)
4715 {
4716 	if ((chain_mask & 0xf) == 0xf)
4717 		return 4;
4718 	else if ((chain_mask & 0x7) == 0x7)
4719 		return 3;
4720 	else if ((chain_mask & 0x3) == 0x3)
4721 		return 2;
4722 	return 1;
4723 }
4724 
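/* Program the vdev txbf parameter from the firmware's VHT beamforming
 * capabilities (STS count, sounding dimensions, SU/MU bfer/bfee flags).
 * No-op unless the firmware expects txbf configuration before association.
 */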
4725 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
4726 {
4727 	u32 value = 0;
4728 	struct ath10k *ar = arvif->ar;
4729 	int nsts;
4730 	int sound_dim;
4731 
4732 	if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
4733 		return 0;
4734 
4735 	nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
4736 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4737 				IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
4738 		value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
4739 
4740 	sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4741 	if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4742 				IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
4743 		value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
4744 
4745 	if (!value)
4746 		return 0;
4747 
4748 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
4749 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
4750 
4751 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
4752 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
4753 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
4754 
4755 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
4756 		value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
4757 
4758 	if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
4759 		value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
4760 			  WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
4761 
4762 	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
4763 					 ar->wmi.vdev_param->txbf, value);
4764 }
4765 
4766 /*
4767  * TODO:
4768  * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
4769  * because we will send mgmt frames without CCK. This requirement
4770  * for P2P_FIND/GO_NEG should be handled by checking CCK flag
4771  * in the TX packet.
4772  */
4773 static int ath10k_add_interface(struct ieee80211_hw *hw,
4774 				struct ieee80211_vif *vif)
4775 {
4776 	struct ath10k *ar = hw->priv;
4777 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
4778 	struct ath10k_peer *peer;
4779 	enum wmi_sta_powersave_param param;
4780 	int ret = 0;
4781 	u32 value;
4782 	int bit;
4783 	int i;
4784 	u32 vdev_param;
4785 
4786 	vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
4787 
4788 	mutex_lock(&ar->conf_mutex);
4789 
4790 	memset(arvif, 0, sizeof(*arvif));
4791 	ath10k_mac_txq_init(vif->txq);
4792 
4793 	arvif->ar = ar;
4794 	arvif->vif = vif;
4795 
4796 	INIT_LIST_HEAD(&arvif->list);
4797 	INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
4798 	INIT_DELAYED_WORK(&arvif->connection_loss_work,
4799 			  ath10k_mac_vif_sta_connection_loss_work);
4800 
4801 	for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
4802 		arvif->bitrate_mask.control[i].legacy = 0xffffffff;
4803 		memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
4804 		       sizeof(arvif->bitrate_mask.control[i].ht_mcs));
4805 		memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
4806 		       sizeof(arvif->bitrate_mask.control[i].vht_mcs));
4807 	}
4808 
4809 	if (ar->num_peers >= ar->max_num_peers) {
4810 		ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
4811 		ret = -ENOBUFS;
4812 		goto err;
4813 	}
4814 
4815 	if (ar->free_vdev_map == 0) {
4816 		ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
4817 		ret = -EBUSY;
4818 		goto err;
4819 	}
4820 	bit = __ffs64(ar->free_vdev_map);
4821 
4822 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
4823 		   bit, ar->free_vdev_map);
4824 
4825 	arvif->vdev_id = bit;
4826 	arvif->vdev_subtype =
4827 		ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
4828 
4829 	switch (vif->type) {
4830 	case NL80211_IFTYPE_P2P_DEVICE:
4831 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4832 		arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4833 					(ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
4834 		break;
4835 	case NL80211_IFTYPE_UNSPECIFIED:
4836 	case NL80211_IFTYPE_STATION:
4837 		arvif->vdev_type = WMI_VDEV_TYPE_STA;
4838 		if (vif->p2p)
4839 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4840 					(ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
4841 		break;
4842 	case NL80211_IFTYPE_ADHOC:
4843 		arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
4844 		break;
4845 	case NL80211_IFTYPE_MESH_POINT:
4846 		if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
4847 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4848 						(ar, WMI_VDEV_SUBTYPE_MESH_11S);
4849 		} else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4850 			ret = -EINVAL;
4851 			ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
4852 			goto err;
4853 		}
4854 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4855 		break;
4856 	case NL80211_IFTYPE_AP:
4857 		arvif->vdev_type = WMI_VDEV_TYPE_AP;
4858 
4859 		if (vif->p2p)
4860 			arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
4861 						(ar, WMI_VDEV_SUBTYPE_P2P_GO);
4862 		break;
4863 	case NL80211_IFTYPE_MONITOR:
4864 		arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
4865 		break;
4866 	default:
4867 		WARN_ON(1);
4868 		break;
4869 	}
4870 
4871 	/* Using the vdev_id as the queue number makes it very easy to do
4872 	 * per-vif tx queue locking. This shouldn't wrap due to interface
4873 	 * combinations but do a modulo for correctness' sake and to prevent
4874 	 * using offchannel tx queues for regular vif tx.
4875 	 */
4876 	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4877 	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
4878 		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
4879 
4880 	/* Some firmware revisions don't wait for beacon tx completion before
4881 	 * sending another SWBA event. This could lead to hardware using old
4882 	 * (freed) beacon data in some cases, e.g. tx credit starvation
4883 	 * combined with missed TBTT. This is very very rare.
4884 	 *
4885 	 * On non-IOMMU-enabled hosts this could be a possible security issue
4886 	 * because hw could beacon some random data on the air.  On
4887 	 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
4888 	 * device would crash.
4889 	 *
4890 	 * Since there are no beacon tx completions (implicit or explicit)
4891 	 * propagated to the host, the only workaround for this is to allocate
4892 	 * a DMA-coherent buffer for the lifetime of a vif and use it for all
4893 	 * beacon tx commands. Worst case for this approach is some beacons may
4894 	 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
4895 	 */
4896 	if (vif->type == NL80211_IFTYPE_ADHOC ||
4897 	    vif->type == NL80211_IFTYPE_MESH_POINT ||
4898 	    vif->type == NL80211_IFTYPE_AP) {
4899 		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
4900 							IEEE80211_MAX_FRAME_LEN,
4901 							&arvif->beacon_paddr,
4902 							GFP_ATOMIC);
4903 		if (!arvif->beacon_buf) {
4904 			ret = -ENOMEM;
4905 			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
4906 				    ret);
4907 			goto err;
4908 		}
4909 	}
4910 	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
4911 		arvif->nohwcrypt = true;
4912 
4913 	if (arvif->nohwcrypt &&
4914 	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
4915 		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
4916 		goto err;
4917 	}
4918 
4919 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
4920 		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
4921 		   arvif->beacon_buf ? "single-buf" : "per-skb");
4922 
4923 	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
4924 				     arvif->vdev_subtype, vif->addr);
4925 	if (ret) {
4926 		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
4927 			    arvif->vdev_id, ret);
4928 		goto err;
4929 	}
4930 
4931 	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
4932 	spin_lock_bh(&ar->data_lock);
4933 	list_add(&arvif->list, &ar->arvifs);
4934 	spin_unlock_bh(&ar->data_lock);
4935 
4936 	/* It makes no sense to have firmware do keepalives. mac80211 already
4937 	 * takes care of this with idle connection polling.
4938 	 */
4939 	ret = ath10k_mac_vif_disable_keepalive(arvif);
4940 	if (ret) {
4941 		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
4942 			    arvif->vdev_id, ret);
4943 		goto err_vdev_delete;
4944 	}
4945 
4946 	arvif->def_wep_key_idx = -1;
4947 
4948 	vdev_param = ar->wmi.vdev_param->tx_encap_type;
4949 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4950 					ATH10K_HW_TXRX_NATIVE_WIFI);
4951 	/* 10.X firmware does not support this VDEV parameter. Do not warn */
4952 	if (ret && ret != -EOPNOTSUPP) {
4953 		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
4954 			    arvif->vdev_id, ret);
4955 		goto err_vdev_delete;
4956 	}
4957 
4958 	/* Configuring the number of spatial streams for a monitor interface
4959 	 * causes a target assert in qca9888 and qca6174.
4960 	 */
4961 	if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
4962 		u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
4963 
4964 		vdev_param = ar->wmi.vdev_param->nss;
4965 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
4966 						nss);
4967 		if (ret) {
4968 			ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
4969 				    arvif->vdev_id, ar->cfg_tx_chainmask, nss,
4970 				    ret);
4971 			goto err_vdev_delete;
4972 		}
4973 	}
4974 
4975 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
4976 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
4977 		ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
4978 					 vif->addr, WMI_PEER_TYPE_DEFAULT);
4979 		if (ret) {
4980 			ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
4981 				    arvif->vdev_id, ret);
4982 			goto err_vdev_delete;
4983 		}
4984 
4985 		spin_lock_bh(&ar->data_lock);
4986 
4987 		peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
4988 		if (!peer) {
4989 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
4990 				    vif->addr, arvif->vdev_id);
4991 			spin_unlock_bh(&ar->data_lock);
4992 			ret = -ENOENT;
4993 			goto err_peer_delete;
4994 		}
4995 
4996 		arvif->peer_id = find_first_bit(peer->peer_ids,
4997 						ATH10K_MAX_NUM_PEER_IDS);
4998 
4999 		spin_unlock_bh(&ar->data_lock);
5000 	} else {
5001 		arvif->peer_id = HTT_INVALID_PEERID;
5002 	}
5003 
5004 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5005 		ret = ath10k_mac_set_kickout(arvif);
5006 		if (ret) {
5007 			ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5008 				    arvif->vdev_id, ret);
5009 			goto err_peer_delete;
5010 		}
5011 	}
5012 
5013 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5014 		param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5015 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5016 		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5017 						  param, value);
5018 		if (ret) {
5019 			ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5020 				    arvif->vdev_id, ret);
5021 			goto err_peer_delete;
5022 		}
5023 
5024 		ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5025 		if (ret) {
5026 			ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5027 				    arvif->vdev_id, ret);
5028 			goto err_peer_delete;
5029 		}
5030 
5031 		ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5032 		if (ret) {
5033 			ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5034 				    arvif->vdev_id, ret);
5035 			goto err_peer_delete;
5036 		}
5037 	}
5038 
5039 	ret = ath10k_mac_set_txbf_conf(arvif);
5040 	if (ret) {
5041 		ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5042 			    arvif->vdev_id, ret);
5043 		goto err_peer_delete;
5044 	}
5045 
5046 	ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5047 	if (ret) {
5048 		ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5049 			    arvif->vdev_id, ret);
5050 		goto err_peer_delete;
5051 	}
5052 
5053 	arvif->txpower = vif->bss_conf.txpower;
5054 	ret = ath10k_mac_txpower_recalc(ar);
5055 	if (ret) {
5056 		ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5057 		goto err_peer_delete;
5058 	}
5059 
5060 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5061 		ar->monitor_arvif = arvif;
5062 		ret = ath10k_monitor_recalc(ar);
5063 		if (ret) {
5064 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5065 			goto err_peer_delete;
5066 		}
5067 	}
5068 
5069 	spin_lock_bh(&ar->htt.tx_lock);
5070 	if (!ar->tx_paused)
5071 		ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5072 	spin_unlock_bh(&ar->htt.tx_lock);
5073 
5074 	mutex_unlock(&ar->conf_mutex);
5075 	return 0;
5076 
5077 err_peer_delete:
5078 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5079 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS)
5080 		ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5081 
5082 err_vdev_delete:
5083 	ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5084 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5085 	spin_lock_bh(&ar->data_lock);
5086 	list_del(&arvif->list);
5087 	spin_unlock_bh(&ar->data_lock);
5088 
5089 err:
5090 	if (arvif->beacon_buf) {
5091 		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5092 				  arvif->beacon_buf, arvif->beacon_paddr);
5093 		arvif->beacon_buf = NULL;
5094 	}
5095 
5096 	mutex_unlock(&ar->conf_mutex);
5097 
5098 	return ret;
5099 }
5100 
5101 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5102 {
5103 	int i;
5104 
5105 	for (i = 0; i < BITS_PER_LONG; i++)
5106 		ath10k_mac_vif_tx_unlock(arvif, i);
5107 }
5108 
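/* mac80211 .remove_interface callback: undo ath10k_add_interface() - delete
 * the AP/IBSS self-peer and the vdev, release the vdev id and clean up any
 * peers and tx queues still referencing this vif.
 */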
5109 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5110 				    struct ieee80211_vif *vif)
5111 {
5112 	struct ath10k *ar = hw->priv;
5113 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5114 	struct ath10k_peer *peer;
5115 	int ret;
5116 	int i;
5117 
5118 	cancel_work_sync(&arvif->ap_csa_work);
5119 	cancel_delayed_work_sync(&arvif->connection_loss_work);
5120 
5121 	mutex_lock(&ar->conf_mutex);
5122 
5123 	spin_lock_bh(&ar->data_lock);
5124 	ath10k_mac_vif_beacon_cleanup(arvif);
5125 	spin_unlock_bh(&ar->data_lock);
5126 
5127 	ret = ath10k_spectral_vif_stop(arvif);
5128 	if (ret)
5129 		ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5130 			    arvif->vdev_id, ret);
5131 
5132 	ar->free_vdev_map |= 1LL << arvif->vdev_id;
5133 	spin_lock_bh(&ar->data_lock);
5134 	list_del(&arvif->list);
5135 	spin_unlock_bh(&ar->data_lock);
5136 
5137 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5138 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5139 		ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5140 					     vif->addr);
5141 		if (ret)
5142 			ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5143 				    arvif->vdev_id, ret);
5144 
5145 		kfree(arvif->u.ap.noa_data);
5146 	}
5147 
5148 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5149 		   arvif->vdev_id);
5150 
5151 	ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5152 	if (ret)
5153 		ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5154 			    arvif->vdev_id, ret);
5155 
5156 	/* Some firmware revisions don't notify the host about self-peer
5157 	 * removal until after the associated vdev is deleted.
5158 	 */
5159 	if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5160 	    arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5161 		ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5162 						   vif->addr);
5163 		if (ret)
5164 			ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5165 				    arvif->vdev_id, ret);
5166 
5167 		spin_lock_bh(&ar->data_lock);
5168 		ar->num_peers--;
5169 		spin_unlock_bh(&ar->data_lock);
5170 	}
5171 
5172 	spin_lock_bh(&ar->data_lock);
5173 	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5174 		peer = ar->peer_map[i];
5175 		if (!peer)
5176 			continue;
5177 
5178 		if (peer->vif == vif) {
5179 			ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5180 				    vif->addr, arvif->vdev_id);
5181 			peer->vif = NULL;
5182 		}
5183 	}
5184 	spin_unlock_bh(&ar->data_lock);
5185 
5186 	ath10k_peer_cleanup(ar, arvif->vdev_id);
5187 	ath10k_mac_txq_unref(ar, vif->txq);
5188 
5189 	if (vif->type == NL80211_IFTYPE_MONITOR) {
5190 		ar->monitor_arvif = NULL;
5191 		ret = ath10k_monitor_recalc(ar);
5192 		if (ret)
5193 			ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5194 	}
5195 
5196 	spin_lock_bh(&ar->htt.tx_lock);
5197 	ath10k_mac_vif_tx_unlock_all(arvif);
5198 	spin_unlock_bh(&ar->htt.tx_lock);
5199 
5200 	ath10k_mac_txq_unref(ar, vif->txq);
5201 
5202 	mutex_unlock(&ar->conf_mutex);
5203 }
5204 
5205 /*
5206  * FIXME: Has to be verified.
5207  */
5208 #define SUPPORTED_FILTERS			\
5209 	(FIF_ALLMULTI |				\
5210 	FIF_CONTROL |				\
5211 	FIF_PSPOLL |				\
5212 	FIF_OTHER_BSS |				\
5213 	FIF_BCN_PRBRESP_PROMISC |		\
5214 	FIF_PROBE_REQ |				\
5215 	FIF_FCSFAIL)
5216 
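/* mac80211 .configure_filter callback: cache the requested filter flags
 * (clamped to SUPPORTED_FILTERS) and re-evaluate whether the monitor vdev
 * needs to be started or stopped.
 */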
5217 static void ath10k_configure_filter(struct ieee80211_hw *hw,
5218 				    unsigned int changed_flags,
5219 				    unsigned int *total_flags,
5220 				    u64 multicast)
5221 {
5222 	struct ath10k *ar = hw->priv;
5223 	int ret;
5224 
5225 	mutex_lock(&ar->conf_mutex);
5226 
5227 	changed_flags &= SUPPORTED_FILTERS;
5228 	*total_flags &= SUPPORTED_FILTERS;
5229 	ar->filter_flags = *total_flags;
5230 
5231 	ret = ath10k_monitor_recalc(ar);
5232 	if (ret)
5233 		ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5234 
5235 	mutex_unlock(&ar->conf_mutex);
5236 }
5237 
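/* mac80211 .bss_info_changed callback: translate BSS_CHANGED_* notifications
 * into the corresponding WMI vdev/pdev parameter updates, beacon template
 * refreshes and (dis)association handling.
 */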
5238 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
5239 				    struct ieee80211_vif *vif,
5240 				    struct ieee80211_bss_conf *info,
5241 				    u32 changed)
5242 {
5243 	struct ath10k *ar = hw->priv;
5244 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5245 	int ret = 0;
5246 	u32 vdev_param, pdev_param, slottime, preamble;
5247 
5248 	mutex_lock(&ar->conf_mutex);
5249 
5250 	if (changed & BSS_CHANGED_IBSS)
5251 		ath10k_control_ibss(arvif, info, vif->addr);
5252 
5253 	if (changed & BSS_CHANGED_BEACON_INT) {
5254 		arvif->beacon_interval = info->beacon_int;
5255 		vdev_param = ar->wmi.vdev_param->beacon_interval;
5256 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5257 						arvif->beacon_interval);
5258 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5259 			   "mac vdev %d beacon_interval %d\n",
5260 			   arvif->vdev_id, arvif->beacon_interval);
5261 
5262 		if (ret)
5263 			ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
5264 				    arvif->vdev_id, ret);
5265 	}
5266 
5267 	if (changed & BSS_CHANGED_BEACON) {
5268 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5269 			   "vdev %d set beacon tx mode to staggered\n",
5270 			   arvif->vdev_id);
5271 
5272 		pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
5273 		ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
5274 						WMI_BEACON_STAGGERED_MODE);
5275 		if (ret)
5276 			ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
5277 				    arvif->vdev_id, ret);
5278 
5279 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
5280 		if (ret)
5281 			ath10k_warn(ar, "failed to update beacon template: %d\n",
5282 				    ret);
5283 
5284 		if (ieee80211_vif_is_mesh(vif)) {
5285 			/* mesh doesn't use SSID but firmware needs it */
5286 			strncpy(arvif->u.ap.ssid, "mesh",
5287 				sizeof(arvif->u.ap.ssid));
5288 			arvif->u.ap.ssid_len = 4;
5289 		}
5290 	}
5291 
5292 	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
5293 		ret = ath10k_mac_setup_prb_tmpl(arvif);
5294 		if (ret)
5295 			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
5296 				    arvif->vdev_id, ret);
5297 	}
5298 
5299 	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
5300 		arvif->dtim_period = info->dtim_period;
5301 
5302 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5303 			   "mac vdev %d dtim_period %d\n",
5304 			   arvif->vdev_id, arvif->dtim_period);
5305 
5306 		vdev_param = ar->wmi.vdev_param->dtim_period;
5307 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5308 						arvif->dtim_period);
5309 		if (ret)
5310 			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
5311 				    arvif->vdev_id, ret);
5312 	}
5313 
5314 	if (changed & BSS_CHANGED_SSID &&
5315 	    vif->type == NL80211_IFTYPE_AP) {
5316 		arvif->u.ap.ssid_len = info->ssid_len;
5317 		if (info->ssid_len)
5318 			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
5319 		arvif->u.ap.hidden_ssid = info->hidden_ssid;
5320 	}
5321 
5322 	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
5323 		ether_addr_copy(arvif->bssid, info->bssid);
5324 
5325 	if (changed & BSS_CHANGED_BEACON_ENABLED)
5326 		ath10k_control_beaconing(arvif, info);
5327 
5328 	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
5329 		arvif->use_cts_prot = info->use_cts_prot;
5330 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n",
5331 			   arvif->vdev_id, info->use_cts_prot);
5332 
5333 		ret = ath10k_recalc_rtscts_prot(arvif);
5334 		if (ret)
5335 			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
5336 				    arvif->vdev_id, ret);
5337 
5338 		vdev_param = ar->wmi.vdev_param->protection_mode;
5339 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5340 						info->use_cts_prot ? 1 : 0);
5341 		if (ret)
5342 			ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n",
5343 				    info->use_cts_prot, arvif->vdev_id, ret);
5344 	}
5345 
5346 	if (changed & BSS_CHANGED_ERP_SLOT) {
5347 		if (info->use_short_slot)
5348 			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
5349 
5350 		else
5351 			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
5352 
5353 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
5354 			   arvif->vdev_id, slottime);
5355 
5356 		vdev_param = ar->wmi.vdev_param->slot_time;
5357 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5358 						slottime);
5359 		if (ret)
5360 			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
5361 				    arvif->vdev_id, ret);
5362 	}
5363 
5364 	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
5365 		if (info->use_short_preamble)
5366 			preamble = WMI_VDEV_PREAMBLE_SHORT;
5367 		else
5368 			preamble = WMI_VDEV_PREAMBLE_LONG;
5369 
5370 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5371 			   "mac vdev %d preamble %d\n",
5372 			   arvif->vdev_id, preamble);
5373 
5374 		vdev_param = ar->wmi.vdev_param->preamble;
5375 		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5376 						preamble);
5377 		if (ret)
5378 			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
5379 				    arvif->vdev_id, ret);
5380 	}
5381 
5382 	if (changed & BSS_CHANGED_ASSOC) {
5383 		if (info->assoc) {
5384 			/* Workaround: Make sure monitor vdev is not running
5385 			 * when associating to prevent some firmware revisions
5386 			 * (e.g. 10.1 and 10.2) from crashing.
5387 			 */
5388 			if (ar->monitor_started)
5389 				ath10k_monitor_stop(ar);
5390 			ath10k_bss_assoc(hw, vif, info);
5391 			ath10k_monitor_recalc(ar);
5392 		} else {
5393 			ath10k_bss_disassoc(hw, vif);
5394 		}
5395 	}
5396 
5397 	if (changed & BSS_CHANGED_TXPOWER) {
5398 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
5399 			   arvif->vdev_id, info->txpower);
5400 
5401 		arvif->txpower = info->txpower;
5402 		ret = ath10k_mac_txpower_recalc(ar);
5403 		if (ret)
5404 			ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5405 	}
5406 
5407 	if (changed & BSS_CHANGED_PS) {
5408 		arvif->ps = vif->bss_conf.ps;
5409 
5410 		ret = ath10k_config_ps(ar);
5411 		if (ret)
5412 			ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
5413 				    arvif->vdev_id, ret);
5414 	}
5415 
5416 	mutex_unlock(&ar->conf_mutex);
5417 }
5418 
5419 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
5420 {
5421 	struct ath10k *ar = hw->priv;
5422 
5423 	/* This function should never be called if setting the coverage class
5424 	 * is not supported on this hardware.
5425 	 */
5426 	if (!ar->hw_params.hw_ops->set_coverage_class) {
5427 		WARN_ON_ONCE(1);
5428 		return;
5429 	}
5430 	ar->hw_params.hw_ops->set_coverage_class(ar, value);
5431 }
5432 
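/* mac80211 .hw_scan callback: move the scan state machine to STARTING,
 * translate the cfg80211 request into a wmi_start_scan_arg, kick off the
 * scan and arm the scan timeout worker as a safety net.
 */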
5433 static int ath10k_hw_scan(struct ieee80211_hw *hw,
5434 			  struct ieee80211_vif *vif,
5435 			  struct ieee80211_scan_request *hw_req)
5436 {
5437 	struct ath10k *ar = hw->priv;
5438 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5439 	struct cfg80211_scan_request *req = &hw_req->req;
5440 	struct wmi_start_scan_arg arg;
5441 	int ret = 0;
5442 	int i;
5443 
5444 	mutex_lock(&ar->conf_mutex);
5445 
5446 	spin_lock_bh(&ar->data_lock);
5447 	switch (ar->scan.state) {
5448 	case ATH10K_SCAN_IDLE:
5449 		reinit_completion(&ar->scan.started);
5450 		reinit_completion(&ar->scan.completed);
5451 		ar->scan.state = ATH10K_SCAN_STARTING;
5452 		ar->scan.is_roc = false;
5453 		ar->scan.vdev_id = arvif->vdev_id;
5454 		ret = 0;
5455 		break;
5456 	case ATH10K_SCAN_STARTING:
5457 	case ATH10K_SCAN_RUNNING:
5458 	case ATH10K_SCAN_ABORTING:
5459 		ret = -EBUSY;
5460 		break;
5461 	}
5462 	spin_unlock_bh(&ar->data_lock);
5463 
5464 	if (ret)
5465 		goto exit;
5466 
5467 	memset(&arg, 0, sizeof(arg));
5468 	ath10k_wmi_start_scan_init(ar, &arg);
5469 	arg.vdev_id = arvif->vdev_id;
5470 	arg.scan_id = ATH10K_SCAN_ID;
5471 
5472 	if (req->ie_len) {
5473 		arg.ie_len = req->ie_len;
5474 		memcpy(arg.ie, req->ie, arg.ie_len);
5475 	}
5476 
5477 	if (req->n_ssids) {
5478 		arg.n_ssids = req->n_ssids;
5479 		for (i = 0; i < arg.n_ssids; i++) {
5480 			arg.ssids[i].len  = req->ssids[i].ssid_len;
5481 			arg.ssids[i].ssid = req->ssids[i].ssid;
5482 		}
5483 	} else {
5484 		arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
5485 	}
5486 
5487 	if (req->n_channels) {
5488 		arg.n_channels = req->n_channels;
5489 		for (i = 0; i < arg.n_channels; i++)
5490 			arg.channels[i] = req->channels[i]->center_freq;
5491 	}
5492 
5493 	ret = ath10k_start_scan(ar, &arg);
5494 	if (ret) {
5495 		ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
5496 		spin_lock_bh(&ar->data_lock);
5497 		ar->scan.state = ATH10K_SCAN_IDLE;
5498 		spin_unlock_bh(&ar->data_lock);
5499 	}
5500 
5501 	/* Add a 200ms margin to account for event/command processing */
5502 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
5503 				     msecs_to_jiffies(arg.max_scan_time +
5504 						      200));
5505 
5506 exit:
5507 	mutex_unlock(&ar->conf_mutex);
5508 	return ret;
5509 }
5510 
5511 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
5512 				  struct ieee80211_vif *vif)
5513 {
5514 	struct ath10k *ar = hw->priv;
5515 
5516 	mutex_lock(&ar->conf_mutex);
5517 	ath10k_scan_abort(ar);
5518 	mutex_unlock(&ar->conf_mutex);
5519 
5520 	cancel_delayed_work_sync(&ar->scan.timeout);
5521 }
5522 
5523 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
5524 					struct ath10k_vif *arvif,
5525 					enum set_key_cmd cmd,
5526 					struct ieee80211_key_conf *key)
5527 {
5528 	u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
5529 	int ret;
5530 
5531 	/* The 10.1 firmware branch requires the default key index to be set to
5532 	 * the group key index after installing it. Otherwise FW/HW transmits
5533 	 * corrupted frames with multi-vif APs. This is not required for the
5534 	 * main firmware branch (e.g. 636).
5535 	 *
5536 	 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
5537 	 *
5538 	 * FIXME: It remains unknown if this is required for multi-vif STA
5539 	 * interfaces on 10.1.
5540 	 */
5541 
5542 	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
5543 	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
5544 		return;
5545 
5546 	if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
5547 		return;
5548 
5549 	if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
5550 		return;
5551 
5552 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5553 		return;
5554 
5555 	if (cmd != SET_KEY)
5556 		return;
5557 
5558 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5559 					key->keyidx);
5560 	if (ret)
5561 		ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
5562 			    arvif->vdev_id, ret);
5563 }
5564 
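/* mac80211 .set_key callback: install or remove a key for the target peer
 * via WMI. CMAC keys and nohwcrypt vifs fall back to software crypto by
 * returning 1; static WEP keys on a station vif are additionally installed
 * pairwise because firmware requires both flavours.
 */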
5565 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
5566 			  struct ieee80211_vif *vif, struct ieee80211_sta *sta,
5567 			  struct ieee80211_key_conf *key)
5568 {
5569 	struct ath10k *ar = hw->priv;
5570 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5571 	struct ath10k_peer *peer;
5572 	const u8 *peer_addr;
5573 	bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
5574 		      key->cipher == WLAN_CIPHER_SUITE_WEP104;
5575 	int ret = 0;
5576 	int ret2;
5577 	u32 flags = 0;
5578 	u32 flags2;
5579 
5580 	/* this one needs to be done in software */
5581 	if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
5582 		return 1;
5583 
5584 	if (arvif->nohwcrypt)
5585 		return 1;
5586 
5587 	if (key->keyidx > WMI_MAX_KEY_INDEX)
5588 		return -ENOSPC;
5589 
5590 	mutex_lock(&ar->conf_mutex);
5591 
5592 	if (sta)
5593 		peer_addr = sta->addr;
5594 	else if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
5595 		peer_addr = vif->bss_conf.bssid;
5596 	else
5597 		peer_addr = vif->addr;
5598 
5599 	key->hw_key_idx = key->keyidx;
5600 
5601 	if (is_wep) {
5602 		if (cmd == SET_KEY)
5603 			arvif->wep_keys[key->keyidx] = key;
5604 		else
5605 			arvif->wep_keys[key->keyidx] = NULL;
5606 	}
5607 
5608 	/* The peer should not disappear midway (unless FW goes awry) since we
5609 	 * already hold conf_mutex. We just make sure it's there now. */
5610 	spin_lock_bh(&ar->data_lock);
5611 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5612 	spin_unlock_bh(&ar->data_lock);
5613 
5614 	if (!peer) {
5615 		if (cmd == SET_KEY) {
5616 			ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
5617 				    peer_addr);
5618 			ret = -EOPNOTSUPP;
5619 			goto exit;
5620 		} else {
5621 			/* if the peer doesn't exist there is no key to disable
5622 			 * anymore */
5623 			goto exit;
5624 		}
5625 	}
5626 
5627 	if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
5628 		flags |= WMI_KEY_PAIRWISE;
5629 	else
5630 		flags |= WMI_KEY_GROUP;
5631 
5632 	if (is_wep) {
5633 		if (cmd == DISABLE_KEY)
5634 			ath10k_clear_vdev_key(arvif, key);
5635 
5636 		/* When WEP keys are uploaded it's possible that there are
5637 		 * stations associated already (e.g. when merging) without any
5638 		 * keys. Static WEP needs an explicit per-peer key upload.
5639 		 */
5640 		if (vif->type == NL80211_IFTYPE_ADHOC &&
5641 		    cmd == SET_KEY)
5642 			ath10k_mac_vif_update_wep_key(arvif, key);
5643 
5644 		/* 802.1x never sets the def_wep_key_idx so each set_key()
5645 		 * call changes the default tx key.
5646 		 *
5647 		 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
5648 		 * after first set_key().
5649 		 */
5650 		if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
5651 			flags |= WMI_KEY_TX_USAGE;
5652 	}
5653 
5654 	ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
5655 	if (ret) {
5656 		WARN_ON(ret > 0);
5657 		ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
5658 			    arvif->vdev_id, peer_addr, ret);
5659 		goto exit;
5660 	}
5661 
5662 	/* mac80211 sets static WEP keys as groupwise while firmware requires
5663 	 * them to be installed twice as both pairwise and groupwise.
5664 	 */
5665 	if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
5666 		flags2 = flags;
5667 		flags2 &= ~WMI_KEY_GROUP;
5668 		flags2 |= WMI_KEY_PAIRWISE;
5669 
5670 		ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
5671 		if (ret) {
5672 			WARN_ON(ret > 0);
5673 			ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
5674 				    arvif->vdev_id, peer_addr, ret);
5675 			ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
5676 						  peer_addr, flags);
5677 			if (ret2) {
5678 				WARN_ON(ret2 > 0);
5679 				ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
5680 					    arvif->vdev_id, peer_addr, ret2);
5681 			}
5682 			goto exit;
5683 		}
5684 	}
5685 
5686 	ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
5687 
5688 	spin_lock_bh(&ar->data_lock);
5689 	peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
5690 	if (peer && cmd == SET_KEY)
5691 		peer->keys[key->keyidx] = key;
5692 	else if (peer && cmd == DISABLE_KEY)
5693 		peer->keys[key->keyidx] = NULL;
5694 	else if (peer == NULL)
5695 		/* impossible unless FW goes crazy */
5696 		ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
5697 	spin_unlock_bh(&ar->data_lock);
5698 
5699 exit:
5700 	mutex_unlock(&ar->conf_mutex);
5701 	return ret;
5702 }
5703 
5704 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
5705 					   struct ieee80211_vif *vif,
5706 					   int keyidx)
5707 {
5708 	struct ath10k *ar = hw->priv;
5709 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5710 	int ret;
5711 
5712 	mutex_lock(&arvif->ar->conf_mutex);
5713 
5714 	if (arvif->ar->state != ATH10K_STATE_ON)
5715 		goto unlock;
5716 
5717 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
5718 		   arvif->vdev_id, keyidx);
5719 
5720 	ret = ath10k_wmi_vdev_set_param(arvif->ar,
5721 					arvif->vdev_id,
5722 					arvif->ar->wmi.vdev_param->def_keyid,
5723 					keyidx);
5724 
5725 	if (ret) {
5726 		ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
5727 			    arvif->vdev_id,
5728 			    ret);
5729 		goto unlock;
5730 	}
5731 
5732 	arvif->def_wep_key_idx = keyidx;
5733 
5734 unlock:
5735 	mutex_unlock(&arvif->ar->conf_mutex);
5736 }
5737 
5738 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
5739 {
5740 	struct ath10k *ar;
5741 	struct ath10k_vif *arvif;
5742 	struct ath10k_sta *arsta;
5743 	struct ieee80211_sta *sta;
5744 	struct cfg80211_chan_def def;
5745 	enum nl80211_band band;
5746 	const u8 *ht_mcs_mask;
5747 	const u16 *vht_mcs_mask;
5748 	u32 changed, bw, nss, smps;
5749 	int err;
5750 
5751 	arsta = container_of(wk, struct ath10k_sta, update_wk);
5752 	sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
5753 	arvif = arsta->arvif;
5754 	ar = arvif->ar;
5755 
5756 	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
5757 		return;
5758 
5759 	band = def.chan->band;
5760 	ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
5761 	vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
5762 
5763 	spin_lock_bh(&ar->data_lock);
5764 
5765 	changed = arsta->changed;
5766 	arsta->changed = 0;
5767 
5768 	bw = arsta->bw;
5769 	nss = arsta->nss;
5770 	smps = arsta->smps;
5771 
5772 	spin_unlock_bh(&ar->data_lock);
5773 
5774 	mutex_lock(&ar->conf_mutex);
5775 
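	/* Clamp NSS to at least 1 and to at most what the configured HT/VHT
	 * MCS masks allow.
	 */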
5776 	nss = max_t(u32, 1, nss);
5777 	nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
5778 			   ath10k_mac_max_vht_nss(vht_mcs_mask)));
5779 
5780 	if (changed & IEEE80211_RC_BW_CHANGED) {
5781 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n",
5782 			   sta->addr, bw);
5783 
5784 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5785 						WMI_PEER_CHAN_WIDTH, bw);
5786 		if (err)
5787 			ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
5788 				    sta->addr, bw, err);
5789 	}
5790 
5791 	if (changed & IEEE80211_RC_NSS_CHANGED) {
5792 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n",
5793 			   sta->addr, nss);
5794 
5795 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5796 						WMI_PEER_NSS, nss);
5797 		if (err)
5798 			ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
5799 				    sta->addr, nss, err);
5800 	}
5801 
5802 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
5803 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n",
5804 			   sta->addr, smps);
5805 
5806 		err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
5807 						WMI_PEER_SMPS_STATE, smps);
5808 		if (err)
5809 			ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
5810 				    sta->addr, smps, err);
5811 	}
5812 
5813 	if (changed & IEEE80211_RC_SUPP_RATES_CHANGED ||
5814 	    changed & IEEE80211_RC_NSS_CHANGED) {
5815 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n",
5816 			   sta->addr);
5817 
5818 		err = ath10k_station_assoc(ar, arvif->vif, sta, true);
5819 		if (err)
5820 			ath10k_warn(ar, "failed to reassociate station: %pM\n",
5821 				    sta->addr);
5822 	}
5823 
5824 	mutex_unlock(&ar->conf_mutex);
5825 }
5826 
5827 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
5828 				       struct ieee80211_sta *sta)
5829 {
5830 	struct ath10k *ar = arvif->ar;
5831 
5832 	lockdep_assert_held(&ar->conf_mutex);
5833 
5834 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5835 		return 0;
5836 
5837 	if (ar->num_stations >= ar->max_num_stations)
5838 		return -ENOBUFS;
5839 
5840 	ar->num_stations++;
5841 
5842 	return 0;
5843 }
5844 
5845 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
5846 					struct ieee80211_sta *sta)
5847 {
5848 	struct ath10k *ar = arvif->ar;
5849 
5850 	lockdep_assert_held(&ar->conf_mutex);
5851 
5852 	if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
5853 		return;
5854 
5855 	ar->num_stations--;
5856 }
5857 
5858 struct ath10k_mac_tdls_iter_data {
5859 	u32 num_tdls_stations;
5860 	struct ieee80211_vif *curr_vif;
5861 };
5862 
5863 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
5864 						    struct ieee80211_sta *sta)
5865 {
5866 	struct ath10k_mac_tdls_iter_data *iter_data = data;
5867 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5868 	struct ieee80211_vif *sta_vif = arsta->arvif->vif;
5869 
5870 	if (sta->tdls && sta_vif == iter_data->curr_vif)
5871 		iter_data->num_tdls_stations++;
5872 }
5873 
5874 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
5875 					      struct ieee80211_vif *vif)
5876 {
5877 	struct ath10k_mac_tdls_iter_data data = {};
5878 
5879 	data.curr_vif = vif;
5880 
5881 	ieee80211_iterate_stations_atomic(hw,
5882 					  ath10k_mac_tdls_vif_stations_count_iter,
5883 					  &data);
5884 	return data.num_tdls_stations;
5885 }
5886 
5887 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac,
5888 					    struct ieee80211_vif *vif)
5889 {
5890 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5891 	int *num_tdls_vifs = data;
5892 
5893 	if (vif->type != NL80211_IFTYPE_STATION)
5894 		return;
5895 
5896 	if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0)
5897 		(*num_tdls_vifs)++;
5898 }
5899 
5900 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw)
5901 {
5902 	int num_tdls_vifs = 0;
5903 
5904 	ieee80211_iterate_active_interfaces_atomic(hw,
5905 						   IEEE80211_IFACE_ITER_NORMAL,
5906 						   ath10k_mac_tdls_vifs_count_iter,
5907 						   &num_tdls_vifs);
5908 	return num_tdls_vifs;
5909 }
5910 
5911 static int ath10k_sta_state(struct ieee80211_hw *hw,
5912 			    struct ieee80211_vif *vif,
5913 			    struct ieee80211_sta *sta,
5914 			    enum ieee80211_sta_state old_state,
5915 			    enum ieee80211_sta_state new_state)
5916 {
5917 	struct ath10k *ar = hw->priv;
5918 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
5919 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
5920 	struct ath10k_peer *peer;
5921 	int ret = 0;
5922 	int i;
5923 
5924 	if (old_state == IEEE80211_STA_NOTEXIST &&
5925 	    new_state == IEEE80211_STA_NONE) {
5926 		memset(arsta, 0, sizeof(*arsta));
5927 		arsta->arvif = arvif;
5928 		INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
5929 
5930 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
5931 			ath10k_mac_txq_init(sta->txq[i]);
5932 	}
5933 
5934 	/* cancel must be done outside the mutex to avoid deadlock */
5935 	if ((old_state == IEEE80211_STA_NONE &&
5936 	     new_state == IEEE80211_STA_NOTEXIST))
5937 		cancel_work_sync(&arsta->update_wk);
5938 
5939 	mutex_lock(&ar->conf_mutex);
5940 
5941 	if (old_state == IEEE80211_STA_NOTEXIST &&
5942 	    new_state == IEEE80211_STA_NONE) {
5943 		/*
5944 		 * New station addition.
5945 		 */
5946 		enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
5947 		u32 num_tdls_stations;
5948 		u32 num_tdls_vifs;
5949 
5950 		ath10k_dbg(ar, ATH10K_DBG_MAC,
5951 			   "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
5952 			   arvif->vdev_id, sta->addr,
5953 			   ar->num_stations + 1, ar->max_num_stations,
5954 			   ar->num_peers + 1, ar->max_num_peers);
5955 
5956 		ret = ath10k_mac_inc_num_stations(arvif, sta);
5957 		if (ret) {
5958 			ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
5959 				    ar->max_num_stations);
5960 			goto exit;
5961 		}
5962 
5963 		if (sta->tdls)
5964 			peer_type = WMI_PEER_TYPE_TDLS;
5965 
5966 		ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
5967 					 sta->addr, peer_type);
5968 		if (ret) {
5969 			ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
5970 				    sta->addr, arvif->vdev_id, ret);
5971 			ath10k_mac_dec_num_stations(arvif, sta);
5972 			goto exit;
5973 		}
5974 
5975 		spin_lock_bh(&ar->data_lock);
5976 
5977 		peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
5978 		if (!peer) {
5979 			ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5980 				    vif->addr, arvif->vdev_id);
5981 			spin_unlock_bh(&ar->data_lock);
5982 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
5983 			ath10k_mac_dec_num_stations(arvif, sta);
5984 			ret = -ENOENT;
5985 			goto exit;
5986 		}
5987 
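		/* Remember the first peer id associated with this peer entry. */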
5988 		arsta->peer_id = find_first_bit(peer->peer_ids,
5989 						ATH10K_MAX_NUM_PEER_IDS);
5990 
5991 		spin_unlock_bh(&ar->data_lock);
5992 
5993 		if (!sta->tdls)
5994 			goto exit;
5995 
5996 		num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
5997 		num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw);
5998 
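		/* A vif's first TDLS peer needs a TDLS vdev in firmware; refuse
		 * the new station if the firmware TDLS vdev limit is already
		 * reached.
		 */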
5999 		if (num_tdls_vifs >= ar->max_num_tdls_vdevs &&
6000 		    num_tdls_stations == 0) {
6001 			ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6002 				    arvif->vdev_id, ar->max_num_tdls_vdevs);
6003 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6004 			ath10k_mac_dec_num_stations(arvif, sta);
6005 			ret = -ENOBUFS;
6006 			goto exit;
6007 		}
6008 
6009 		if (num_tdls_stations == 0) {
6010 			/* This is the first tdls peer in current vif */
6011 			enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE;
6012 
6013 			ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6014 							      state);
6015 			if (ret) {
6016 				ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6017 					    arvif->vdev_id, ret);
6018 				ath10k_peer_delete(ar, arvif->vdev_id,
6019 						   sta->addr);
6020 				ath10k_mac_dec_num_stations(arvif, sta);
6021 				goto exit;
6022 			}
6023 		}
6024 
6025 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6026 						  WMI_TDLS_PEER_STATE_PEERING);
6027 		if (ret) {
6028 			ath10k_warn(ar,
6029 				    "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6030 				    sta->addr, arvif->vdev_id, ret);
6031 			ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6032 			ath10k_mac_dec_num_stations(arvif, sta);
6033 
6034 			if (num_tdls_stations != 0)
6035 				goto exit;
6036 			ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6037 							WMI_TDLS_DISABLE);
6038 		}
6039 	} else if ((old_state == IEEE80211_STA_NONE &&
6040 		    new_state == IEEE80211_STA_NOTEXIST)) {
6041 		/*
6042 		 * Existing station deletion.
6043 		 */
6044 		ath10k_dbg(ar, ATH10K_DBG_MAC,
6045 			   "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
6046 			   arvif->vdev_id, sta->addr, sta);
6047 
6048 		ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6049 		if (ret)
6050 			ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
6051 				    sta->addr, arvif->vdev_id, ret);
6052 
6053 		ath10k_mac_dec_num_stations(arvif, sta);
6054 
6055 		spin_lock_bh(&ar->data_lock);
6056 		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
6057 			peer = ar->peer_map[i];
6058 			if (!peer)
6059 				continue;
6060 
6061 			if (peer->sta == sta) {
6062 				ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
6063 					    sta->addr, peer, i, arvif->vdev_id);
6064 				peer->sta = NULL;
6065 
6066 				/* Clean up the peer object as well since we
6067 				 * must have failed to do this above.
6068 				 */
6069 				list_del(&peer->list);
6070 				ar->peer_map[i] = NULL;
6071 				kfree(peer);
6072 				ar->num_peers--;
6073 			}
6074 		}
6075 		spin_unlock_bh(&ar->data_lock);
6076 
6077 		for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
6078 			ath10k_mac_txq_unref(ar, sta->txq[i]);
6079 
6080 		if (!sta->tdls)
6081 			goto exit;
6082 
6083 		if (ath10k_mac_tdls_vif_stations_count(hw, vif))
6084 			goto exit;
6085 
6086 		/* This was the last tdls peer in current vif */
6087 		ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6088 						      WMI_TDLS_DISABLE);
6089 		if (ret) {
6090 			ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6091 				    arvif->vdev_id, ret);
6092 		}
6093 	} else if (old_state == IEEE80211_STA_AUTH &&
6094 		   new_state == IEEE80211_STA_ASSOC &&
6095 		   (vif->type == NL80211_IFTYPE_AP ||
6096 		    vif->type == NL80211_IFTYPE_MESH_POINT ||
6097 		    vif->type == NL80211_IFTYPE_ADHOC)) {
6098 		/*
6099 		 * New association.
6100 		 */
6101 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n",
6102 			   sta->addr);
6103 
6104 		ret = ath10k_station_assoc(ar, vif, sta, false);
6105 		if (ret)
6106 			ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
6107 				    sta->addr, arvif->vdev_id, ret);
6108 	} else if (old_state == IEEE80211_STA_ASSOC &&
6109 		   new_state == IEEE80211_STA_AUTHORIZED &&
6110 		   sta->tdls) {
6111 		/*
6112 		 * Tdls station authorized.
6113 		 */
6114 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n",
6115 			   sta->addr);
6116 
6117 		ret = ath10k_station_assoc(ar, vif, sta, false);
6118 		if (ret) {
6119 			ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
6120 				    sta->addr, arvif->vdev_id, ret);
6121 			goto exit;
6122 		}
6123 
6124 		ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6125 						  WMI_TDLS_PEER_STATE_CONNECTED);
6126 		if (ret)
6127 			ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
6128 				    sta->addr, arvif->vdev_id, ret);
6129 	} else if (old_state == IEEE80211_STA_ASSOC &&
6130 		    new_state == IEEE80211_STA_AUTH &&
6131 		    (vif->type == NL80211_IFTYPE_AP ||
6132 		     vif->type == NL80211_IFTYPE_MESH_POINT ||
6133 		     vif->type == NL80211_IFTYPE_ADHOC)) {
6134 		/*
6135 		 * Disassociation.
6136 		 */
6137 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n",
6138 			   sta->addr);
6139 
6140 		ret = ath10k_station_disassoc(ar, vif, sta);
6141 		if (ret)
6142 			ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
6143 				    sta->addr, arvif->vdev_id, ret);
6144 	}
6145 exit:
6146 	mutex_unlock(&ar->conf_mutex);
6147 	return ret;
6148 }
6149 
6150 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
6151 				u16 ac, bool enable)
6152 {
6153 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6154 	struct wmi_sta_uapsd_auto_trig_arg arg = {};
6155 	u32 prio = 0, acc = 0;
6156 	u32 value = 0;
6157 	int ret = 0;
6158 
6159 	lockdep_assert_held(&ar->conf_mutex);
6160 
6161 	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
6162 		return 0;
6163 
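	/* Map the mac80211 AC onto the matching WMI UAPSD delivery/trigger
	 * bits and onto the wmm_ac/user_priority values used for the
	 * auto-trigger argument below.
	 */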
6164 	switch (ac) {
6165 	case IEEE80211_AC_VO:
6166 		value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
6167 			WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
6168 		prio = 7;
6169 		acc = 3;
6170 		break;
6171 	case IEEE80211_AC_VI:
6172 		value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
6173 			WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
6174 		prio = 5;
6175 		acc = 2;
6176 		break;
6177 	case IEEE80211_AC_BE:
6178 		value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
6179 			WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
6180 		prio = 2;
6181 		acc = 1;
6182 		break;
6183 	case IEEE80211_AC_BK:
6184 		value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
6185 			WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
6186 		prio = 0;
6187 		acc = 0;
6188 		break;
6189 	}
6190 
6191 	if (enable)
6192 		arvif->u.sta.uapsd |= value;
6193 	else
6194 		arvif->u.sta.uapsd &= ~value;
6195 
6196 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6197 					  WMI_STA_PS_PARAM_UAPSD,
6198 					  arvif->u.sta.uapsd);
6199 	if (ret) {
6200 		ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
6201 		goto exit;
6202 	}
6203 
6204 	if (arvif->u.sta.uapsd)
6205 		value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
6206 	else
6207 		value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
6208 
6209 	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
6210 					  WMI_STA_PS_PARAM_RX_WAKE_POLICY,
6211 					  value);
6212 	if (ret)
6213 		ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
6214 
6215 	ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
6216 	if (ret) {
6217 		ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
6218 			    arvif->vdev_id, ret);
6219 		return ret;
6220 	}
6221 
6222 	ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
6223 	if (ret) {
6224 		ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
6225 			    arvif->vdev_id, ret);
6226 		return ret;
6227 	}
6228 
6229 	if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
6230 	    test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
6231 		/* Only userspace can make an educated decision on when to send
6232 		 * a trigger frame. The following effectively disables UAPSD
6233 		 * autotrigger in firmware (which is enabled by default
6234 		 * provided the autotrigger service is available).
6235 		 */
6236 
6237 		arg.wmm_ac = acc;
6238 		arg.user_priority = prio;
6239 		arg.service_interval = 0;
6240 		arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6241 		arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
6242 
6243 		ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
6244 						arvif->bssid, &arg, 1);
6245 		if (ret) {
6246 			ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
6247 				    ret);
6248 			return ret;
6249 		}
6250 	}
6251 
6252 exit:
6253 	return ret;
6254 }
6255 
6256 static int ath10k_conf_tx(struct ieee80211_hw *hw,
6257 			  struct ieee80211_vif *vif, u16 ac,
6258 			  const struct ieee80211_tx_queue_params *params)
6259 {
6260 	struct ath10k *ar = hw->priv;
6261 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6262 	struct wmi_wmm_params_arg *p = NULL;
6263 	int ret;
6264 
6265 	mutex_lock(&ar->conf_mutex);
6266 
6267 	switch (ac) {
6268 	case IEEE80211_AC_VO:
6269 		p = &arvif->wmm_params.ac_vo;
6270 		break;
6271 	case IEEE80211_AC_VI:
6272 		p = &arvif->wmm_params.ac_vi;
6273 		break;
6274 	case IEEE80211_AC_BE:
6275 		p = &arvif->wmm_params.ac_be;
6276 		break;
6277 	case IEEE80211_AC_BK:
6278 		p = &arvif->wmm_params.ac_bk;
6279 		break;
6280 	}
6281 
6282 	if (WARN_ON(!p)) {
6283 		ret = -EINVAL;
6284 		goto exit;
6285 	}
6286 
6287 	p->cwmin = params->cw_min;
6288 	p->cwmax = params->cw_max;
6289 	p->aifs = params->aifs;
6290 
6291 	/*
6292 	 * The channel time duration programmed in the HW is in absolute
6293 	 * microseconds, while mac80211 gives the txop in units of
6294 	 * 32 microseconds.
6295 	 */
6296 	p->txop = params->txop * 32;
6297 
6298 	if (ar->wmi.ops->gen_vdev_wmm_conf) {
6299 		ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
6300 					       &arvif->wmm_params);
6301 		if (ret) {
6302 			ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
6303 				    arvif->vdev_id, ret);
6304 			goto exit;
6305 		}
6306 	} else {
6307 		/* This won't work well with multi-interface cases but it's
6308 		 * better than nothing.
6309 		 */
6310 		ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
6311 		if (ret) {
6312 			ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
6313 			goto exit;
6314 		}
6315 	}
6316 
6317 	ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
6318 	if (ret)
6319 		ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
6320 
6321 exit:
6322 	mutex_unlock(&ar->conf_mutex);
6323 	return ret;
6324 }
6325 
6326 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ)
6327 
6328 static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
6329 				    struct ieee80211_vif *vif,
6330 				    struct ieee80211_channel *chan,
6331 				    int duration,
6332 				    enum ieee80211_roc_type type)
6333 {
6334 	struct ath10k *ar = hw->priv;
6335 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6336 	struct wmi_start_scan_arg arg;
6337 	int ret = 0;
6338 	u32 scan_time_msec;
6339 
6340 	mutex_lock(&ar->conf_mutex);
6341 
6342 	spin_lock_bh(&ar->data_lock);
6343 	switch (ar->scan.state) {
6344 	case ATH10K_SCAN_IDLE:
6345 		reinit_completion(&ar->scan.started);
6346 		reinit_completion(&ar->scan.completed);
6347 		reinit_completion(&ar->scan.on_channel);
6348 		ar->scan.state = ATH10K_SCAN_STARTING;
6349 		ar->scan.is_roc = true;
6350 		ar->scan.vdev_id = arvif->vdev_id;
6351 		ar->scan.roc_freq = chan->center_freq;
6352 		ar->scan.roc_notify = true;
6353 		ret = 0;
6354 		break;
6355 	case ATH10K_SCAN_STARTING:
6356 	case ATH10K_SCAN_RUNNING:
6357 	case ATH10K_SCAN_ABORTING:
6358 		ret = -EBUSY;
6359 		break;
6360 	}
6361 	spin_unlock_bh(&ar->data_lock);
6362 
6363 	if (ret)
6364 		goto exit;
6365 
6366 	scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
6367 
6368 	memset(&arg, 0, sizeof(arg));
6369 	ath10k_wmi_start_scan_init(ar, &arg);
6370 	arg.vdev_id = arvif->vdev_id;
6371 	arg.scan_id = ATH10K_SCAN_ID;
6372 	arg.n_channels = 1;
6373 	arg.channels[0] = chan->center_freq;
6374 	arg.dwell_time_active = scan_time_msec;
6375 	arg.dwell_time_passive = scan_time_msec;
6376 	arg.max_scan_time = scan_time_msec;
6377 	arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6378 	arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
6379 	arg.burst_duration_ms = duration;
6380 
6381 	ret = ath10k_start_scan(ar, &arg);
6382 	if (ret) {
6383 		ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
6384 		spin_lock_bh(&ar->data_lock);
6385 		ar->scan.state = ATH10K_SCAN_IDLE;
6386 		spin_unlock_bh(&ar->data_lock);
6387 		goto exit;
6388 	}
6389 
6390 	ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
6391 	if (ret == 0) {
6392 		ath10k_warn(ar, "failed to switch to channel for roc scan\n");
6393 
6394 		ret = ath10k_scan_stop(ar);
6395 		if (ret)
6396 			ath10k_warn(ar, "failed to stop scan: %d\n", ret);
6397 
6398 		ret = -ETIMEDOUT;
6399 		goto exit;
6400 	}
6401 
6402 	ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6403 				     msecs_to_jiffies(duration));
6404 
6405 	ret = 0;
6406 exit:
6407 	mutex_unlock(&ar->conf_mutex);
6408 	return ret;
6409 }
6410 
6411 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw)
6412 {
6413 	struct ath10k *ar = hw->priv;
6414 
6415 	mutex_lock(&ar->conf_mutex);
6416 
6417 	spin_lock_bh(&ar->data_lock);
6418 	ar->scan.roc_notify = false;
6419 	spin_unlock_bh(&ar->data_lock);
6420 
6421 	ath10k_scan_abort(ar);
6422 
6423 	mutex_unlock(&ar->conf_mutex);
6424 
6425 	cancel_delayed_work_sync(&ar->scan.timeout);
6426 
6427 	return 0;
6428 }
6429 
6430 /*
6431  * Both the RTS and fragmentation thresholds are interface-specific
6432  * in ath10k, but device-specific in mac80211.
6433  */
6434 
6435 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6436 {
6437 	struct ath10k *ar = hw->priv;
6438 	struct ath10k_vif *arvif;
6439 	int ret = 0;
6440 
6441 	mutex_lock(&ar->conf_mutex);
6442 	list_for_each_entry(arvif, &ar->arvifs, list) {
6443 		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6444 			   arvif->vdev_id, value);
6445 
6446 		ret = ath10k_mac_set_rts(arvif, value);
6447 		if (ret) {
6448 			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6449 				    arvif->vdev_id, ret);
6450 			break;
6451 		}
6452 	}
6453 	mutex_unlock(&ar->conf_mutex);
6454 
6455 	return ret;
6456 }
6457 
6458 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6459 {
6460 	/* Even though there's a WMI enum for the fragmentation threshold, no
6461 	 * known firmware actually implements it. Moreover it is not possible
6462 	 * to delegate frame fragmentation to mac80211 because firmware clears
6463 	 * the "more fragments" bit in frame control, making it impossible for
6464 	 * remote devices to reassemble frames.
6465 	 *
6466 	 * Hence implement a dummy callback just to say fragmentation isn't
6467 	 * supported. This effectively prevents mac80211 from doing frame
6468 	 * fragmentation in software.
6469 	 */
6470 	return -EOPNOTSUPP;
6471 }
6472 
6473 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6474 			 u32 queues, bool drop)
6475 {
6476 	struct ath10k *ar = hw->priv;
6477 	bool skip;
6478 	long time_left;
6479 
6480 	/* mac80211 doesn't care if we really xmit queued frames or not;
6481 	 * we'll collect those frames either way if we stop/delete vdevs. */
6482 	if (drop)
6483 		return;
6484 
6485 	mutex_lock(&ar->conf_mutex);
6486 
6487 	if (ar->state == ATH10K_STATE_WEDGED)
6488 		goto skip;
6489 
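	/* Wait until HTT reports no pending tx, bailing out early if the
	 * device has wedged or crashed in the meantime.
	 */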
6490 	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6491 			bool empty;
6492 
6493 			spin_lock_bh(&ar->htt.tx_lock);
6494 			empty = (ar->htt.num_pending_tx == 0);
6495 			spin_unlock_bh(&ar->htt.tx_lock);
6496 
6497 			skip = (ar->state == ATH10K_STATE_WEDGED) ||
6498 			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
6499 					&ar->dev_flags);
6500 
6501 			(empty || skip);
6502 		}), ATH10K_FLUSH_TIMEOUT_HZ);
6503 
6504 	if (time_left == 0 || skip)
6505 		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6506 			    skip, ar->state, time_left);
6507 
6508 skip:
6509 	mutex_unlock(&ar->conf_mutex);
6510 }
6511 
6512 /* TODO: Implement this function properly
6513  * For now it is needed to reply to Probe Requests in IBSS mode.
6514  * Probably we need this information from FW.
6515  */
6516 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
6517 {
6518 	return 1;
6519 }
6520 
6521 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
6522 				     enum ieee80211_reconfig_type reconfig_type)
6523 {
6524 	struct ath10k *ar = hw->priv;
6525 
6526 	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
6527 		return;
6528 
6529 	mutex_lock(&ar->conf_mutex);
6530 
6531 	/* If the device failed to restart it will be in a different state,
6532 	 * e.g. ATH10K_STATE_WEDGED. */
6533 	if (ar->state == ATH10K_STATE_RESTARTED) {
6534 		ath10k_info(ar, "device successfully recovered\n");
6535 		ar->state = ATH10K_STATE_ON;
6536 		ieee80211_wake_queues(ar->hw);
6537 	}
6538 
6539 	mutex_unlock(&ar->conf_mutex);
6540 }
6541 
6542 static void
6543 ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
6544 				  struct ieee80211_channel *channel)
6545 {
6546 	int ret;
6547 	enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR;
6548 
6549 	lockdep_assert_held(&ar->conf_mutex);
6550 
6551 	if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
6552 	    (ar->rx_channel != channel))
6553 		return;
6554 
6555 	if (ar->scan.state != ATH10K_SCAN_IDLE) {
6556 		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n");
6557 		return;
6558 	}
6559 
6560 	reinit_completion(&ar->bss_survey_done);
6561 
6562 	ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
6563 	if (ret) {
6564 		ath10k_warn(ar, "failed to send pdev bss chan info request\n");
6565 		return;
6566 	}
6567 
6568 	ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
6569 	if (!ret) {
6570 		ath10k_warn(ar, "bss channel survey timed out\n");
6571 		return;
6572 	}
6573 }
6574 
6575 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
6576 			     struct survey_info *survey)
6577 {
6578 	struct ath10k *ar = hw->priv;
6579 	struct ieee80211_supported_band *sband;
6580 	struct survey_info *ar_survey = &ar->survey[idx];
6581 	int ret = 0;
6582 
6583 	mutex_lock(&ar->conf_mutex);
6584 
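	/* The survey index walks the 2 GHz channels first and then continues
	 * into the 5 GHz band.
	 */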
6585 	sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
6586 	if (sband && idx >= sband->n_channels) {
6587 		idx -= sband->n_channels;
6588 		sband = NULL;
6589 	}
6590 
6591 	if (!sband)
6592 		sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
6593 
6594 	if (!sband || idx >= sband->n_channels) {
6595 		ret = -ENOENT;
6596 		goto exit;
6597 	}
6598 
6599 	ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
6600 
6601 	spin_lock_bh(&ar->data_lock);
6602 	memcpy(survey, ar_survey, sizeof(*survey));
6603 	spin_unlock_bh(&ar->data_lock);
6604 
6605 	survey->channel = &sband->channels[idx];
6606 
6607 	if (ar->rx_channel == survey->channel)
6608 		survey->filled |= SURVEY_INFO_IN_USE;
6609 
6610 exit:
6611 	mutex_unlock(&ar->conf_mutex);
6612 	return ret;
6613 }
6614 
6615 static bool
6616 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6617 					enum nl80211_band band,
6618 					const struct cfg80211_bitrate_mask *mask)
6619 {
6620 	int num_rates = 0;
6621 	int i;
6622 
6623 	num_rates += hweight32(mask->control[band].legacy);
6624 
6625 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6626 		num_rates += hweight8(mask->control[band].ht_mcs[i]);
6627 
6628 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++)
6629 		num_rates += hweight16(mask->control[band].vht_mcs[i]);
6630 
6631 	return num_rates == 1;
6632 }
6633 
6634 static bool
6635 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
6636 				       enum nl80211_band band,
6637 				       const struct cfg80211_bitrate_mask *mask,
6638 				       int *nss)
6639 {
6640 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6641 	u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
6642 	u8 ht_nss_mask = 0;
6643 	u8 vht_nss_mask = 0;
6644 	int i;
6645 
6646 	if (mask->control[band].legacy)
6647 		return false;
6648 
6649 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6650 		if (mask->control[band].ht_mcs[i] == 0)
6651 			continue;
6652 		else if (mask->control[band].ht_mcs[i] ==
6653 			 sband->ht_cap.mcs.rx_mask[i])
6654 			ht_nss_mask |= BIT(i);
6655 		else
6656 			return false;
6657 	}
6658 
6659 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6660 		if (mask->control[band].vht_mcs[i] == 0)
6661 			continue;
6662 		else if (mask->control[band].vht_mcs[i] ==
6663 			 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
6664 			vht_nss_mask |= BIT(i);
6665 		else
6666 			return false;
6667 	}
6668 
6669 	if (ht_nss_mask != vht_nss_mask)
6670 		return false;
6671 
6672 	if (ht_nss_mask == 0)
6673 		return false;
6674 
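	/* Only a contiguous NSS mask starting at NSS 1 (i.e. of the form
	 * 2^n - 1) can be reduced to a single NSS value.
	 */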
6675 	if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
6676 		return false;
6677 
6678 	*nss = fls(ht_nss_mask);
6679 
6680 	return true;
6681 }
6682 
6683 static int
6684 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6685 					enum nl80211_band band,
6686 					const struct cfg80211_bitrate_mask *mask,
6687 					u8 *rate, u8 *nss)
6688 {
6689 	struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
6690 	int rate_idx;
6691 	int i;
6692 	u16 bitrate;
6693 	u8 preamble;
6694 	u8 hw_rate;
6695 
6696 	if (hweight32(mask->control[band].legacy) == 1) {
6697 		rate_idx = ffs(mask->control[band].legacy) - 1;
6698 
6699 		hw_rate = sband->bitrates[rate_idx].hw_value;
6700 		bitrate = sband->bitrates[rate_idx].bitrate;
6701 
6702 		if (ath10k_mac_bitrate_is_cck(bitrate))
6703 			preamble = WMI_RATE_PREAMBLE_CCK;
6704 		else
6705 			preamble = WMI_RATE_PREAMBLE_OFDM;
6706 
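		/* The fixed rate word packs the preamble type into bits [7:6],
		 * (NSS - 1) into bits [5:4] and the hw rate or MCS index into
		 * bits [3:0]; the HT and VHT cases below use the same layout.
		 */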
6707 		*nss = 1;
6708 		*rate = preamble << 6 |
6709 			(*nss - 1) << 4 |
6710 			hw_rate << 0;
6711 
6712 		return 0;
6713 	}
6714 
6715 	for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6716 		if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6717 			*nss = i + 1;
6718 			*rate = WMI_RATE_PREAMBLE_HT << 6 |
6719 				(*nss - 1) << 4 |
6720 				(ffs(mask->control[band].ht_mcs[i]) - 1);
6721 
6722 			return 0;
6723 		}
6724 	}
6725 
6726 	for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6727 		if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6728 			*nss = i + 1;
6729 			*rate = WMI_RATE_PREAMBLE_VHT << 6 |
6730 				(*nss - 1) << 4 |
6731 				(ffs(mask->control[band].vht_mcs[i]) - 1);
6732 
6733 			return 0;
6734 		}
6735 	}
6736 
6737 	return -EINVAL;
6738 }
6739 
6740 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
6741 					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
6742 {
6743 	struct ath10k *ar = arvif->ar;
6744 	u32 vdev_param;
6745 	int ret;
6746 
6747 	lockdep_assert_held(&ar->conf_mutex);
6748 
6749 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n",
6750 		   arvif->vdev_id, rate, nss, sgi);
6751 
6752 	vdev_param = ar->wmi.vdev_param->fixed_rate;
6753 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
6754 	if (ret) {
6755 		ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
6756 			    rate, ret);
6757 		return ret;
6758 	}
6759 
6760 	vdev_param = ar->wmi.vdev_param->nss;
6761 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
6762 	if (ret) {
6763 		ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
6764 		return ret;
6765 	}
6766 
6767 	vdev_param = ar->wmi.vdev_param->sgi;
6768 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
6769 	if (ret) {
6770 		ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
6771 		return ret;
6772 	}
6773 
6774 	vdev_param = ar->wmi.vdev_param->ldpc;
6775 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
6776 	if (ret) {
6777 		ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
6778 		return ret;
6779 	}
6780 
6781 	return 0;
6782 }
6783 
6784 static bool
6785 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
6786 				enum nl80211_band band,
6787 				const struct cfg80211_bitrate_mask *mask)
6788 {
6789 	int i;
6790 	u16 vht_mcs;
6791 
6792 	/* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
6793 	 * to express all VHT MCS rate masks. Effectively only the following
6794 	 * ranges can be used: none, 0-7, 0-8 and 0-9.
6795 	 */
6796 	for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
6797 		vht_mcs = mask->control[band].vht_mcs[i];
6798 
6799 		switch (vht_mcs) {
6800 		case 0:
6801 		case BIT(8) - 1:
6802 		case BIT(9) - 1:
6803 		case BIT(10) - 1:
6804 			break;
6805 		default:
6806 			ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
6807 			return false;
6808 		}
6809 	}
6810 
6811 	return true;
6812 }
6813 
6814 static void ath10k_mac_set_bitrate_mask_iter(void *data,
6815 					     struct ieee80211_sta *sta)
6816 {
6817 	struct ath10k_vif *arvif = data;
6818 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6819 	struct ath10k *ar = arvif->ar;
6820 
6821 	if (arsta->arvif != arvif)
6822 		return;
6823 
6824 	spin_lock_bh(&ar->data_lock);
6825 	arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
6826 	spin_unlock_bh(&ar->data_lock);
6827 
6828 	ieee80211_queue_work(ar->hw, &arsta->update_wk);
6829 }
6830 
6831 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
6832 					  struct ieee80211_vif *vif,
6833 					  const struct cfg80211_bitrate_mask *mask)
6834 {
6835 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6836 	struct cfg80211_chan_def def;
6837 	struct ath10k *ar = arvif->ar;
6838 	enum nl80211_band band;
6839 	const u8 *ht_mcs_mask;
6840 	const u16 *vht_mcs_mask;
6841 	u8 rate;
6842 	u8 nss;
6843 	u8 sgi;
6844 	u8 ldpc;
6845 	int single_nss;
6846 	int ret;
6847 
6848 	if (ath10k_mac_vif_chan(vif, &def))
6849 		return -EPERM;
6850 
6851 	band = def.chan->band;
6852 	ht_mcs_mask = mask->control[band].ht_mcs;
6853 	vht_mcs_mask = mask->control[band].vht_mcs;
6854 	ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
6855 
6856 	sgi = mask->control[band].gi;
6857 	if (sgi == NL80211_TXRATE_FORCE_LGI)
6858 		return -EINVAL;
6859 
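	/* Three cases are handled: a mask selecting exactly one rate, a mask
	 * that reduces to a single NSS, and a general mask which is stored on
	 * the vif and pushed to each station via the rc update worker.
	 */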
6860 	if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) {
6861 		ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
6862 							      &rate, &nss);
6863 		if (ret) {
6864 			ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
6865 				    arvif->vdev_id, ret);
6866 			return ret;
6867 		}
6868 	} else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
6869 							  &single_nss)) {
6870 		rate = WMI_FIXED_RATE_NONE;
6871 		nss = single_nss;
6872 	} else {
6873 		rate = WMI_FIXED_RATE_NONE;
6874 		nss = min(ar->num_rf_chains,
6875 			  max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6876 			      ath10k_mac_max_vht_nss(vht_mcs_mask)));
6877 
6878 		if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask))
6879 			return -EINVAL;
6880 
6881 		mutex_lock(&ar->conf_mutex);
6882 
6883 		arvif->bitrate_mask = *mask;
6884 		ieee80211_iterate_stations_atomic(ar->hw,
6885 						  ath10k_mac_set_bitrate_mask_iter,
6886 						  arvif);
6887 
6888 		mutex_unlock(&ar->conf_mutex);
6889 	}
6890 
6891 	mutex_lock(&ar->conf_mutex);
6892 
6893 	ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
6894 	if (ret) {
6895 		ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
6896 			    arvif->vdev_id, ret);
6897 		goto exit;
6898 	}
6899 
6900 exit:
6901 	mutex_unlock(&ar->conf_mutex);
6902 
6903 	return ret;
6904 }
6905 
6906 static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
6907 				 struct ieee80211_vif *vif,
6908 				 struct ieee80211_sta *sta,
6909 				 u32 changed)
6910 {
6911 	struct ath10k *ar = hw->priv;
6912 	struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6913 	u32 bw, smps;
6914 
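	/* WMI calls sleep, so only record the updated values under data_lock
	 * here and let arsta->update_wk push them to firmware.
	 */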
6915 	spin_lock_bh(&ar->data_lock);
6916 
6917 	ath10k_dbg(ar, ATH10K_DBG_MAC,
6918 		   "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
6919 		   sta->addr, changed, sta->bandwidth, sta->rx_nss,
6920 		   sta->smps_mode);
6921 
6922 	if (changed & IEEE80211_RC_BW_CHANGED) {
6923 		bw = WMI_PEER_CHWIDTH_20MHZ;
6924 
6925 		switch (sta->bandwidth) {
6926 		case IEEE80211_STA_RX_BW_20:
6927 			bw = WMI_PEER_CHWIDTH_20MHZ;
6928 			break;
6929 		case IEEE80211_STA_RX_BW_40:
6930 			bw = WMI_PEER_CHWIDTH_40MHZ;
6931 			break;
6932 		case IEEE80211_STA_RX_BW_80:
6933 			bw = WMI_PEER_CHWIDTH_80MHZ;
6934 			break;
6935 		case IEEE80211_STA_RX_BW_160:
6936 			ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
6937 				    sta->bandwidth, sta->addr);
6938 			bw = WMI_PEER_CHWIDTH_20MHZ;
6939 			break;
6940 		}
6941 
6942 		arsta->bw = bw;
6943 	}
6944 
6945 	if (changed & IEEE80211_RC_NSS_CHANGED)
6946 		arsta->nss = sta->rx_nss;
6947 
6948 	if (changed & IEEE80211_RC_SMPS_CHANGED) {
6949 		smps = WMI_PEER_SMPS_PS_NONE;
6950 
6951 		switch (sta->smps_mode) {
6952 		case IEEE80211_SMPS_AUTOMATIC:
6953 		case IEEE80211_SMPS_OFF:
6954 			smps = WMI_PEER_SMPS_PS_NONE;
6955 			break;
6956 		case IEEE80211_SMPS_STATIC:
6957 			smps = WMI_PEER_SMPS_STATIC;
6958 			break;
6959 		case IEEE80211_SMPS_DYNAMIC:
6960 			smps = WMI_PEER_SMPS_DYNAMIC;
6961 			break;
6962 		case IEEE80211_SMPS_NUM_MODES:
6963 			ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
6964 				    sta->smps_mode, sta->addr);
6965 			smps = WMI_PEER_SMPS_PS_NONE;
6966 			break;
6967 		}
6968 
6969 		arsta->smps = smps;
6970 	}
6971 
6972 	arsta->changed |= changed;
6973 
6974 	spin_unlock_bh(&ar->data_lock);
6975 
6976 	ieee80211_queue_work(hw, &arsta->update_wk);
6977 }
6978 
6979 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif)
6980 {
6981 	/*
6982 	 * FIXME: Return 0 for the time being. Need to figure out whether FW
6983 	 * has an API to fetch the 64-bit local TSF.
6984 	 */
6985 
6986 	return 0;
6987 }
6988 
6989 static void ath10k_set_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6990 			   u64 tsf)
6991 {
6992 	struct ath10k *ar = hw->priv;
6993 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
6994 	u32 tsf_offset, vdev_param = ar->wmi.vdev_param->set_tsf;
6995 	int ret;
6996 
6997 	/* Workaround:
6998 	 *
6999 	 * The given tsf argument is the entire TSF value, but firmware accepts
7000 	 * only a TSF offset relative to the current TSF.
7001 	 *
7002 	 * get_tsf() is used to obtain the base for that offset, however since
7003 	 * ath10k_get_tsf() is not implemented properly it always returns 0.
7004 	 * Luckily all callers of set_tsf, as of now, also rely on get_tsf() to
7005 	 * build the entire tsf value (i.e. they pass get_tsf() + tsf_delta), so
7006 	 * the final tsf offset sent to firmware is arithmetically correct.
7007 	 */
7008 	tsf_offset = tsf - ath10k_get_tsf(hw, vif);
7009 	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
7010 					vdev_param, tsf_offset);
7011 	if (ret && ret != -EOPNOTSUPP)
7012 		ath10k_warn(ar, "failed to set tsf offset: %d\n", ret);
7013 }
7014 
7015 static int ath10k_ampdu_action(struct ieee80211_hw *hw,
7016 			       struct ieee80211_vif *vif,
7017 			       struct ieee80211_ampdu_params *params)
7018 {
7019 	struct ath10k *ar = hw->priv;
7020 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7021 	struct ieee80211_sta *sta = params->sta;
7022 	enum ieee80211_ampdu_mlme_action action = params->action;
7023 	u16 tid = params->tid;
7024 
7025 	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n",
7026 		   arvif->vdev_id, sta->addr, tid, action);
7027 
7028 	switch (action) {
7029 	case IEEE80211_AMPDU_RX_START:
7030 	case IEEE80211_AMPDU_RX_STOP:
7031 		/* HTT AddBa/DelBa events trigger mac80211 Rx BA session
7032 		 * creation/removal. Do we need to verify this?
7033 		 */
7034 		return 0;
7035 	case IEEE80211_AMPDU_TX_START:
7036 	case IEEE80211_AMPDU_TX_STOP_CONT:
7037 	case IEEE80211_AMPDU_TX_STOP_FLUSH:
7038 	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7039 	case IEEE80211_AMPDU_TX_OPERATIONAL:
7040 		/* Firmware offloads Tx aggregation entirely so deny mac80211
7041 		 * Tx aggregation requests.
7042 		 */
7043 		return -EOPNOTSUPP;
7044 	}
7045 
7046 	return -EINVAL;
7047 }
7048 
7049 static void
7050 ath10k_mac_update_rx_channel(struct ath10k *ar,
7051 			     struct ieee80211_chanctx_conf *ctx,
7052 			     struct ieee80211_vif_chanctx_switch *vifs,
7053 			     int n_vifs)
7054 {
7055 	struct cfg80211_chan_def *def = NULL;
7056 
7057 	/* Both locks are required because ar->rx_channel is modified. This
7058 	 * allows readers to hold either lock.
7059 	 */
7060 	lockdep_assert_held(&ar->conf_mutex);
7061 	lockdep_assert_held(&ar->data_lock);
7062 
7063 	WARN_ON(ctx && vifs);
7064 	WARN_ON(vifs && n_vifs != 1);
7065 
7066 	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7067 	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7068 	 * ppdu on Rx may reduce performance on low-end systems. It should be
7069 	 * possible to use tables/hashmaps to speed the lookup up (be wary of
7070 	 * cpu data cache line sizes though), but to keep the initial
7071 	 * implementation simple and less intrusive fall back to the slow
7072 	 * lookup only for multi-channel cases. Single-channel cases will
7073 	 * continue to use the old channel derivation and thus performance
7074 	 * should not be affected much.
7075 	 */
7076 	rcu_read_lock();
7077 	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7078 		ieee80211_iter_chan_contexts_atomic(ar->hw,
7079 						    ath10k_mac_get_any_chandef_iter,
7080 						    &def);
7081 
7082 		if (vifs)
7083 			def = &vifs[0].new_ctx->def;
7084 
7085 		ar->rx_channel = def->chan;
7086 	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7087 		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7088 		/* During a driver restart due to a firmware assert, mac80211
7089 		 * already has a valid channel context for the given radio, so
7090 		 * the channel context iteration returns num_chanctx > 0. Hence
7091 		 * fix up rx_channel while the restart is in progress.
7092 		 */
7093 		ar->rx_channel = ctx->def.chan;
7094 	} else {
7095 		ar->rx_channel = NULL;
7096 	}
7097 	rcu_read_unlock();
7098 }
7099 
7100 static void
7101 ath10k_mac_update_vif_chan(struct ath10k *ar,
7102 			   struct ieee80211_vif_chanctx_switch *vifs,
7103 			   int n_vifs)
7104 {
7105 	struct ath10k_vif *arvif;
7106 	int ret;
7107 	int i;
7108 
7109 	lockdep_assert_held(&ar->conf_mutex);
7110 
7111 	/* First stop monitor interface. Some FW versions crash if there's a
7112 	 * lone monitor interface.
7113 	 */
7114 	if (ar->monitor_started)
7115 		ath10k_monitor_stop(ar);
7116 
7117 	for (i = 0; i < n_vifs; i++) {
7118 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7119 
7120 		ath10k_dbg(ar, ATH10K_DBG_MAC,
7121 			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7122 			   arvif->vdev_id,
7123 			   vifs[i].old_ctx->def.chan->center_freq,
7124 			   vifs[i].new_ctx->def.chan->center_freq,
7125 			   vifs[i].old_ctx->def.width,
7126 			   vifs[i].new_ctx->def.width);
7127 
7128 		if (WARN_ON(!arvif->is_started))
7129 			continue;
7130 
7131 		if (WARN_ON(!arvif->is_up))
7132 			continue;
7133 
7134 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7135 		if (ret) {
7136 			ath10k_warn(ar, "failed to down vdev %d: %d\n",
7137 				    arvif->vdev_id, ret);
7138 			continue;
7139 		}
7140 	}
7141 
7142 	/* All relevant vdevs are downed and associated channel resources
7143 	 * should be available for the channel switch now.
7144 	 */
7145 
7146 	spin_lock_bh(&ar->data_lock);
7147 	ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
7148 	spin_unlock_bh(&ar->data_lock);
7149 
7150 	for (i = 0; i < n_vifs; i++) {
7151 		arvif = ath10k_vif_to_arvif(vifs[i].vif);
7152 
7153 		if (WARN_ON(!arvif->is_started))
7154 			continue;
7155 
7156 		if (WARN_ON(!arvif->is_up))
7157 			continue;
7158 
7159 		ret = ath10k_mac_setup_bcn_tmpl(arvif);
7160 		if (ret)
7161 			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
7162 				    ret);
7163 
7164 		ret = ath10k_mac_setup_prb_tmpl(arvif);
7165 		if (ret)
7166 			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
7167 				    ret);
7168 
7169 		ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
7170 		if (ret) {
7171 			ath10k_warn(ar, "failed to restart vdev %d: %d\n",
7172 				    arvif->vdev_id, ret);
7173 			continue;
7174 		}
7175 
7176 		ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
7177 					 arvif->bssid);
7178 		if (ret) {
7179 			ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
7180 				    arvif->vdev_id, ret);
7181 			continue;
7182 		}
7183 	}
7184 
7185 	ath10k_monitor_recalc(ar);
7186 }
7187 
7188 static int
7189 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
7190 			  struct ieee80211_chanctx_conf *ctx)
7191 {
7192 	struct ath10k *ar = hw->priv;
7193 
7194 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7195 		   "mac chanctx add freq %hu width %d ptr %pK\n",
7196 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7197 
7198 	mutex_lock(&ar->conf_mutex);
7199 
7200 	spin_lock_bh(&ar->data_lock);
7201 	ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
7202 	spin_unlock_bh(&ar->data_lock);
7203 
7204 	ath10k_recalc_radar_detection(ar);
7205 	ath10k_monitor_recalc(ar);
7206 
7207 	mutex_unlock(&ar->conf_mutex);
7208 
7209 	return 0;
7210 }
7211 
7212 static void
7213 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
7214 			     struct ieee80211_chanctx_conf *ctx)
7215 {
7216 	struct ath10k *ar = hw->priv;
7217 
7218 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7219 		   "mac chanctx remove freq %hu width %d ptr %pK\n",
7220 		   ctx->def.chan->center_freq, ctx->def.width, ctx);
7221 
7222 	mutex_lock(&ar->conf_mutex);
7223 
7224 	spin_lock_bh(&ar->data_lock);
7225 	ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
7226 	spin_unlock_bh(&ar->data_lock);
7227 
7228 	ath10k_recalc_radar_detection(ar);
7229 	ath10k_monitor_recalc(ar);
7230 
7231 	mutex_unlock(&ar->conf_mutex);
7232 }
7233 
7234 struct ath10k_mac_change_chanctx_arg {
7235 	struct ieee80211_chanctx_conf *ctx;
7236 	struct ieee80211_vif_chanctx_switch *vifs;
7237 	int n_vifs;
7238 	int next_vif;
7239 };
7240 
7241 static void
7242 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
7243 				   struct ieee80211_vif *vif)
7244 {
7245 	struct ath10k_mac_change_chanctx_arg *arg = data;
7246 
7247 	if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx)
7248 		return;
7249 
7250 	arg->n_vifs++;
7251 }
7252 
7253 static void
7254 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
7255 				    struct ieee80211_vif *vif)
7256 {
7257 	struct ath10k_mac_change_chanctx_arg *arg = data;
7258 	struct ieee80211_chanctx_conf *ctx;
7259 
7260 	ctx = rcu_access_pointer(vif->chanctx_conf);
7261 	if (ctx != arg->ctx)
7262 		return;
7263 
7264 	if (WARN_ON(arg->next_vif == arg->n_vifs))
7265 		return;
7266 
7267 	arg->vifs[arg->next_vif].vif = vif;
7268 	arg->vifs[arg->next_vif].old_ctx = ctx;
7269 	arg->vifs[arg->next_vif].new_ctx = ctx;
7270 	arg->next_vif++;
7271 }
7272 
7273 static void
7274 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
7275 			     struct ieee80211_chanctx_conf *ctx,
7276 			     u32 changed)
7277 {
7278 	struct ath10k *ar = hw->priv;
7279 	struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
7280 
7281 	mutex_lock(&ar->conf_mutex);
7282 
7283 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7284 		   "mac chanctx change freq %hu width %d ptr %pK changed %x\n",
7285 		   ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
7286 
7287 	/* This shouldn't really happen because channel switching should use
7288 	 * switch_vif_chanctx().
7289 	 */
7290 	if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
7291 		goto unlock;
7292 
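	/* Two passes: first count the vifs bound to this chanctx, then
	 * allocate the switch array and fill it.
	 */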
7293 	if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
7294 		ieee80211_iterate_active_interfaces_atomic(
7295 					hw,
7296 					IEEE80211_IFACE_ITER_NORMAL,
7297 					ath10k_mac_change_chanctx_cnt_iter,
7298 					&arg);
7299 		if (arg.n_vifs == 0)
7300 			goto radar;
7301 
7302 		arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
7303 				   GFP_KERNEL);
7304 		if (!arg.vifs)
7305 			goto radar;
7306 
7307 		ieee80211_iterate_active_interfaces_atomic(
7308 					hw,
7309 					IEEE80211_IFACE_ITER_NORMAL,
7310 					ath10k_mac_change_chanctx_fill_iter,
7311 					&arg);
7312 		ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
7313 		kfree(arg.vifs);
7314 	}
7315 
7316 radar:
7317 	ath10k_recalc_radar_detection(ar);
7318 
7319 	/* FIXME: How to configure Rx chains properly? */
7320 
7321 	/* No other actions are actually necessary. Firmware maintains channel
7322 	 * definitions per vdev internally and there's no host-side channel
7323 	 * context abstraction to configure, e.g. channel width.
7324 	 */
7325 
7326 unlock:
7327 	mutex_unlock(&ar->conf_mutex);
7328 }
7329 
7330 static int
7331 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
7332 				 struct ieee80211_vif *vif,
7333 				 struct ieee80211_chanctx_conf *ctx)
7334 {
7335 	struct ath10k *ar = hw->priv;
7336 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7337 	int ret;
7338 
7339 	mutex_lock(&ar->conf_mutex);
7340 
7341 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7342 		   "mac chanctx assign ptr %pK vdev_id %i\n",
7343 		   ctx, arvif->vdev_id);
7344 
7345 	if (WARN_ON(arvif->is_started)) {
7346 		mutex_unlock(&ar->conf_mutex);
7347 		return -EBUSY;
7348 	}
7349 
7350 	ret = ath10k_vdev_start(arvif, &ctx->def);
7351 	if (ret) {
7352 		ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
7353 			    arvif->vdev_id, vif->addr,
7354 			    ctx->def.chan->center_freq, ret);
7355 		goto err;
7356 	}
7357 
7358 	arvif->is_started = true;
7359 
7360 	ret = ath10k_mac_vif_setup_ps(arvif);
7361 	if (ret) {
7362 		ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
7363 			    arvif->vdev_id, ret);
7364 		goto err_stop;
7365 	}
7366 
7367 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7368 		ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
7369 		if (ret) {
7370 			ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
7371 				    arvif->vdev_id, ret);
7372 			goto err_stop;
7373 		}
7374 
7375 		arvif->is_up = true;
7376 	}
7377 
7378 	mutex_unlock(&ar->conf_mutex);
7379 	return 0;
7380 
7381 err_stop:
7382 	ath10k_vdev_stop(arvif);
7383 	arvif->is_started = false;
7384 	ath10k_mac_vif_setup_ps(arvif);
7385 
7386 err:
7387 	mutex_unlock(&ar->conf_mutex);
7388 	return ret;
7389 }
7390 
7391 static void
7392 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
7393 				   struct ieee80211_vif *vif,
7394 				   struct ieee80211_chanctx_conf *ctx)
7395 {
7396 	struct ath10k *ar = hw->priv;
7397 	struct ath10k_vif *arvif = (void *)vif->drv_priv;
7398 	int ret;
7399 
7400 	mutex_lock(&ar->conf_mutex);
7401 
7402 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7403 		   "mac chanctx unassign ptr %pK vdev_id %i\n",
7404 		   ctx, arvif->vdev_id);
7405 
7406 	WARN_ON(!arvif->is_started);
7407 
7408 	if (vif->type == NL80211_IFTYPE_MONITOR) {
7409 		WARN_ON(!arvif->is_up);
7410 
7411 		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7412 		if (ret)
7413 			ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
7414 				    arvif->vdev_id, ret);
7415 
7416 		arvif->is_up = false;
7417 	}
7418 
7419 	ret = ath10k_vdev_stop(arvif);
7420 	if (ret)
7421 		ath10k_warn(ar, "failed to stop vdev %i: %d\n",
7422 			    arvif->vdev_id, ret);
7423 
7424 	arvif->is_started = false;
7425 
7426 	mutex_unlock(&ar->conf_mutex);
7427 }
7428 
7429 static int
7430 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
7431 				 struct ieee80211_vif_chanctx_switch *vifs,
7432 				 int n_vifs,
7433 				 enum ieee80211_chanctx_switch_mode mode)
7434 {
7435 	struct ath10k *ar = hw->priv;
7436 
7437 	mutex_lock(&ar->conf_mutex);
7438 
7439 	ath10k_dbg(ar, ATH10K_DBG_MAC,
7440 		   "mac chanctx switch n_vifs %d mode %d\n",
7441 		   n_vifs, mode);
7442 	ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
7443 
7444 	mutex_unlock(&ar->conf_mutex);
7445 	return 0;
7446 }
7447 
7448 static const struct ieee80211_ops ath10k_ops = {
7449 	.tx				= ath10k_mac_op_tx,
7450 	.wake_tx_queue			= ath10k_mac_op_wake_tx_queue,
7451 	.start				= ath10k_start,
7452 	.stop				= ath10k_stop,
7453 	.config				= ath10k_config,
7454 	.add_interface			= ath10k_add_interface,
7455 	.remove_interface		= ath10k_remove_interface,
7456 	.configure_filter		= ath10k_configure_filter,
7457 	.bss_info_changed		= ath10k_bss_info_changed,
7458 	.set_coverage_class		= ath10k_mac_op_set_coverage_class,
7459 	.hw_scan			= ath10k_hw_scan,
7460 	.cancel_hw_scan			= ath10k_cancel_hw_scan,
7461 	.set_key			= ath10k_set_key,
7462 	.set_default_unicast_key        = ath10k_set_default_unicast_key,
7463 	.sta_state			= ath10k_sta_state,
7464 	.conf_tx			= ath10k_conf_tx,
7465 	.remain_on_channel		= ath10k_remain_on_channel,
7466 	.cancel_remain_on_channel	= ath10k_cancel_remain_on_channel,
7467 	.set_rts_threshold		= ath10k_set_rts_threshold,
7468 	.set_frag_threshold		= ath10k_mac_op_set_frag_threshold,
7469 	.flush				= ath10k_flush,
7470 	.tx_last_beacon			= ath10k_tx_last_beacon,
7471 	.set_antenna			= ath10k_set_antenna,
7472 	.get_antenna			= ath10k_get_antenna,
7473 	.reconfig_complete		= ath10k_reconfig_complete,
7474 	.get_survey			= ath10k_get_survey,
7475 	.set_bitrate_mask		= ath10k_mac_op_set_bitrate_mask,
7476 	.sta_rc_update			= ath10k_sta_rc_update,
7477 	.get_tsf			= ath10k_get_tsf,
7478 	.set_tsf			= ath10k_set_tsf,
7479 	.ampdu_action			= ath10k_ampdu_action,
7480 	.get_et_sset_count		= ath10k_debug_get_et_sset_count,
7481 	.get_et_stats			= ath10k_debug_get_et_stats,
7482 	.get_et_strings			= ath10k_debug_get_et_strings,
7483 	.add_chanctx			= ath10k_mac_op_add_chanctx,
7484 	.remove_chanctx			= ath10k_mac_op_remove_chanctx,
7485 	.change_chanctx			= ath10k_mac_op_change_chanctx,
7486 	.assign_vif_chanctx		= ath10k_mac_op_assign_vif_chanctx,
7487 	.unassign_vif_chanctx		= ath10k_mac_op_unassign_vif_chanctx,
7488 	.switch_vif_chanctx		= ath10k_mac_op_switch_vif_chanctx,
7489 
7490 	CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
7491 
7492 #ifdef CONFIG_PM
7493 	.suspend			= ath10k_wow_op_suspend,
7494 	.resume				= ath10k_wow_op_resume,
7495 #endif
7496 #ifdef CONFIG_MAC80211_DEBUGFS
7497 	.sta_add_debugfs		= ath10k_sta_add_debugfs,
7498 	.sta_statistics			= ath10k_sta_statistics,
7499 #endif
7500 };
7501 
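/* Static channel table helpers; .max_power is in dBm. The limits below are
 * only initial values and are expected to be trimmed by the regulatory core
 * once a regdomain is applied.
 */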
7502 #define CHAN2G(_channel, _freq, _flags) { \
7503 	.band			= NL80211_BAND_2GHZ, \
7504 	.hw_value		= (_channel), \
7505 	.center_freq		= (_freq), \
7506 	.flags			= (_flags), \
7507 	.max_antenna_gain	= 0, \
7508 	.max_power		= 30, \
7509 }
7510 
7511 #define CHAN5G(_channel, _freq, _flags) { \
7512 	.band			= NL80211_BAND_5GHZ, \
7513 	.hw_value		= (_channel), \
7514 	.center_freq		= (_freq), \
7515 	.flags			= (_flags), \
7516 	.max_antenna_gain	= 0, \
7517 	.max_power		= 30, \
7518 }
7519 
7520 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
7521 	CHAN2G(1, 2412, 0),
7522 	CHAN2G(2, 2417, 0),
7523 	CHAN2G(3, 2422, 0),
7524 	CHAN2G(4, 2427, 0),
7525 	CHAN2G(5, 2432, 0),
7526 	CHAN2G(6, 2437, 0),
7527 	CHAN2G(7, 2442, 0),
7528 	CHAN2G(8, 2447, 0),
7529 	CHAN2G(9, 2452, 0),
7530 	CHAN2G(10, 2457, 0),
7531 	CHAN2G(11, 2462, 0),
7532 	CHAN2G(12, 2467, 0),
7533 	CHAN2G(13, 2472, 0),
7534 	CHAN2G(14, 2484, 0),
7535 };
7536 
7537 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
7538 	CHAN5G(36, 5180, 0),
7539 	CHAN5G(40, 5200, 0),
7540 	CHAN5G(44, 5220, 0),
7541 	CHAN5G(48, 5240, 0),
7542 	CHAN5G(52, 5260, 0),
7543 	CHAN5G(56, 5280, 0),
7544 	CHAN5G(60, 5300, 0),
7545 	CHAN5G(64, 5320, 0),
7546 	CHAN5G(100, 5500, 0),
7547 	CHAN5G(104, 5520, 0),
7548 	CHAN5G(108, 5540, 0),
7549 	CHAN5G(112, 5560, 0),
7550 	CHAN5G(116, 5580, 0),
7551 	CHAN5G(120, 5600, 0),
7552 	CHAN5G(124, 5620, 0),
7553 	CHAN5G(128, 5640, 0),
7554 	CHAN5G(132, 5660, 0),
7555 	CHAN5G(136, 5680, 0),
7556 	CHAN5G(140, 5700, 0),
7557 	CHAN5G(144, 5720, 0),
7558 	CHAN5G(149, 5745, 0),
7559 	CHAN5G(153, 5765, 0),
7560 	CHAN5G(157, 5785, 0),
7561 	CHAN5G(161, 5805, 0),
7562 	CHAN5G(165, 5825, 0),
7563 };
7564 
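/* Allocate the ieee80211_hw with room for the ath10k core plus the bus
 * private area (priv_size), and attach a writable copy of ath10k_ops so it
 * can be patched later in ath10k_mac_register(). Typically invoked from the
 * core create path (e.g. ath10k_core_create()).
 */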
7565 struct ath10k *ath10k_mac_create(size_t priv_size)
7566 {
7567 	struct ieee80211_hw *hw;
7568 	struct ieee80211_ops *ops;
7569 	struct ath10k *ar;
7570 
7571 	ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
7572 	if (!ops)
7573 		return NULL;
7574 
7575 	hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
7576 	if (!hw) {
7577 		kfree(ops);
7578 		return NULL;
7579 	}
7580 
7581 	ar = hw->priv;
7582 	ar->hw = hw;
7583 	ar->ops = ops;
7584 
7585 	return ar;
7586 }
7587 
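/* Note: ieee80211_free_hw() releases the ath10k instance itself (it lives in
 * hw->priv), which is why the ops pointer is saved before freeing the hw.
 */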
7588 void ath10k_mac_destroy(struct ath10k *ar)
7589 {
7590 	struct ieee80211_ops *ops = ar->ops;
7591 
7592 	ieee80211_free_hw(ar->hw);
7593 	kfree(ops);
7594 }
7595 
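/* Interface combination advertisements enforced by cfg80211. For example,
 * the default ath10k_if_comb below allows up to 8 concurrent interfaces on a
 * single channel: at most 8 STA/P2P-client, 3 P2P-GO, 1 P2P-device and
 * 7 AP/mesh vdevs.
 */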
7596 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
7597 	{
7598 		.max	= 8,
7599 		.types	= BIT(NL80211_IFTYPE_STATION)
7600 			| BIT(NL80211_IFTYPE_P2P_CLIENT)
7601 	},
7602 	{
7603 		.max	= 3,
7604 		.types	= BIT(NL80211_IFTYPE_P2P_GO)
7605 	},
7606 	{
7607 		.max	= 1,
7608 		.types	= BIT(NL80211_IFTYPE_P2P_DEVICE)
7609 	},
7610 	{
7611 		.max	= 7,
7612 		.types	= BIT(NL80211_IFTYPE_AP)
7613 #ifdef CONFIG_MAC80211_MESH
7614 			| BIT(NL80211_IFTYPE_MESH_POINT)
7615 #endif
7616 	},
7617 };
7618 
7619 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
7620 	{
7621 		.max	= 8,
7622 		.types	= BIT(NL80211_IFTYPE_AP)
7623 #ifdef CONFIG_MAC80211_MESH
7624 			| BIT(NL80211_IFTYPE_MESH_POINT)
7625 #endif
7626 	},
7627 	{
7628 		.max	= 1,
7629 		.types	= BIT(NL80211_IFTYPE_STATION)
7630 	},
7631 };
7632 
7633 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
7634 	{
7635 		.limits = ath10k_if_limits,
7636 		.n_limits = ARRAY_SIZE(ath10k_if_limits),
7637 		.max_interfaces = 8,
7638 		.num_different_channels = 1,
7639 		.beacon_int_infra_match = true,
7640 	},
7641 };
7642 
7643 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
7644 	{
7645 		.limits = ath10k_10x_if_limits,
7646 		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
7647 		.max_interfaces = 8,
7648 		.num_different_channels = 1,
7649 		.beacon_int_infra_match = true,
7650 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7651 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7652 					BIT(NL80211_CHAN_WIDTH_20) |
7653 					BIT(NL80211_CHAN_WIDTH_40) |
7654 					BIT(NL80211_CHAN_WIDTH_80),
7655 #endif
7656 	},
7657 };
7658 
7659 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
7660 	{
7661 		.max = 2,
7662 		.types = BIT(NL80211_IFTYPE_STATION),
7663 	},
7664 	{
7665 		.max = 2,
7666 		.types = BIT(NL80211_IFTYPE_AP) |
7667 #ifdef CONFIG_MAC80211_MESH
7668 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7669 #endif
7670 			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
7671 			 BIT(NL80211_IFTYPE_P2P_GO),
7672 	},
7673 	{
7674 		.max = 1,
7675 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7676 	},
7677 };
7678 
7679 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
7680 	{
7681 		.max = 2,
7682 		.types = BIT(NL80211_IFTYPE_STATION),
7683 	},
7684 	{
7685 		.max = 2,
7686 		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
7687 	},
7688 	{
7689 		.max = 1,
7690 		.types = BIT(NL80211_IFTYPE_AP) |
7691 #ifdef CONFIG_MAC80211_MESH
7692 			 BIT(NL80211_IFTYPE_MESH_POINT) |
7693 #endif
7694 			 BIT(NL80211_IFTYPE_P2P_GO),
7695 	},
7696 	{
7697 		.max = 1,
7698 		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
7699 	},
7700 };
7701 
7702 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
7703 	{
7704 		.max = 1,
7705 		.types = BIT(NL80211_IFTYPE_STATION),
7706 	},
7707 	{
7708 		.max = 1,
7709 		.types = BIT(NL80211_IFTYPE_ADHOC),
7710 	},
7711 };
7712 
7713 /* FIXME: This is not thoroughly tested. These combinations may over- or
7714  * underestimate hw/fw capabilities.
7715  */
7716 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
7717 	{
7718 		.limits = ath10k_tlv_if_limit,
7719 		.num_different_channels = 1,
7720 		.max_interfaces = 4,
7721 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7722 	},
7723 	{
7724 		.limits = ath10k_tlv_if_limit_ibss,
7725 		.num_different_channels = 1,
7726 		.max_interfaces = 2,
7727 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7728 	},
7729 };
7730 
7731 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
7732 	{
7733 		.limits = ath10k_tlv_if_limit,
7734 		.num_different_channels = 1,
7735 		.max_interfaces = 4,
7736 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
7737 	},
7738 	{
7739 		.limits = ath10k_tlv_qcs_if_limit,
7740 		.num_different_channels = 2,
7741 		.max_interfaces = 4,
7742 		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
7743 	},
7744 	{
7745 		.limits = ath10k_tlv_if_limit_ibss,
7746 		.num_different_channels = 1,
7747 		.max_interfaces = 2,
7748 		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
7749 	},
7750 };
7751 
7752 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
7753 	{
7754 		.max = 1,
7755 		.types = BIT(NL80211_IFTYPE_STATION),
7756 	},
7757 	{
7758 		.max	= 16,
7759 		.types	= BIT(NL80211_IFTYPE_AP)
7760 #ifdef CONFIG_MAC80211_MESH
7761 			| BIT(NL80211_IFTYPE_MESH_POINT)
7762 #endif
7763 	},
7764 };
7765 
7766 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
7767 	{
7768 		.limits = ath10k_10_4_if_limits,
7769 		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
7770 		.max_interfaces = 16,
7771 		.num_different_channels = 1,
7772 		.beacon_int_infra_match = true,
7773 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
7774 		.radar_detect_widths =	BIT(NL80211_CHAN_WIDTH_20_NOHT) |
7775 					BIT(NL80211_CHAN_WIDTH_20) |
7776 					BIT(NL80211_CHAN_WIDTH_40) |
7777 					BIT(NL80211_CHAN_WIDTH_80),
7778 #endif
7779 	},
7780 };
7781 
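/* vdev_id -> arvif lookup: iterate all active interfaces atomically and
 * return the vif whose vdev_id matches, or NULL (with a warning) if none.
 */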
7782 static void ath10k_get_arvif_iter(void *data, u8 *mac,
7783 				  struct ieee80211_vif *vif)
7784 {
7785 	struct ath10k_vif_iter *arvif_iter = data;
7786 	struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif);
7787 
7788 	if (arvif->vdev_id == arvif_iter->vdev_id)
7789 		arvif_iter->arvif = arvif;
7790 }
7791 
7792 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
7793 {
7794 	struct ath10k_vif_iter arvif_iter;
7795 	u32 flags;
7796 
7797 	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
7798 	arvif_iter.vdev_id = vdev_id;
7799 
7800 	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
7801 	ieee80211_iterate_active_interfaces_atomic(ar->hw,
7802 						   flags,
7803 						   ath10k_get_arvif_iter,
7804 						   &arvif_iter);
7805 	if (!arvif_iter.arvif) {
7806 		ath10k_warn(ar, "no vif found for vdev %d\n", vdev_id);
7807 		return NULL;
7808 	}
7809 
7810 	return arvif_iter.arvif;
7811 }
7812 
7813 #define WRD_METHOD "WRDD"
7814 #define WRDD_WIFI  (0x07)
7815 
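/* Parse the ACPI WRDD package: element 0 must be an integer equal to 0
 * (presumably a revision field), followed by (domain, country-code)
 * sub-packages. Return the country code of the first Wi-Fi (WRDD_WIFI)
 * entry, or 0 if none is found.
 */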
7816 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
7817 {
7818 	union acpi_object *mcc_pkg;
7819 	union acpi_object *domain_type;
7820 	union acpi_object *mcc_value;
7821 	u32 i;
7822 
7823 	if (wrdd->type != ACPI_TYPE_PACKAGE ||
7824 	    wrdd->package.count < 2 ||
7825 	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
7826 	    wrdd->package.elements[0].integer.value != 0) {
7827 		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
7828 		return 0;
7829 	}
7830 
7831 	for (i = 1; i < wrdd->package.count; ++i) {
7832 		mcc_pkg = &wrdd->package.elements[i];
7833 
7834 		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
7835 			continue;
7836 		if (mcc_pkg->package.count < 2)
7837 			continue;
7838 		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
7839 		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
7840 			continue;
7841 
7842 		domain_type = &mcc_pkg->package.elements[0];
7843 		if (domain_type->integer.value != WRDD_WIFI)
7844 			continue;
7845 
7846 		mcc_value = &mcc_pkg->package.elements[1];
7847 		return mcc_value->integer.value;
7848 	}
7849 	return 0;
7850 }
7851 
7852 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
7853 {
7854 	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
7855 	acpi_handle root_handle;
7856 	acpi_handle handle;
7857 	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
7858 	acpi_status status;
7859 	u32 alpha2_code;
7860 	char alpha2[3];
7861 
7862 	root_handle = ACPI_HANDLE(&pdev->dev);
7863 	if (!root_handle)
7864 		return -EOPNOTSUPP;
7865 
7866 	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
7867 	if (ACPI_FAILURE(status)) {
7868 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7869 			   "failed to get wrd method %d\n", status);
7870 		return -EIO;
7871 	}
7872 
7873 	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
7874 	if (ACPI_FAILURE(status)) {
7875 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7876 			   "failed to call wrdd %d\n", status);
7877 		return -EIO;
7878 	}
7879 
7880 	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
7881 	kfree(wrdd.pointer);
7882 	if (!alpha2_code)
7883 		return -EIO;
7884 
7885 	alpha2[0] = (alpha2_code >> 8) & 0xff;
7886 	alpha2[1] = (alpha2_code >> 0) & 0xff;
7887 	alpha2[2] = '\0';
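	/* e.g. an MCC value of 0x5553 ('U' << 8 | 'S') decodes to "US" */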
7888 
7889 	ath10k_dbg(ar, ATH10K_DBG_BOOT,
7890 		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
7891 
7892 	*rd = ath_regd_find_country_by_name(alpha2);
7893 	if (*rd == 0xffff)
7894 		return -EIO;
7895 
7896 	*rd |= COUNTRY_ERD_FLAG;
7897 	return 0;
7898 }
7899 
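/* Derive the regulatory domain: prefer the ACPI WRDD hint and fall back to
 * the EEPROM-programmed value when WRDD is absent or malformed.
 */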
7900 static int ath10k_mac_init_rd(struct ath10k *ar)
7901 {
7902 	int ret;
7903 	u16 rd;
7904 
7905 	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
7906 	if (ret) {
7907 		ath10k_dbg(ar, ATH10K_DBG_BOOT,
7908 			   "fallback to eeprom programmed regulatory settings\n");
7909 		rd = ar->hw_eeprom_rd;
7910 	}
7911 
7912 	ar->ath_common.regulatory.current_rd = rd;
7913 	return 0;
7914 }
7915 
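/* Final mac80211 registration: build the supported bands from the reported
 * PHY capabilities, set hw flags and interface combinations for the running
 * WMI op version, initialise WoW, DFS and regulatory state, and finally
 * call ieee80211_register_hw().
 */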
7916 int ath10k_mac_register(struct ath10k *ar)
7917 {
7918 	static const u32 cipher_suites[] = {
7919 		WLAN_CIPHER_SUITE_WEP40,
7920 		WLAN_CIPHER_SUITE_WEP104,
7921 		WLAN_CIPHER_SUITE_TKIP,
7922 		WLAN_CIPHER_SUITE_CCMP,
7923 		WLAN_CIPHER_SUITE_AES_CMAC,
7924 	};
7925 	struct ieee80211_supported_band *band;
7926 	void *channels;
7927 	int ret;
7928 
7929 	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
7930 
7931 	SET_IEEE80211_DEV(ar->hw, ar->dev);
7932 
7933 	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
7934 		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
7935 		     ATH10K_NUM_CHANS);
7936 
7937 	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
7938 		channels = kmemdup(ath10k_2ghz_channels,
7939 				   sizeof(ath10k_2ghz_channels),
7940 				   GFP_KERNEL);
7941 		if (!channels) {
7942 			ret = -ENOMEM;
7943 			goto err_free;
7944 		}
7945 
7946 		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
7947 		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
7948 		band->channels = channels;
7949 
7950 		if (ar->hw_params.cck_rate_map_rev2) {
7951 			band->n_bitrates = ath10k_g_rates_rev2_size;
7952 			band->bitrates = ath10k_g_rates_rev2;
7953 		} else {
7954 			band->n_bitrates = ath10k_g_rates_size;
7955 			band->bitrates = ath10k_g_rates;
7956 		}
7957 
7958 		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
7959 	}
7960 
7961 	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
7962 		channels = kmemdup(ath10k_5ghz_channels,
7963 				   sizeof(ath10k_5ghz_channels),
7964 				   GFP_KERNEL);
7965 		if (!channels) {
7966 			ret = -ENOMEM;
7967 			goto err_free;
7968 		}
7969 
7970 		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
7971 		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
7972 		band->channels = channels;
7973 		band->n_bitrates = ath10k_a_rates_size;
7974 		band->bitrates = ath10k_a_rates;
7975 		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
7976 	}
7977 
7978 	ath10k_mac_setup_ht_vht_cap(ar);
7979 
7980 	ar->hw->wiphy->interface_modes =
7981 		BIT(NL80211_IFTYPE_STATION) |
7982 		BIT(NL80211_IFTYPE_AP) |
7983 		BIT(NL80211_IFTYPE_MESH_POINT);
7984 
7985 	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
7986 	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
7987 
7988 	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
7989 		ar->hw->wiphy->interface_modes |=
7990 			BIT(NL80211_IFTYPE_P2P_DEVICE) |
7991 			BIT(NL80211_IFTYPE_P2P_CLIENT) |
7992 			BIT(NL80211_IFTYPE_P2P_GO);
7993 
7994 	ieee80211_hw_set(ar->hw, SIGNAL_DBM);
7995 	ieee80211_hw_set(ar->hw, SUPPORTS_PS);
7996 	ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
7997 	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
7998 	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
7999 	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
8000 	ieee80211_hw_set(ar->hw, AP_LINK_PS);
8001 	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
8002 	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
8003 	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
8004 	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
8005 	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
8006 	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
8007 	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
8008 	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
8009 
8010 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8011 		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
8012 
8013 	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
8014 	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
8015 
8016 	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
8017 		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
8018 
8019 	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
8020 		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
8021 		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
8022 	}
8023 
8024 	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
8025 	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
8026 
8027 	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
8028 	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
8029 	ar->hw->txq_data_size = sizeof(struct ath10k_txq);
8030 
8031 	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
8032 
8033 	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
8034 		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
8035 
8036 		/* Firmware delivers WPS/P2P Probe Request frames to the driver
8037 		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
8038 		 * correct Probe Responses; more an advertisement than an offload.
8039 		 */
8040 		ar->hw->wiphy->probe_resp_offload |=
8041 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
8042 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
8043 			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
8044 	}
8045 
8046 	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map))
8047 		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
8048 
8049 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
8050 	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
8051 	ar->hw->wiphy->max_remain_on_channel_duration = 5000;
8052 
8053 	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
8054 	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
8055 				   NL80211_FEATURE_AP_SCAN;
8056 
8057 	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
8058 
8059 	ret = ath10k_wow_init(ar);
8060 	if (ret) {
8061 		ath10k_warn(ar, "failed to init wow: %d\n", ret);
8062 		goto err_free;
8063 	}
8064 
8065 	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
8066 
8067 	/*
8068 	 * On LL (low-latency) hardware the queues are managed entirely by the
8069 	 * firmware, so just advertise the maximum queue count to mac80211.
8070 	 */
8071 	ar->hw->queues = IEEE80211_MAX_QUEUES;
8072 
8073 	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
8074 	 * something that vdev_ids can't reach so that we don't stop the queue
8075 	 * accidentally.
8076 	 */
8077 	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
8078 
8079 	switch (ar->running_fw->fw_file.wmi_op_version) {
8080 	case ATH10K_FW_WMI_OP_VERSION_MAIN:
8081 		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
8082 		ar->hw->wiphy->n_iface_combinations =
8083 			ARRAY_SIZE(ath10k_if_comb);
8084 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8085 		break;
8086 	case ATH10K_FW_WMI_OP_VERSION_TLV:
8087 		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
8088 			ar->hw->wiphy->iface_combinations =
8089 				ath10k_tlv_qcs_if_comb;
8090 			ar->hw->wiphy->n_iface_combinations =
8091 				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
8092 		} else {
8093 			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
8094 			ar->hw->wiphy->n_iface_combinations =
8095 				ARRAY_SIZE(ath10k_tlv_if_comb);
8096 		}
8097 		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
8098 		break;
8099 	case ATH10K_FW_WMI_OP_VERSION_10_1:
8100 	case ATH10K_FW_WMI_OP_VERSION_10_2:
8101 	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
8102 		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
8103 		ar->hw->wiphy->n_iface_combinations =
8104 			ARRAY_SIZE(ath10k_10x_if_comb);
8105 		break;
8106 	case ATH10K_FW_WMI_OP_VERSION_10_4:
8107 		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
8108 		ar->hw->wiphy->n_iface_combinations =
8109 			ARRAY_SIZE(ath10k_10_4_if_comb);
8110 		break;
8111 	case ATH10K_FW_WMI_OP_VERSION_UNSET:
8112 	case ATH10K_FW_WMI_OP_VERSION_MAX:
8113 		WARN_ON(1);
8114 		ret = -EINVAL;
8115 		goto err_free;
8116 	}
8117 
8118 	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
8119 		ar->hw->netdev_features = NETIF_F_HW_CSUM;
8120 
8121 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
8122 		/* Init ath dfs pattern detector */
8123 		ar->ath_common.debug_mask = ATH_DBG_DFS;
8124 		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
8125 							     NL80211_DFS_UNSET);
8126 
8127 		if (!ar->dfs_detector)
8128 			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
8129 	}
8130 
8131 	/* Current wake_tx_queue implementation imposes a significant
8132 	 * performance penalty in some setups. The tx scheduling code needs
8133 	 * more work anyway so disable the wake_tx_queue unless firmware
8134 	 * supports the pull-push mechanism.
8135 	 */
8136 	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
8137 		      ar->running_fw->fw_file.fw_features))
8138 		ar->ops->wake_tx_queue = NULL;
8139 
8140 	ret = ath10k_mac_init_rd(ar);
8141 	if (ret) {
8142 		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
8143 		goto err_dfs_detector_exit;
8144 	}
8145 
8146 	/* Disable set_coverage_class for chipsets that do not support it. */
8147 	if (!ar->hw_params.hw_ops->set_coverage_class)
8148 		ar->ops->set_coverage_class = NULL;
8149 
8150 	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
8151 			    ath10k_reg_notifier);
8152 	if (ret) {
8153 		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
8154 		goto err_dfs_detector_exit;
8155 	}
8156 
8157 	ar->hw->wiphy->cipher_suites = cipher_suites;
8158 	ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites);
8159 
8160 	ret = ieee80211_register_hw(ar->hw);
8161 	if (ret) {
8162 		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
8163 		goto err_dfs_detector_exit;
8164 	}
8165 
8166 	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
8167 		ret = regulatory_hint(ar->hw->wiphy,
8168 				      ar->ath_common.regulatory.alpha2);
8169 		if (ret)
8170 			goto err_unregister;
8171 	}
8172 
8173 	return 0;
8174 
8175 err_unregister:
8176 	ieee80211_unregister_hw(ar->hw);
8177 
8178 err_dfs_detector_exit:
8179 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8180 		ar->dfs_detector->exit(ar->dfs_detector);
8181 
8182 err_free:
8183 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8184 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8185 
8186 	SET_IEEE80211_DEV(ar->hw, NULL);
8187 	return ret;
8188 }
8189 
8190 void ath10k_mac_unregister(struct ath10k *ar)
8191 {
8192 	ieee80211_unregister_hw(ar->hw);
8193 
8194 	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
8195 		ar->dfs_detector->exit(ar->dfs_detector);
8196 
8197 	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
8198 	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
8199 
8200 	SET_IEEE80211_DEV(ar->hw, NULL);
8201 }
8202