1 // SPDX-License-Identifier: ISC
2 /*
3 * Copyright (c) 2005-2011 Atheros Communications Inc.
4 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
5 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
6 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
7 */
8
9 #include "mac.h"
10
11 #include <net/cfg80211.h>
12 #include <net/mac80211.h>
13 #include <linux/etherdevice.h>
14 #include <linux/acpi.h>
15 #include <linux/of.h>
16 #include <linux/bitfield.h>
17
18 #include "hif.h"
19 #include "core.h"
20 #include "debug.h"
21 #include "wmi.h"
22 #include "htt.h"
23 #include "txrx.h"
24 #include "testmode.h"
25 #include "wmi-tlv.h"
26 #include "wmi-ops.h"
27 #include "wow.h"
28 #include "leds.h"
29
30 /*********/
31 /* Rates */
32 /*********/
33
34 static struct ieee80211_rate ath10k_rates[] = {
35 { .bitrate = 10,
36 .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
37 { .bitrate = 20,
38 .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
39 .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
40 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
41 { .bitrate = 55,
42 .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
43 .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
44 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
45 { .bitrate = 110,
46 .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
47 .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
48 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
49
50 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
51 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
52 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
53 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
54 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
55 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
56 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
57 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
58 };
59
60 static struct ieee80211_rate ath10k_rates_rev2[] = {
61 { .bitrate = 10,
62 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
63 { .bitrate = 20,
64 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
65 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
66 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
67 { .bitrate = 55,
68 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
69 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
70 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
71 { .bitrate = 110,
72 .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
73 .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
74 .flags = IEEE80211_RATE_SHORT_PREAMBLE },
75
76 { .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
77 { .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
78 { .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
79 { .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
80 { .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
81 { .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
82 { .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
83 { .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
84 };
85
86 static const struct cfg80211_sar_freq_ranges ath10k_sar_freq_ranges[] = {
87 {.start_freq = 2402, .end_freq = 2494 },
88 {.start_freq = 5170, .end_freq = 5875 },
89 };
90
91 static const struct cfg80211_sar_capa ath10k_sar_capa = {
92 .type = NL80211_SAR_TYPE_POWER,
93 .num_freq_ranges = ARRAY_SIZE(ath10k_sar_freq_ranges),
94 .freq_ranges = &ath10k_sar_freq_ranges[0],
95 };
96
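/* Note: the CCK rates occupy indices 0-3 of ath10k_rates[] and
 * ath10k_rates_rev2[], so the OFDM rates start at index 4; the macros
 * below slice the shared tables accordingly.
 */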
97 #define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4
98
99 #define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
100 #define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
101 ATH10K_MAC_FIRST_OFDM_RATE_IDX)
102 #define ath10k_g_rates (ath10k_rates + 0)
103 #define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))
104
105 #define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
106 #define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))
107
108 #define ath10k_wmi_legacy_rates ath10k_rates
109
110 static bool ath10k_mac_bitrate_is_cck(int bitrate)
111 {
112 switch (bitrate) {
113 case 10:
114 case 20:
115 case 55:
116 case 110:
117 return true;
118 }
119
120 return false;
121 }
122
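/* Convert a bitrate given in units of 100 kbps into the rate code used by
 * the firmware: the rate in units of 500 kbps, with BIT(7) set for CCK
 * rates.
 */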
123 static u8 ath10k_mac_bitrate_to_rate(int bitrate)
124 {
125 return DIV_ROUND_UP(bitrate, 5) |
126 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
127 }
128
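/* Map a firmware hw rate code back to an index into the supported band's
 * bitrate table. CCK and OFDM hw values share the same numeric space, so
 * the cck flag selects which family to match; both long and short
 * preamble values are considered.
 */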
129 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
130 u8 hw_rate, bool cck)
131 {
132 const struct ieee80211_rate *rate;
133 int i;
134
135 for (i = 0; i < sband->n_bitrates; i++) {
136 rate = &sband->bitrates[i];
137
138 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
139 continue;
140
141 if (rate->hw_value == hw_rate)
142 return i;
143 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
144 rate->hw_value_short == hw_rate)
145 return i;
146 }
147
148 return 0;
149 }
150
151 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
152 u32 bitrate)
153 {
154 int i;
155
156 for (i = 0; i < sband->n_bitrates; i++)
157 if (sband->bitrates[i].bitrate == bitrate)
158 return i;
159
160 return 0;
161 }
162
163 static int ath10k_mac_get_rate_hw_value(int bitrate)
164 {
165 int i;
166 u8 hw_value_prefix = 0;
167
168 if (ath10k_mac_bitrate_is_cck(bitrate))
169 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6;
170
171 for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) {
172 if (ath10k_rates[i].bitrate == bitrate)
173 return hw_value_prefix | ath10k_rates[i].hw_value;
174 }
175
176 return -EINVAL;
177 }
178
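/* The VHT MCS map encodes two bits per spatial stream. Return a bitmask
 * of the MCS indices (0-7, 0-8 or 0-9) supported for the given NSS, or 0
 * if the stream is not supported at all.
 */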
179 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
180 {
181 switch ((mcs_map >> (2 * nss)) & 0x3) {
182 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
183 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
184 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
185 }
186 return 0;
187 }
188
189 static u32
190 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
191 {
192 int nss;
193
194 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
195 if (ht_mcs_mask[nss])
196 return nss + 1;
197
198 return 1;
199 }
200
201 static u32
202 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
203 {
204 int nss;
205
206 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
207 if (vht_mcs_mask[nss])
208 return nss + 1;
209
210 return 1;
211 }
212
213 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
214 {
215 enum wmi_host_platform_type platform_type;
216 int ret;
217
218 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
219 platform_type = WMI_HOST_PLATFORM_LOW_PERF;
220 else
221 platform_type = WMI_HOST_PLATFORM_HIGH_PERF;
222
223 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);
224
225 if (ret && ret != -EOPNOTSUPP) {
226 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
227 return ret;
228 }
229
230 return 0;
231 }
232
233 /**********/
234 /* Crypto */
235 /**********/
236
237 static int ath10k_send_key(struct ath10k_vif *arvif,
238 struct ieee80211_key_conf *key,
239 enum set_key_cmd cmd,
240 const u8 *macaddr, u32 flags)
241 {
242 struct ath10k *ar = arvif->ar;
243 struct wmi_vdev_install_key_arg arg = {
244 .vdev_id = arvif->vdev_id,
245 .key_idx = key->keyidx,
246 .key_len = key->keylen,
247 .key_data = key->key,
248 .key_flags = flags,
249 .macaddr = macaddr,
250 };
251
252 lockdep_assert_held(&arvif->ar->conf_mutex);
253
254 switch (key->cipher) {
255 case WLAN_CIPHER_SUITE_CCMP:
256 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
257 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
258 break;
259 case WLAN_CIPHER_SUITE_TKIP:
260 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP];
261 arg.key_txmic_len = 8;
262 arg.key_rxmic_len = 8;
263 break;
264 case WLAN_CIPHER_SUITE_WEP40:
265 case WLAN_CIPHER_SUITE_WEP104:
266 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP];
267 break;
268 case WLAN_CIPHER_SUITE_CCMP_256:
269 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM];
270 break;
271 case WLAN_CIPHER_SUITE_GCMP:
272 case WLAN_CIPHER_SUITE_GCMP_256:
273 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM];
274 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
275 break;
276 case WLAN_CIPHER_SUITE_BIP_GMAC_128:
277 case WLAN_CIPHER_SUITE_BIP_GMAC_256:
278 case WLAN_CIPHER_SUITE_BIP_CMAC_256:
279 case WLAN_CIPHER_SUITE_AES_CMAC:
280 WARN_ON(1);
281 return -EINVAL;
282 default:
283 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
284 return -EOPNOTSUPP;
285 }
286
287 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
288 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
289
290 if (cmd == DISABLE_KEY) {
291 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE];
292 arg.key_data = NULL;
293 }
294
295 return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
296 }
297
298 static int ath10k_install_key(struct ath10k_vif *arvif,
299 struct ieee80211_key_conf *key,
300 enum set_key_cmd cmd,
301 const u8 *macaddr, u32 flags)
302 {
303 struct ath10k *ar = arvif->ar;
304 int ret;
305 unsigned long time_left;
306
307 lockdep_assert_held(&ar->conf_mutex);
308
309 reinit_completion(&ar->install_key_done);
310
311 if (arvif->nohwcrypt)
312 return 1;
313
314 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
315 if (ret)
316 return ret;
317
318 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
319 if (time_left == 0)
320 return -ETIMEDOUT;
321
322 return 0;
323 }
324
325 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
326 const u8 *addr)
327 {
328 struct ath10k *ar = arvif->ar;
329 struct ath10k_peer *peer;
330 int ret;
331 int i;
332 u32 flags;
333
334 lockdep_assert_held(&ar->conf_mutex);
335
336 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
337 arvif->vif->type != NL80211_IFTYPE_ADHOC &&
338 arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
339 return -EINVAL;
340
341 spin_lock_bh(&ar->data_lock);
342 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
343 spin_unlock_bh(&ar->data_lock);
344
345 if (!peer)
346 return -ENOENT;
347
348 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
349 if (arvif->wep_keys[i] == NULL)
350 continue;
351
352 switch (arvif->vif->type) {
353 case NL80211_IFTYPE_AP:
354 flags = WMI_KEY_PAIRWISE;
355
356 if (arvif->def_wep_key_idx == i)
357 flags |= WMI_KEY_TX_USAGE;
358
359 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
360 SET_KEY, addr, flags);
361 if (ret < 0)
362 return ret;
363 break;
364 case NL80211_IFTYPE_ADHOC:
365 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
366 SET_KEY, addr,
367 WMI_KEY_PAIRWISE);
368 if (ret < 0)
369 return ret;
370
371 ret = ath10k_install_key(arvif, arvif->wep_keys[i],
372 SET_KEY, addr, WMI_KEY_GROUP);
373 if (ret < 0)
374 return ret;
375 break;
376 default:
377 WARN_ON(1);
378 return -EINVAL;
379 }
380
381 spin_lock_bh(&ar->data_lock);
382 peer->keys[i] = arvif->wep_keys[i];
383 spin_unlock_bh(&ar->data_lock);
384 }
385
386 /* In some cases (notably with static WEP IBSS with multiple keys)
387 * multicast Tx becomes broken. Both pairwise and groupwise keys are
388 * installed already. Using WMI_KEY_TX_USAGE in different combinations
389 * didn't seem to help. Using the def_keyid vdev parameter seems to be
390 * effective so use that.
391 *
392 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
393 */
394 if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
395 return 0;
396
397 if (arvif->def_wep_key_idx == -1)
398 return 0;
399
400 ret = ath10k_wmi_vdev_set_param(arvif->ar,
401 arvif->vdev_id,
402 arvif->ar->wmi.vdev_param->def_keyid,
403 arvif->def_wep_key_idx);
404 if (ret) {
405 ath10k_warn(ar, "failed to re-set def wep key idx on vdev %i: %d\n",
406 arvif->vdev_id, ret);
407 return ret;
408 }
409
410 return 0;
411 }
412
413 static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
414 const u8 *addr)
415 {
416 struct ath10k *ar = arvif->ar;
417 struct ath10k_peer *peer;
418 int first_errno = 0;
419 int ret;
420 int i;
421 u32 flags = 0;
422
423 lockdep_assert_held(&ar->conf_mutex);
424
425 spin_lock_bh(&ar->data_lock);
426 peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
427 spin_unlock_bh(&ar->data_lock);
428
429 if (!peer)
430 return -ENOENT;
431
432 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
433 if (peer->keys[i] == NULL)
434 continue;
435
436 /* key flags are not required to delete the key */
437 ret = ath10k_install_key(arvif, peer->keys[i],
438 DISABLE_KEY, addr, flags);
439 if (ret < 0 && first_errno == 0)
440 first_errno = ret;
441
442 if (ret < 0)
443 ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
444 i, ret);
445
446 spin_lock_bh(&ar->data_lock);
447 peer->keys[i] = NULL;
448 spin_unlock_bh(&ar->data_lock);
449 }
450
451 return first_errno;
452 }
453
454 bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
455 u8 keyidx)
456 {
457 struct ath10k_peer *peer;
458 int i;
459
460 lockdep_assert_held(&ar->data_lock);
461
462 /* We don't know which vdev this peer belongs to,
463 * since WMI doesn't give us that information.
464 *
465 * FIXME: multi-bss needs to be handled.
466 */
467 peer = ath10k_peer_find(ar, 0, addr);
468 if (!peer)
469 return false;
470
471 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
472 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
473 return true;
474 }
475
476 return false;
477 }
478
479 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
480 struct ieee80211_key_conf *key)
481 {
482 struct ath10k *ar = arvif->ar;
483 struct ath10k_peer *peer;
484 u8 addr[ETH_ALEN];
485 int first_errno = 0;
486 int ret;
487 int i;
488 u32 flags = 0;
489
490 lockdep_assert_held(&ar->conf_mutex);
491
492 for (;;) {
493 /* since ath10k_install_key() may sleep we can't hold data_lock all
494 * the time, so we try to remove the keys incrementally
495 */
496 spin_lock_bh(&ar->data_lock);
497 i = 0;
498 list_for_each_entry(peer, &ar->peers, list) {
499 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
500 if (peer->keys[i] == key) {
501 ether_addr_copy(addr, peer->addr);
502 peer->keys[i] = NULL;
503 break;
504 }
505 }
506
507 if (i < ARRAY_SIZE(peer->keys))
508 break;
509 }
510 spin_unlock_bh(&ar->data_lock);
511
512 if (i == ARRAY_SIZE(peer->keys))
513 break;
514 /* key flags are not required to delete the key */
515 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
516 if (ret < 0 && first_errno == 0)
517 first_errno = ret;
518
519 if (ret)
520 ath10k_warn(ar, "failed to remove key for %pM: %d\n",
521 addr, ret);
522 }
523
524 return first_errno;
525 }
526
527 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
528 struct ieee80211_key_conf *key)
529 {
530 struct ath10k *ar = arvif->ar;
531 struct ath10k_peer *peer;
532 int ret;
533
534 lockdep_assert_held(&ar->conf_mutex);
535
536 list_for_each_entry(peer, &ar->peers, list) {
537 if (ether_addr_equal(peer->addr, arvif->vif->addr))
538 continue;
539
540 if (ether_addr_equal(peer->addr, arvif->bssid))
541 continue;
542
543 if (peer->keys[key->keyidx] == key)
544 continue;
545
546 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
547 arvif->vdev_id, key->keyidx);
548
549 ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
550 if (ret) {
551 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
552 arvif->vdev_id, peer->addr, ret);
553 return ret;
554 }
555 }
556
557 return 0;
558 }
559
560 /*********************/
561 /* General utilities */
562 /*********************/
563
564 static inline enum wmi_phy_mode
565 chan_to_phymode(const struct cfg80211_chan_def *chandef)
566 {
567 enum wmi_phy_mode phymode = MODE_UNKNOWN;
568
569 switch (chandef->chan->band) {
570 case NL80211_BAND_2GHZ:
571 switch (chandef->width) {
572 case NL80211_CHAN_WIDTH_20_NOHT:
573 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
574 phymode = MODE_11B;
575 else
576 phymode = MODE_11G;
577 break;
578 case NL80211_CHAN_WIDTH_20:
579 phymode = MODE_11NG_HT20;
580 break;
581 case NL80211_CHAN_WIDTH_40:
582 phymode = MODE_11NG_HT40;
583 break;
584 default:
585 phymode = MODE_UNKNOWN;
586 break;
587 }
588 break;
589 case NL80211_BAND_5GHZ:
590 switch (chandef->width) {
591 case NL80211_CHAN_WIDTH_20_NOHT:
592 phymode = MODE_11A;
593 break;
594 case NL80211_CHAN_WIDTH_20:
595 phymode = MODE_11NA_HT20;
596 break;
597 case NL80211_CHAN_WIDTH_40:
598 phymode = MODE_11NA_HT40;
599 break;
600 case NL80211_CHAN_WIDTH_80:
601 phymode = MODE_11AC_VHT80;
602 break;
603 case NL80211_CHAN_WIDTH_160:
604 phymode = MODE_11AC_VHT160;
605 break;
606 case NL80211_CHAN_WIDTH_80P80:
607 phymode = MODE_11AC_VHT80_80;
608 break;
609 default:
610 phymode = MODE_UNKNOWN;
611 break;
612 }
613 break;
614 default:
615 break;
616 }
617
618 WARN_ON(phymode == MODE_UNKNOWN);
619 return phymode;
620 }
621
622 static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
623 {
624 /*
625 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
626 * 0 for no restriction
627 * 1 for 1/4 us
628 * 2 for 1/2 us
629 * 3 for 1 us
630 * 4 for 2 us
631 * 5 for 4 us
632 * 6 for 8 us
633 * 7 for 16 us
634 */
635 switch (mpdudensity) {
636 case 0:
637 return 0;
638 case 1:
639 case 2:
640 case 3:
641 /* Our lower layer calculations limit our precision to
642 * 1 microsecond
643 */
644 return 1;
645 case 4:
646 return 2;
647 case 5:
648 return 4;
649 case 6:
650 return 8;
651 case 7:
652 return 16;
653 default:
654 return 0;
655 }
656 }
657
658 int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
659 struct cfg80211_chan_def *def)
660 {
661 struct ieee80211_chanctx_conf *conf;
662
663 rcu_read_lock();
664 conf = rcu_dereference(vif->bss_conf.chanctx_conf);
665 if (!conf) {
666 rcu_read_unlock();
667 return -ENOENT;
668 }
669
670 *def = conf->def;
671 rcu_read_unlock();
672
673 return 0;
674 }
675
676 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
677 struct ieee80211_chanctx_conf *conf,
678 void *data)
679 {
680 int *num = data;
681
682 (*num)++;
683 }
684
685 static int ath10k_mac_num_chanctxs(struct ath10k *ar)
686 {
687 int num = 0;
688
689 ieee80211_iter_chan_contexts_atomic(ar->hw,
690 ath10k_mac_num_chanctxs_iter,
691 &num);
692
693 return num;
694 }
695
696 static void
697 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
698 struct ieee80211_chanctx_conf *conf,
699 void *data)
700 {
701 struct cfg80211_chan_def **def = data;
702
703 *def = &conf->def;
704 }
705
706 static void ath10k_wait_for_peer_delete_done(struct ath10k *ar, u32 vdev_id,
707 const u8 *addr)
708 {
709 unsigned long time_left;
710 int ret;
711
712 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
713 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
714 if (ret) {
715 ath10k_warn(ar, "failed to wait for peer delete: %d\n", ret);
716 return;
717 }
718
719 time_left = wait_for_completion_timeout(&ar->peer_delete_done,
720 5 * HZ);
721 if (!time_left)
722 ath10k_warn(ar, "Timeout in receiving peer delete response\n");
723 }
724 }
725
726 static int ath10k_peer_create(struct ath10k *ar,
727 struct ieee80211_vif *vif,
728 struct ieee80211_sta *sta,
729 u32 vdev_id,
730 const u8 *addr,
731 enum wmi_peer_type peer_type)
732 {
733 struct ath10k_peer *peer;
734 int ret;
735
736 lockdep_assert_held(&ar->conf_mutex);
737
738 /* Each vdev consumes a peer entry as well. */
739 if (ar->num_peers + list_count_nodes(&ar->arvifs) >= ar->max_num_peers)
740 return -ENOBUFS;
741
742 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
743 if (ret) {
744 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
745 addr, vdev_id, ret);
746 return ret;
747 }
748
749 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
750 if (ret) {
751 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
752 addr, vdev_id, ret);
753 return ret;
754 }
755
756 spin_lock_bh(&ar->data_lock);
757
758 peer = ath10k_peer_find(ar, vdev_id, addr);
759 if (!peer) {
760 spin_unlock_bh(&ar->data_lock);
761 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
762 addr, vdev_id);
763 ath10k_wait_for_peer_delete_done(ar, vdev_id, addr);
764 return -ENOENT;
765 }
766
767 peer->vif = vif;
768 peer->sta = sta;
769
770 spin_unlock_bh(&ar->data_lock);
771
772 ar->num_peers++;
773
774 return 0;
775 }
776
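/* Program the pdev-wide station kickout threshold and the per-vdev AP
 * keepalive idle/unresponsive timers so the firmware can detect and kick
 * out unresponsive stations.
 */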
777 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
778 {
779 struct ath10k *ar = arvif->ar;
780 u32 param;
781 int ret;
782
783 param = ar->wmi.pdev_param->sta_kickout_th;
784 ret = ath10k_wmi_pdev_set_param(ar, param,
785 ATH10K_KICKOUT_THRESHOLD);
786 if (ret) {
787 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
788 arvif->vdev_id, ret);
789 return ret;
790 }
791
792 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
793 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
794 ATH10K_KEEPALIVE_MIN_IDLE);
795 if (ret) {
796 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
797 arvif->vdev_id, ret);
798 return ret;
799 }
800
801 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
802 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
803 ATH10K_KEEPALIVE_MAX_IDLE);
804 if (ret) {
805 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
806 arvif->vdev_id, ret);
807 return ret;
808 }
809
810 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
811 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
812 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
813 if (ret) {
814 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
815 arvif->vdev_id, ret);
816 return ret;
817 }
818
819 return 0;
820 }
821
822 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
823 {
824 struct ath10k *ar = arvif->ar;
825 u32 vdev_param;
826
827 vdev_param = ar->wmi.vdev_param->rts_threshold;
828 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
829 }
830
831 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
832 {
833 int ret;
834
835 lockdep_assert_held(&ar->conf_mutex);
836
837 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
838 if (ret)
839 return ret;
840
841 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
842 if (ret)
843 return ret;
844
845 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
846 unsigned long time_left;
847
848 time_left = wait_for_completion_timeout
849 (&ar->peer_delete_done, 5 * HZ);
850
851 if (!time_left) {
852 ath10k_warn(ar, "Timeout in receiving peer delete response\n");
853 return -ETIMEDOUT;
854 }
855 }
856
857 ar->num_peers--;
858
859 return 0;
860 }
861
862 static void ath10k_peer_map_cleanup(struct ath10k *ar, struct ath10k_peer *peer)
863 {
864 int peer_id, i;
865
866 lockdep_assert_held(&ar->conf_mutex);
867
868 for_each_set_bit(peer_id, peer->peer_ids,
869 ATH10K_MAX_NUM_PEER_IDS) {
870 ar->peer_map[peer_id] = NULL;
871 }
872
873 /* Double check that peer is properly un-referenced from
874 * the peer_map
875 */
876 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
877 if (ar->peer_map[i] == peer) {
878 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
879 peer->addr, peer, i);
880 ar->peer_map[i] = NULL;
881 }
882 }
883
884 list_del(&peer->list);
885 kfree(peer);
886 ar->num_peers--;
887 }
888
889 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
890 {
891 struct ath10k_peer *peer, *tmp;
892
893 lockdep_assert_held(&ar->conf_mutex);
894
895 spin_lock_bh(&ar->data_lock);
896 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
897 if (peer->vdev_id != vdev_id)
898 continue;
899
900 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
901 peer->addr, vdev_id);
902
903 ath10k_peer_map_cleanup(ar, peer);
904 }
905 spin_unlock_bh(&ar->data_lock);
906 }
907
908 static void ath10k_peer_cleanup_all(struct ath10k *ar)
909 {
910 struct ath10k_peer *peer, *tmp;
911 int i;
912
913 lockdep_assert_held(&ar->conf_mutex);
914
915 spin_lock_bh(&ar->data_lock);
916 list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
917 list_del(&peer->list);
918 kfree(peer);
919 }
920
921 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
922 ar->peer_map[i] = NULL;
923
924 spin_unlock_bh(&ar->data_lock);
925
926 ar->num_peers = 0;
927 ar->num_stations = 0;
928 }
929
930 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
931 struct ieee80211_sta *sta,
932 enum wmi_tdls_peer_state state)
933 {
934 int ret;
935 struct wmi_tdls_peer_update_cmd_arg arg = {};
936 struct wmi_tdls_peer_capab_arg cap = {};
937 struct wmi_channel_arg chan_arg = {};
938
939 lockdep_assert_held(&ar->conf_mutex);
940
941 arg.vdev_id = vdev_id;
942 arg.peer_state = state;
943 ether_addr_copy(arg.addr, sta->addr);
944
945 cap.peer_max_sp = sta->max_sp;
946 cap.peer_uapsd_queues = sta->uapsd_queues;
947
948 if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
949 !sta->tdls_initiator)
950 cap.is_peer_responder = 1;
951
952 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
953 if (ret) {
954 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
955 arg.addr, vdev_id, ret);
956 return ret;
957 }
958
959 return 0;
960 }
961
962 /************************/
963 /* Interface management */
964 /************************/
965
966 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
967 {
968 struct ath10k *ar = arvif->ar;
969
970 lockdep_assert_held(&ar->data_lock);
971
972 if (!arvif->beacon)
973 return;
974
975 if (!arvif->beacon_buf)
976 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
977 arvif->beacon->len, DMA_TO_DEVICE);
978
979 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
980 arvif->beacon_state != ATH10K_BEACON_SENT))
981 return;
982
983 dev_kfree_skb_any(arvif->beacon);
984
985 arvif->beacon = NULL;
986 arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
987 }
988
989 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
990 {
991 struct ath10k *ar = arvif->ar;
992
993 lockdep_assert_held(&ar->data_lock);
994
995 ath10k_mac_vif_beacon_free(arvif);
996
997 if (arvif->beacon_buf) {
998 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
999 kfree(arvif->beacon_buf);
1000 else
1001 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
1002 arvif->beacon_buf,
1003 arvif->beacon_paddr);
1004 arvif->beacon_buf = NULL;
1005 }
1006 }
1007
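/* Wait for the firmware response to a previously issued vdev
 * start/restart/stop request (signalled via vdev_setup_done) and return
 * the status reported by the firmware, -ESHUTDOWN when crash recovery is
 * in progress, or -ETIMEDOUT if no response arrives in time.
 */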
1008 static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
1009 {
1010 unsigned long time_left;
1011
1012 lockdep_assert_held(&ar->conf_mutex);
1013
1014 if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
1015 return -ESHUTDOWN;
1016
1017 time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
1018 ATH10K_VDEV_SETUP_TIMEOUT_HZ);
1019 if (time_left == 0)
1020 return -ETIMEDOUT;
1021
1022 return ar->last_wmi_vdev_start_status;
1023 }
1024
1025 static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
1026 {
1027 struct cfg80211_chan_def *chandef = NULL;
1028 struct ieee80211_channel *channel = NULL;
1029 struct wmi_vdev_start_request_arg arg = {};
1030 int ret = 0;
1031
1032 lockdep_assert_held(&ar->conf_mutex);
1033
1034 ieee80211_iter_chan_contexts_atomic(ar->hw,
1035 ath10k_mac_get_any_chandef_iter,
1036 &chandef);
1037 if (WARN_ON_ONCE(!chandef))
1038 return -ENOENT;
1039
1040 channel = chandef->chan;
1041
1042 arg.vdev_id = vdev_id;
1043 arg.channel.freq = channel->center_freq;
1044 arg.channel.band_center_freq1 = chandef->center_freq1;
1045 arg.channel.band_center_freq2 = chandef->center_freq2;
1046
1047 /* TODO: set this up dynamically; what should happen if we
1048 * don't have any vifs?
1049 */
1050 arg.channel.mode = chan_to_phymode(chandef);
1051 arg.channel.chan_radar =
1052 !!(channel->flags & IEEE80211_CHAN_RADAR);
1053
1054 arg.channel.min_power = 0;
1055 arg.channel.max_power = channel->max_power * 2;
1056 arg.channel.max_reg_power = channel->max_reg_power * 2;
1057 arg.channel.max_antenna_gain = channel->max_antenna_gain;
1058
1059 reinit_completion(&ar->vdev_setup_done);
1060 reinit_completion(&ar->vdev_delete_done);
1061
1062 ret = ath10k_wmi_vdev_start(ar, &arg);
1063 if (ret) {
1064 ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
1065 vdev_id, ret);
1066 return ret;
1067 }
1068
1069 ret = ath10k_vdev_setup_sync(ar);
1070 if (ret) {
1071 ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
1072 vdev_id, ret);
1073 return ret;
1074 }
1075
1076 ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
1077 if (ret) {
1078 ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
1079 vdev_id, ret);
1080 goto vdev_stop;
1081 }
1082
1083 ar->monitor_vdev_id = vdev_id;
1084
1085 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
1086 ar->monitor_vdev_id);
1087 return 0;
1088
1089 vdev_stop:
1090 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1091 if (ret)
1092 ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
1093 ar->monitor_vdev_id, ret);
1094
1095 return ret;
1096 }
1097
1098 static int ath10k_monitor_vdev_stop(struct ath10k *ar)
1099 {
1100 int ret = 0;
1101
1102 lockdep_assert_held(&ar->conf_mutex);
1103
1104 ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
1105 if (ret)
1106 ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
1107 ar->monitor_vdev_id, ret);
1108
1109 reinit_completion(&ar->vdev_setup_done);
1110 reinit_completion(&ar->vdev_delete_done);
1111
1112 ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
1113 if (ret)
1114 ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
1115 ar->monitor_vdev_id, ret);
1116
1117 ret = ath10k_vdev_setup_sync(ar);
1118 if (ret)
1119 ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
1120 ar->monitor_vdev_id, ret);
1121
1122 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
1123 ar->monitor_vdev_id);
1124 return ret;
1125 }
1126
1127 static int ath10k_monitor_vdev_create(struct ath10k *ar)
1128 {
1129 int bit, ret = 0;
1130
1131 lockdep_assert_held(&ar->conf_mutex);
1132
1133 if (ar->free_vdev_map == 0) {
1134 ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
1135 return -ENOMEM;
1136 }
1137
1138 bit = __ffs64(ar->free_vdev_map);
1139
1140 ar->monitor_vdev_id = bit;
1141
1142 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
1143 WMI_VDEV_TYPE_MONITOR,
1144 0, ar->mac_addr);
1145 if (ret) {
1146 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
1147 ar->monitor_vdev_id, ret);
1148 return ret;
1149 }
1150
1151 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
1152 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
1153 ar->monitor_vdev_id);
1154
1155 return 0;
1156 }
1157
1158 static int ath10k_monitor_vdev_delete(struct ath10k *ar)
1159 {
1160 int ret = 0;
1161
1162 lockdep_assert_held(&ar->conf_mutex);
1163
1164 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
1165 if (ret) {
1166 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
1167 ar->monitor_vdev_id, ret);
1168 return ret;
1169 }
1170
1171 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;
1172
1173 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
1174 ar->monitor_vdev_id);
1175 return ret;
1176 }
1177
1178 static int ath10k_monitor_start(struct ath10k *ar)
1179 {
1180 int ret;
1181
1182 lockdep_assert_held(&ar->conf_mutex);
1183
1184 ret = ath10k_monitor_vdev_create(ar);
1185 if (ret) {
1186 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
1187 return ret;
1188 }
1189
1190 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
1191 if (ret) {
1192 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
1193 ath10k_monitor_vdev_delete(ar);
1194 return ret;
1195 }
1196
1197 ar->monitor_started = true;
1198 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");
1199
1200 return 0;
1201 }
1202
1203 static int ath10k_monitor_stop(struct ath10k *ar)
1204 {
1205 int ret;
1206
1207 lockdep_assert_held(&ar->conf_mutex);
1208
1209 ret = ath10k_monitor_vdev_stop(ar);
1210 if (ret) {
1211 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
1212 return ret;
1213 }
1214
1215 ret = ath10k_monitor_vdev_delete(ar);
1216 if (ret) {
1217 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
1218 return ret;
1219 }
1220
1221 ar->monitor_started = false;
1222 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");
1223
1224 return 0;
1225 }
1226
1227 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
1228 {
1229 int num_ctx;
1230
1231 /* At least one chanctx is required to derive a channel to start
1232 * a monitor vdev on.
1233 */
1234 num_ctx = ath10k_mac_num_chanctxs(ar);
1235 if (num_ctx == 0)
1236 return false;
1237
1238 /* If there's already an existing special monitor interface then don't
1239 * bother creating another monitor vdev.
1240 */
1241 if (ar->monitor_arvif)
1242 return false;
1243
1244 return ar->monitor ||
1245 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
1246 ar->running_fw->fw_file.fw_features) &&
1247 (ar->filter_flags & (FIF_OTHER_BSS | FIF_MCAST_ACTION))) ||
1248 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1249 }
1250
1251 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
1252 {
1253 int num_ctx;
1254
1255 num_ctx = ath10k_mac_num_chanctxs(ar);
1256
1257 /* FIXME: Current interface combinations and cfg80211/mac80211 code
1258 * shouldn't allow this but make sure to prevent handling the following
1259 * case anyway since multi-channel DFS hasn't been tested at all.
1260 */
1261 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
1262 return false;
1263
1264 return true;
1265 }
1266
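/* Bring the monitor vdev state in line with what is currently needed
 * (explicit monitor mode, promiscuous filter flags or a running CAC) and
 * what is allowed (e.g. no multi-channel DFS), creating or destroying the
 * special monitor vdev as required.
 */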
1267 static int ath10k_monitor_recalc(struct ath10k *ar)
1268 {
1269 bool needed;
1270 bool allowed;
1271 int ret;
1272
1273 lockdep_assert_held(&ar->conf_mutex);
1274
1275 needed = ath10k_mac_monitor_vdev_is_needed(ar);
1276 allowed = ath10k_mac_monitor_vdev_is_allowed(ar);
1277
1278 ath10k_dbg(ar, ATH10K_DBG_MAC,
1279 "mac monitor recalc started? %d needed? %d allowed? %d\n",
1280 ar->monitor_started, needed, allowed);
1281
1282 if (WARN_ON(needed && !allowed)) {
1283 if (ar->monitor_started) {
1284 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");
1285
1286 ret = ath10k_monitor_stop(ar);
1287 if (ret)
1288 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
1289 ret);
1290 /* not serious */
1291 }
1292
1293 return -EPERM;
1294 }
1295
1296 if (needed == ar->monitor_started)
1297 return 0;
1298
1299 if (needed)
1300 return ath10k_monitor_start(ar);
1301 else
1302 return ath10k_monitor_stop(ar);
1303 }
1304
1305 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
1306 {
1307 struct ath10k *ar = arvif->ar;
1308
1309 lockdep_assert_held(&ar->conf_mutex);
1310
1311 if (!arvif->is_started) {
1312 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
1313 return false;
1314 }
1315
1316 return true;
1317 }
1318
1319 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
1320 {
1321 struct ath10k *ar = arvif->ar;
1322 u32 vdev_param;
1323
1324 lockdep_assert_held(&ar->conf_mutex);
1325
1326 vdev_param = ar->wmi.vdev_param->protection_mode;
1327
1328 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
1329 arvif->vdev_id, arvif->use_cts_prot);
1330
1331 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1332 arvif->use_cts_prot ? 1 : 0);
1333 }
1334
1335 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
1336 {
1337 struct ath10k *ar = arvif->ar;
1338 u32 vdev_param, rts_cts = 0;
1339
1340 lockdep_assert_held(&ar->conf_mutex);
1341
1342 vdev_param = ar->wmi.vdev_param->enable_rtscts;
1343
1344 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);
1345
1346 if (arvif->num_legacy_stations > 0)
1347 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
1348 WMI_RTSCTS_PROFILE);
1349 else
1350 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
1351 WMI_RTSCTS_PROFILE);
1352
1353 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
1354 arvif->vdev_id, rts_cts);
1355
1356 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
1357 rts_cts);
1358 }
1359
1360 static int ath10k_start_cac(struct ath10k *ar)
1361 {
1362 int ret;
1363
1364 lockdep_assert_held(&ar->conf_mutex);
1365
1366 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1367
1368 ret = ath10k_monitor_recalc(ar);
1369 if (ret) {
1370 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
1371 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1372 return ret;
1373 }
1374
1375 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
1376 ar->monitor_vdev_id);
1377
1378 return 0;
1379 }
1380
1381 static int ath10k_stop_cac(struct ath10k *ar)
1382 {
1383 lockdep_assert_held(&ar->conf_mutex);
1384
1385 /* CAC is not running - do nothing */
1386 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
1387 return 0;
1388
1389 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
1390 ath10k_monitor_stop(ar);
1391
1392 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");
1393
1394 return 0;
1395 }
1396
1397 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
1398 struct ieee80211_chanctx_conf *conf,
1399 void *data)
1400 {
1401 bool *ret = data;
1402
1403 if (!*ret && conf->radar_enabled)
1404 *ret = true;
1405 }
1406
1407 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
1408 {
1409 bool has_radar = false;
1410
1411 ieee80211_iter_chan_contexts_atomic(ar->hw,
1412 ath10k_mac_has_radar_iter,
1413 &has_radar);
1414
1415 return has_radar;
1416 }
1417
1418 static void ath10k_recalc_radar_detection(struct ath10k *ar)
1419 {
1420 int ret;
1421
1422 lockdep_assert_held(&ar->conf_mutex);
1423
1424 ath10k_stop_cac(ar);
1425
1426 if (!ath10k_mac_has_radar_enabled(ar))
1427 return;
1428
1429 if (ar->num_started_vdevs > 0)
1430 return;
1431
1432 ret = ath10k_start_cac(ar);
1433 if (ret) {
1434 /*
1435 * Not possible to start CAC on the current channel, so starting
1436 * radiation is not allowed. Make this channel DFS_UNAVAILABLE
1437 * by indicating that radar was detected.
1438 */
1439 ath10k_warn(ar, "failed to start CAC: %d\n", ret);
1440 ieee80211_radar_detected(ar->hw, NULL);
1441 }
1442 }
1443
1444 static int ath10k_vdev_stop(struct ath10k_vif *arvif)
1445 {
1446 struct ath10k *ar = arvif->ar;
1447 int ret;
1448
1449 lockdep_assert_held(&ar->conf_mutex);
1450
1451 reinit_completion(&ar->vdev_setup_done);
1452 reinit_completion(&ar->vdev_delete_done);
1453
1454 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
1455 if (ret) {
1456 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
1457 arvif->vdev_id, ret);
1458 return ret;
1459 }
1460
1461 ret = ath10k_vdev_setup_sync(ar);
1462 if (ret) {
1463 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
1464 arvif->vdev_id, ret);
1465 return ret;
1466 }
1467
1468 WARN_ON(ar->num_started_vdevs == 0);
1469
1470 if (ar->num_started_vdevs != 0) {
1471 ar->num_started_vdevs--;
1472 ath10k_recalc_radar_detection(ar);
1473 }
1474
1475 return ret;
1476 }
1477
1478 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
1479 const struct cfg80211_chan_def *chandef,
1480 bool restart)
1481 {
1482 struct ath10k *ar = arvif->ar;
1483 struct wmi_vdev_start_request_arg arg = {};
1484 int ret = 0;
1485
1486 lockdep_assert_held(&ar->conf_mutex);
1487
1488 reinit_completion(&ar->vdev_setup_done);
1489 reinit_completion(&ar->vdev_delete_done);
1490
1491 arg.vdev_id = arvif->vdev_id;
1492 arg.dtim_period = arvif->dtim_period;
1493 arg.bcn_intval = arvif->beacon_interval;
1494
1495 arg.channel.freq = chandef->chan->center_freq;
1496 arg.channel.band_center_freq1 = chandef->center_freq1;
1497 arg.channel.band_center_freq2 = chandef->center_freq2;
1498 arg.channel.mode = chan_to_phymode(chandef);
1499
1500 arg.channel.min_power = 0;
1501 arg.channel.max_power = chandef->chan->max_power * 2;
1502 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
1503 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain;
1504
1505 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
1506 arg.ssid = arvif->u.ap.ssid;
1507 arg.ssid_len = arvif->u.ap.ssid_len;
1508 arg.hidden_ssid = arvif->u.ap.hidden_ssid;
1509
1510 /* For now allow DFS for AP mode */
1511 arg.channel.chan_radar =
1512 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
1513 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
1514 arg.ssid = arvif->vif->cfg.ssid;
1515 arg.ssid_len = arvif->vif->cfg.ssid_len;
1516 }
1517
1518 ath10k_dbg(ar, ATH10K_DBG_MAC,
1519 "mac vdev %d start center_freq %d phymode %s\n",
1520 arg.vdev_id, arg.channel.freq,
1521 ath10k_wmi_phymode_str(arg.channel.mode));
1522
1523 if (restart)
1524 ret = ath10k_wmi_vdev_restart(ar, &arg);
1525 else
1526 ret = ath10k_wmi_vdev_start(ar, &arg);
1527
1528 if (ret) {
1529 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
1530 arg.vdev_id, ret);
1531 return ret;
1532 }
1533
1534 ret = ath10k_vdev_setup_sync(ar);
1535 if (ret) {
1536 ath10k_warn(ar,
1537 "failed to synchronize setup for vdev %i restart %d: %d\n",
1538 arg.vdev_id, restart, ret);
1539 return ret;
1540 }
1541
1542 ar->num_started_vdevs++;
1543 ath10k_recalc_radar_detection(ar);
1544
1545 return ret;
1546 }
1547
1548 static int ath10k_vdev_start(struct ath10k_vif *arvif,
1549 const struct cfg80211_chan_def *def)
1550 {
1551 return ath10k_vdev_start_restart(arvif, def, false);
1552 }
1553
1554 static int ath10k_vdev_restart(struct ath10k_vif *arvif,
1555 const struct cfg80211_chan_def *def)
1556 {
1557 return ath10k_vdev_start_restart(arvif, def, true);
1558 }
1559
1560 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
1561 struct sk_buff *bcn)
1562 {
1563 struct ath10k *ar = arvif->ar;
1564 struct ieee80211_mgmt *mgmt;
1565 const u8 *p2p_ie;
1566 int ret;
1567
1568 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
1569 return 0;
1570
1571 mgmt = (void *)bcn->data;
1572 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1573 mgmt->u.beacon.variable,
1574 bcn->len - (mgmt->u.beacon.variable -
1575 bcn->data));
1576 if (!p2p_ie)
1577 return -ENOENT;
1578
1579 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
1580 if (ret) {
1581 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
1582 arvif->vdev_id, ret);
1583 return ret;
1584 }
1585
1586 return 0;
1587 }
1588
1589 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
1590 u8 oui_type, size_t ie_offset)
1591 {
1592 size_t len;
1593 const u8 *next;
1594 const u8 *end;
1595 u8 *ie;
1596
1597 if (WARN_ON(skb->len < ie_offset))
1598 return -EINVAL;
1599
1600 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
1601 skb->data + ie_offset,
1602 skb->len - ie_offset);
1603 if (!ie)
1604 return -ENOENT;
1605
1606 len = ie[1] + 2;
1607 end = skb->data + skb->len;
1608 next = ie + len;
1609
1610 if (WARN_ON(next > end))
1611 return -EINVAL;
1612
1613 memmove(ie, next, end - next);
1614 skb_trim(skb, skb->len - len);
1615
1616 return 0;
1617 }
1618
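/* Push the current beacon template to the firmware for offloaded
 * beaconing. Only AP and IBSS vdevs are handled, and only when the
 * firmware advertises WMI_SERVICE_BEACON_OFFLOAD.
 */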
1619 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
1620 {
1621 struct ath10k *ar = arvif->ar;
1622 struct ieee80211_hw *hw = ar->hw;
1623 struct ieee80211_vif *vif = arvif->vif;
1624 struct ieee80211_mutable_offsets offs = {};
1625 struct sk_buff *bcn;
1626 int ret;
1627
1628 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1629 return 0;
1630
1631 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
1632 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
1633 return 0;
1634
1635 bcn = ieee80211_beacon_get_template(hw, vif, &offs, 0);
1636 if (!bcn) {
1637 ath10k_warn(ar, "failed to get beacon template from mac80211\n");
1638 return -EPERM;
1639 }
1640
1641 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
1642 if (ret) {
1643 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
1644 kfree_skb(bcn);
1645 return ret;
1646 }
1647
1648 /* P2P IE is inserted by firmware automatically (as configured above)
1649 * so remove it from the base beacon template to avoid duplicate P2P
1650 * IEs in beacon frames.
1651 */
1652 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
1653 offsetof(struct ieee80211_mgmt,
1654 u.beacon.variable));
1655
1656 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
1657 0, NULL, 0);
1658 kfree_skb(bcn);
1659
1660 if (ret) {
1661 ath10k_warn(ar, "failed to submit beacon template command: %d\n",
1662 ret);
1663 return ret;
1664 }
1665
1666 return 0;
1667 }
1668
1669 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
1670 {
1671 struct ath10k *ar = arvif->ar;
1672 struct ieee80211_hw *hw = ar->hw;
1673 struct ieee80211_vif *vif = arvif->vif;
1674 struct sk_buff *prb;
1675 int ret;
1676
1677 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1678 return 0;
1679
1680 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
1681 return 0;
1682
1683 /* For mesh, probe response and beacon share the same template */
1684 if (ieee80211_vif_is_mesh(vif))
1685 return 0;
1686
1687 prb = ieee80211_proberesp_get(hw, vif);
1688 if (!prb) {
1689 ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
1690 return -EPERM;
1691 }
1692
1693 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
1694 kfree_skb(prb);
1695
1696 if (ret) {
1697 ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
1698 ret);
1699 return ret;
1700 }
1701
1702 return 0;
1703 }
1704
1705 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
1706 {
1707 struct ath10k *ar = arvif->ar;
1708 struct cfg80211_chan_def def;
1709 int ret;
1710
1711 /* When the vdev is originally started during assign_vif_chanctx() some
1712 * information is missing, notably SSID. Firmware revisions with beacon
1713 * offloading require the SSID to be provided during vdev (re)start to
1714 * handle hidden SSID properly.
1715 *
1716 * Vdev restart must be done after vdev has been both started and
1717 * upped. Otherwise some firmware revisions (at least 10.2) fail to
1718 * deliver vdev restart response event causing timeouts during vdev
1719 * syncing in ath10k.
1720 *
1721 * Note: The vdev down/up and template reinstallation could be skipped
1722 * since only wmi-tlv firmware is known to have beacon offload and
1723 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
1724 * response delivery. It's probably more robust to keep it as is.
1725 */
1726 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
1727 return 0;
1728
1729 if (WARN_ON(!arvif->is_started))
1730 return -EINVAL;
1731
1732 if (WARN_ON(!arvif->is_up))
1733 return -EINVAL;
1734
1735 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
1736 return -EINVAL;
1737
1738 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1739 if (ret) {
1740 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
1741 arvif->vdev_id, ret);
1742 return ret;
1743 }
1744
1745 /* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
1746 * firmware will crash upon vdev up.
1747 */
1748
1749 ret = ath10k_mac_setup_bcn_tmpl(arvif);
1750 if (ret) {
1751 ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
1752 return ret;
1753 }
1754
1755 ret = ath10k_mac_setup_prb_tmpl(arvif);
1756 if (ret) {
1757 ath10k_warn(ar, "failed to update presp template: %d\n", ret);
1758 return ret;
1759 }
1760
1761 ret = ath10k_vdev_restart(arvif, &def);
1762 if (ret) {
1763 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
1764 arvif->vdev_id, ret);
1765 return ret;
1766 }
1767
1768 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1769 arvif->bssid);
1770 if (ret) {
1771 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
1772 arvif->vdev_id, ret);
1773 return ret;
1774 }
1775
1776 return 0;
1777 }
1778
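/* Bring the vdev up or down in response to mac80211 enabling or disabling
 * beaconing on it, and drop any beacon that is still pending when
 * beaconing gets disabled.
 */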
1779 static void ath10k_control_beaconing(struct ath10k_vif *arvif,
1780 struct ieee80211_bss_conf *info)
1781 {
1782 struct ath10k *ar = arvif->ar;
1783 int ret = 0;
1784
1785 lockdep_assert_held(&arvif->ar->conf_mutex);
1786
1787 if (!info->enable_beacon) {
1788 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
1789 if (ret)
1790 ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
1791 arvif->vdev_id, ret);
1792
1793 arvif->is_up = false;
1794
1795 spin_lock_bh(&arvif->ar->data_lock);
1796 ath10k_mac_vif_beacon_free(arvif);
1797 spin_unlock_bh(&arvif->ar->data_lock);
1798
1799 return;
1800 }
1801
1802 arvif->tx_seq_no = 0x1000;
1803
1804 arvif->aid = 0;
1805 ether_addr_copy(arvif->bssid, info->bssid);
1806
1807 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
1808 arvif->bssid);
1809 if (ret) {
1810 ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
1811 arvif->vdev_id, ret);
1812 return;
1813 }
1814
1815 arvif->is_up = true;
1816
1817 ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
1818 if (ret) {
1819 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
1820 arvif->vdev_id, ret);
1821 return;
1822 }
1823
1824 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
1825 }
1826
1827 static void ath10k_control_ibss(struct ath10k_vif *arvif,
1828 struct ieee80211_vif *vif)
1829 {
1830 struct ath10k *ar = arvif->ar;
1831 u32 vdev_param;
1832 int ret = 0;
1833
1834 lockdep_assert_held(&arvif->ar->conf_mutex);
1835
1836 if (!vif->cfg.ibss_joined) {
1837 if (is_zero_ether_addr(arvif->bssid))
1838 return;
1839
1840 eth_zero_addr(arvif->bssid);
1841
1842 return;
1843 }
1844
1845 vdev_param = arvif->ar->wmi.vdev_param->atim_window;
1846 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
1847 ATH10K_DEFAULT_ATIM);
1848 if (ret)
1849 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
1850 arvif->vdev_id, ret);
1851 }
1852
1853 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
1854 {
1855 struct ath10k *ar = arvif->ar;
1856 u32 param;
1857 u32 value;
1858 int ret;
1859
1860 lockdep_assert_held(&arvif->ar->conf_mutex);
1861
1862 if (arvif->u.sta.uapsd)
1863 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
1864 else
1865 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;
1866
1867 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
1868 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
1869 if (ret) {
1870 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
1871 value, arvif->vdev_id, ret);
1872 return ret;
1873 }
1874
1875 return 0;
1876 }
1877
1878 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
1879 {
1880 struct ath10k *ar = arvif->ar;
1881 u32 param;
1882 u32 value;
1883 int ret;
1884
1885 lockdep_assert_held(&arvif->ar->conf_mutex);
1886
1887 if (arvif->u.sta.uapsd)
1888 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
1889 else
1890 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;
1891
1892 param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
1893 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
1894 param, value);
1895 if (ret) {
1896 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
1897 value, arvif->vdev_id, ret);
1898 return ret;
1899 }
1900
1901 return 0;
1902 }
1903
1904 static int ath10k_mac_num_vifs_started(struct ath10k *ar)
1905 {
1906 struct ath10k_vif *arvif;
1907 int num = 0;
1908
1909 lockdep_assert_held(&ar->conf_mutex);
1910
1911 list_for_each_entry(arvif, &ar->arvifs, list)
1912 if (arvif->is_started)
1913 num++;
1914
1915 return num;
1916 }
1917
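/* Program the station powersave mode and inactivity timeout of a station
 * vdev based on the current mac80211 configuration and firmware
 * capabilities.
 */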
1918 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
1919 {
1920 struct ath10k *ar = arvif->ar;
1921 struct ieee80211_vif *vif = arvif->vif;
1922 struct ieee80211_conf *conf = &ar->hw->conf;
1923 enum wmi_sta_powersave_param param;
1924 enum wmi_sta_ps_mode psmode;
1925 int ret;
1926 int ps_timeout;
1927 bool enable_ps;
1928
1929 lockdep_assert_held(&arvif->ar->conf_mutex);
1930
1931 if (arvif->vif->type != NL80211_IFTYPE_STATION)
1932 return 0;
1933
1934 enable_ps = arvif->ps;
1935
1936 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
1937 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
1938 ar->running_fw->fw_file.fw_features)) {
1939 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
1940 arvif->vdev_id);
1941 enable_ps = false;
1942 }
1943
1944 if (!arvif->is_started) {
1945 /* mac80211 can update vif powersave state while disconnected.
1946 * Firmware doesn't behave nicely and consumes more power than
1947 * necessary if PS is disabled on a non-started vdev. Hence
1948 * force-enable PS for non-running vdevs.
1949 */
1950 psmode = WMI_STA_PS_MODE_ENABLED;
1951 } else if (enable_ps) {
1952 psmode = WMI_STA_PS_MODE_ENABLED;
1953 param = WMI_STA_PS_PARAM_INACTIVITY_TIME;
1954
1955 ps_timeout = conf->dynamic_ps_timeout;
1956 if (ps_timeout == 0) {
1957 /* Firmware doesn't like 0 */
1958 ps_timeout = ieee80211_tu_to_usec(
1959 vif->bss_conf.beacon_int) / 1000;
1960 }
1961
1962 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
1963 ps_timeout);
1964 if (ret) {
1965 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
1966 arvif->vdev_id, ret);
1967 return ret;
1968 }
1969 } else {
1970 psmode = WMI_STA_PS_MODE_DISABLED;
1971 }
1972
1973 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
1974 arvif->vdev_id, psmode ? "enable" : "disable");
1975
1976 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
1977 if (ret) {
1978 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
1979 psmode, arvif->vdev_id, ret);
1980 return ret;
1981 }
1982
1983 return 0;
1984 }
1985
1986 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
1987 {
1988 struct ath10k *ar = arvif->ar;
1989 struct wmi_sta_keepalive_arg arg = {};
1990 int ret;
1991
1992 lockdep_assert_held(&arvif->ar->conf_mutex);
1993
1994 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
1995 return 0;
1996
1997 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
1998 return 0;
1999
2000 /* Some firmware revisions have a bug and ignore the `enabled` field.
2001 * Instead use the interval to disable the keepalive.
2002 */
2003 arg.vdev_id = arvif->vdev_id;
2004 arg.enabled = 1;
2005 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
2006 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;
2007
2008 ret = ath10k_wmi_sta_keepalive(ar, &arg);
2009 if (ret) {
2010 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
2011 arvif->vdev_id, ret);
2012 return ret;
2013 }
2014
2015 return 0;
2016 }
2017
2018 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
2019 {
2020 struct ath10k *ar = arvif->ar;
2021 struct ieee80211_vif *vif = arvif->vif;
2022 int ret;
2023
2024 lockdep_assert_held(&arvif->ar->conf_mutex);
2025
2026 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
2027 return;
2028
2029 if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
2030 return;
2031
2032 if (!vif->bss_conf.csa_active)
2033 return;
2034
2035 if (!arvif->is_up)
2036 return;
2037
2038 if (!ieee80211_beacon_cntdwn_is_complete(vif, 0)) {
2039 ieee80211_beacon_update_cntdwn(vif, 0);
2040
2041 ret = ath10k_mac_setup_bcn_tmpl(arvif);
2042 if (ret)
2043 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
2044 ret);
2045
2046 ret = ath10k_mac_setup_prb_tmpl(arvif);
2047 if (ret)
2048 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
2049 ret);
2050 } else {
2051 ieee80211_csa_finish(vif, 0);
2052 }
2053 }
2054
2055 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
2056 {
2057 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2058 ap_csa_work);
2059 struct ath10k *ar = arvif->ar;
2060
2061 mutex_lock(&ar->conf_mutex);
2062 ath10k_mac_vif_ap_csa_count_down(arvif);
2063 mutex_unlock(&ar->conf_mutex);
2064 }
2065
2066 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
2067 struct ieee80211_vif *vif)
2068 {
2069 struct sk_buff *skb = data;
2070 struct ieee80211_mgmt *mgmt = (void *)skb->data;
2071 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2072
2073 if (vif->type != NL80211_IFTYPE_STATION)
2074 return;
2075
2076 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
2077 return;
2078
2079 cancel_delayed_work(&arvif->connection_loss_work);
2080 }
2081
2082 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
2083 {
2084 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2085 ATH10K_ITER_NORMAL_FLAGS,
2086 ath10k_mac_handle_beacon_iter,
2087 skb);
2088 }
2089
2090 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
2091 struct ieee80211_vif *vif)
2092 {
2093 u32 *vdev_id = data;
2094 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2095 struct ath10k *ar = arvif->ar;
2096 struct ieee80211_hw *hw = ar->hw;
2097
2098 if (arvif->vdev_id != *vdev_id)
2099 return;
2100
2101 if (!arvif->is_up)
2102 return;
2103
2104 ieee80211_beacon_loss(vif);
2105
2106 /* Firmware doesn't report beacon loss events repeatedly. If AP probe
2107 * (done by mac80211) succeeds but beacons do not resume then it
2108 * doesn't make sense to continue operation. Queue connection loss work
2109 * which can be cancelled when beacon is received.
2110 */
2111 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
2112 ATH10K_CONNECTION_LOSS_HZ);
2113 }
2114
2115 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
2116 {
2117 ieee80211_iterate_active_interfaces_atomic(ar->hw,
2118 ATH10K_ITER_NORMAL_FLAGS,
2119 ath10k_mac_handle_beacon_miss_iter,
2120 &vdev_id);
2121 }
2122
2123 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
2124 {
2125 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
2126 connection_loss_work.work);
2127 struct ieee80211_vif *vif = arvif->vif;
2128
2129 if (!arvif->is_up)
2130 return;
2131
2132 ieee80211_connection_loss(vif);
2133 }
2134
2135 /**********************/
2136 /* Station management */
2137 /**********************/
2138
2139 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
2140 struct ieee80211_vif *vif)
2141 {
2142 /* Some firmware revisions have unstable STA powersave when listen
2143 	 * interval is set too high (e.g. 5). The symptom is that firmware doesn't
2144 * generate NullFunc frames properly even if buffered frames have been
2145 * indicated in Beacon TIM. Firmware would seldom wake up to pull
2146 * buffered frames. Often pinging the device from AP would simply fail.
2147 *
2148 * As a workaround set it to 1.
2149 */
2150 if (vif->type == NL80211_IFTYPE_STATION)
2151 return 1;
2152
2153 return ar->hw->conf.listen_interval;
2154 }
2155
2156 static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
2157 struct ieee80211_vif *vif,
2158 struct ieee80211_sta *sta,
2159 struct wmi_peer_assoc_complete_arg *arg)
2160 {
2161 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2162 u32 aid;
2163
2164 lockdep_assert_held(&ar->conf_mutex);
2165
2166 if (vif->type == NL80211_IFTYPE_STATION)
2167 aid = vif->cfg.aid;
2168 else
2169 aid = sta->aid;
2170
2171 ether_addr_copy(arg->addr, sta->addr);
2172 arg->vdev_id = arvif->vdev_id;
2173 arg->peer_aid = aid;
2174 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
2175 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
2176 arg->peer_num_spatial_streams = 1;
2177 arg->peer_caps = vif->bss_conf.assoc_capability;
2178 }
2179
2180 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
2181 struct ieee80211_vif *vif,
2182 struct ieee80211_sta *sta,
2183 struct wmi_peer_assoc_complete_arg *arg)
2184 {
2185 struct ieee80211_bss_conf *info = &vif->bss_conf;
2186 struct cfg80211_chan_def def;
2187 struct cfg80211_bss *bss;
2188 const u8 *rsnie = NULL;
2189 const u8 *wpaie = NULL;
2190
2191 lockdep_assert_held(&ar->conf_mutex);
2192
2193 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2194 return;
2195
2196 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid,
2197 vif->cfg.ssid_len ? vif->cfg.ssid : NULL,
2198 vif->cfg.ssid_len,
2199 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
2200 if (bss) {
2201 const struct cfg80211_bss_ies *ies;
2202
2203 rcu_read_lock();
2204 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);
2205
2206 ies = rcu_dereference(bss->ies);
2207
2208 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
2209 WLAN_OUI_TYPE_MICROSOFT_WPA,
2210 ies->data,
2211 ies->len);
2212 rcu_read_unlock();
2213 cfg80211_put_bss(ar->hw->wiphy, bss);
2214 }
2215
2216 	/* FIXME: is basing this on RSN IE/WPA IE presence the correct approach? */
2217 if (rsnie || wpaie) {
2218 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__);
2219 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way;
2220 }
2221
2222 if (wpaie) {
2223 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__);
2224 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way;
2225 }
2226
2227 if (sta->mfp &&
2228 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT,
2229 ar->running_fw->fw_file.fw_features)) {
2230 arg->peer_flags |= ar->wmi.peer_flags->pmf;
2231 }
2232 }
2233
2234 static void ath10k_peer_assoc_h_rates(struct ath10k *ar,
2235 struct ieee80211_vif *vif,
2236 struct ieee80211_sta *sta,
2237 struct wmi_peer_assoc_complete_arg *arg)
2238 {
2239 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2240 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates;
2241 struct cfg80211_chan_def def;
2242 const struct ieee80211_supported_band *sband;
2243 const struct ieee80211_rate *rates;
2244 enum nl80211_band band;
2245 u32 ratemask;
2246 u8 rate;
2247 int i;
2248
2249 lockdep_assert_held(&ar->conf_mutex);
2250
2251 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2252 return;
2253
2254 band = def.chan->band;
2255 sband = ar->hw->wiphy->bands[band];
2256 ratemask = sta->deflink.supp_rates[band];
2257 ratemask &= arvif->bitrate_mask.control[band].legacy;
2258 rates = sband->bitrates;
2259
2260 rateset->num_rates = 0;
2261
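	/* Each set bit in the (at most 32-bit) legacy rate mask corresponds to
	 * an entry in the band's bitrate table (bitrates are in 100 kbps
	 * units); ath10k_mac_bitrate_to_rate() is assumed here to translate
	 * that bitrate into the firmware's hardware rate code.
	 */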
2262 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) {
2263 if (!(ratemask & 1))
2264 continue;
2265
2266 rate = ath10k_mac_bitrate_to_rate(rates->bitrate);
2267 rateset->rates[rateset->num_rates] = rate;
2268 rateset->num_rates++;
2269 }
2270 }
2271
2272 static bool
2273 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
2274 {
2275 int nss;
2276
2277 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++)
2278 if (ht_mcs_mask[nss])
2279 return false;
2280
2281 return true;
2282 }
2283
2284 static bool
2285 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
2286 {
2287 int nss;
2288
2289 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++)
2290 if (vht_mcs_mask[nss])
2291 return false;
2292
2293 return true;
2294 }
2295
2296 static void ath10k_peer_assoc_h_ht(struct ath10k *ar,
2297 struct ieee80211_vif *vif,
2298 struct ieee80211_sta *sta,
2299 struct wmi_peer_assoc_complete_arg *arg)
2300 {
2301 const struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
2302 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2303 struct cfg80211_chan_def def;
2304 enum nl80211_band band;
2305 const u8 *ht_mcs_mask;
2306 const u16 *vht_mcs_mask;
2307 int i, n;
2308 u8 max_nss;
2309 u32 stbc;
2310
2311 lockdep_assert_held(&ar->conf_mutex);
2312
2313 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2314 return;
2315
2316 if (!ht_cap->ht_supported)
2317 return;
2318
2319 band = def.chan->band;
2320 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2321 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2322
2323 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) &&
2324 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2325 return;
2326
2327 arg->peer_flags |= ar->wmi.peer_flags->ht;
2328 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2329 ht_cap->ampdu_factor)) - 1;
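	/* With IEEE80211_HT_MAX_AMPDU_FACTOR == 13, an ampdu_factor of e.g. 3
	 * yields a maximum A-MPDU length of (1 << 16) - 1 = 65535 bytes.
	 */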
2330
2331 arg->peer_mpdu_density =
2332 ath10k_parse_mpdudensity(ht_cap->ampdu_density);
2333
2334 arg->peer_ht_caps = ht_cap->cap;
2335 arg->peer_rate_caps |= WMI_RC_HT_FLAG;
2336
2337 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING)
2338 arg->peer_flags |= ar->wmi.peer_flags->ldbc;
2339
2340 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40) {
2341 arg->peer_flags |= ar->wmi.peer_flags->bw40;
2342 arg->peer_rate_caps |= WMI_RC_CW40_FLAG;
2343 }
2344
2345 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) {
2346 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20)
2347 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2348
2349 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40)
2350 arg->peer_rate_caps |= WMI_RC_SGI_FLAG;
2351 }
2352
2353 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) {
2354 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG;
2355 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2356 }
2357
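	/* The RX STBC subfield is a 2-bit count of spatial streams the peer
	 * can receive with STBC; extract it and shift it into the WMI
	 * rate-control RX STBC bit position.
	 */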
2358 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) {
2359 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC;
2360 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT;
2361 stbc = stbc << WMI_RC_RX_STBC_FLAG_S;
2362 arg->peer_rate_caps |= stbc;
2363 arg->peer_flags |= ar->wmi.peer_flags->stbc;
2364 }
2365
2366 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2])
2367 arg->peer_rate_caps |= WMI_RC_TS_FLAG;
2368 else if (ht_cap->mcs.rx_mask[1])
2369 arg->peer_rate_caps |= WMI_RC_DS_FLAG;
2370
2371 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++)
2372 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) &&
2373 (ht_mcs_mask[i / 8] & BIT(i % 8))) {
2374 max_nss = (i / 8) + 1;
2375 arg->peer_ht_rates.rates[n++] = i;
2376 }
2377
2378 /*
2379 * This is a workaround for HT-enabled STAs which break the spec
2380 * and have no HT capabilities RX mask (no HT RX MCS map).
2381 *
2382 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS),
2383 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs.
2384 *
2385 * Firmware asserts if such situation occurs.
2386 */
2387 if (n == 0) {
2388 arg->peer_ht_rates.num_rates = 8;
2389 for (i = 0; i < arg->peer_ht_rates.num_rates; i++)
2390 arg->peer_ht_rates.rates[i] = i;
2391 } else {
2392 arg->peer_ht_rates.num_rates = n;
2393 arg->peer_num_spatial_streams = min(sta->deflink.rx_nss,
2394 max_nss);
2395 }
2396
2397 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n",
2398 arg->addr,
2399 arg->peer_ht_rates.num_rates,
2400 arg->peer_num_spatial_streams);
2401 }
2402
2403 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar,
2404 struct ath10k_vif *arvif,
2405 struct ieee80211_sta *sta)
2406 {
2407 u32 uapsd = 0;
2408 u32 max_sp = 0;
2409 int ret = 0;
2410
2411 lockdep_assert_held(&ar->conf_mutex);
2412
2413 if (sta->wme && sta->uapsd_queues) {
2414 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n",
2415 sta->uapsd_queues, sta->max_sp);
2416
2417 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
2418 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN |
2419 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN;
2420 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
2421 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN |
2422 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN;
2423 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
2424 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN |
2425 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN;
2426 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
2427 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN |
2428 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN;
2429
2430 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP)
2431 max_sp = sta->max_sp;
2432
2433 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2434 sta->addr,
2435 WMI_AP_PS_PEER_PARAM_UAPSD,
2436 uapsd);
2437 if (ret) {
2438 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n",
2439 arvif->vdev_id, ret);
2440 return ret;
2441 }
2442
2443 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id,
2444 sta->addr,
2445 WMI_AP_PS_PEER_PARAM_MAX_SP,
2446 max_sp);
2447 if (ret) {
2448 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n",
2449 arvif->vdev_id, ret);
2450 return ret;
2451 }
2452
2453 /* TODO setup this based on STA listen interval and
2454 * beacon interval. Currently we don't know
2455 * sta->listen_interval - mac80211 patch required.
2456 	 * For now, use 10 seconds.
2457 */
2458 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr,
2459 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME,
2460 10);
2461 if (ret) {
2462 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n",
2463 arvif->vdev_id, ret);
2464 return ret;
2465 }
2466 }
2467
2468 return 0;
2469 }
2470
2471 static u16
2472 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set,
2473 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX])
2474 {
2475 int idx_limit;
2476 int nss;
2477 u16 mcs_map;
2478 u16 mcs;
2479
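	/* The VHT MCS map carries one 2-bit field per spatial stream (NSS 1 in
	 * bits 0-1, NSS 2 in bits 2-3, and so on). For each stream, clamp the
	 * advertised TX MCS set against the user-configured mask and write the
	 * resulting 2-bit value back in place.
	 */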
2480 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) {
2481 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) &
2482 vht_mcs_limit[nss];
2483
2484 if (mcs_map)
2485 idx_limit = fls(mcs_map) - 1;
2486 else
2487 idx_limit = -1;
2488
2489 switch (idx_limit) {
2490 case 0:
2491 case 1:
2492 case 2:
2493 case 3:
2494 case 4:
2495 case 5:
2496 case 6:
2497 default:
2498 /* see ath10k_mac_can_set_bitrate_mask() */
2499 WARN_ON(1);
2500 fallthrough;
2501 case -1:
2502 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED;
2503 break;
2504 case 7:
2505 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7;
2506 break;
2507 case 8:
2508 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8;
2509 break;
2510 case 9:
2511 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9;
2512 break;
2513 }
2514
2515 tx_mcs_set &= ~(0x3 << (nss * 2));
2516 tx_mcs_set |= mcs << (nss * 2);
2517 }
2518
2519 return tx_mcs_set;
2520 }
2521
2522 static u32 get_160mhz_nss_from_maxrate(int rate)
2523 {
2524 u32 nss;
2525
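	/* The rates below are per-NSS maximum 160 MHz data rates (in Mbps) as
	 * typically advertised in rx_highest, e.g. 780 Mbps for a single
	 * stream; this mapping is a best-effort inference, not exhaustive.
	 */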
2526 switch (rate) {
2527 case 780:
2528 nss = 1;
2529 break;
2530 case 1560:
2531 nss = 2;
2532 break;
2533 case 2106:
2534 		nss = 3; /* MCS9 is not supported for 3 streams at 160 MHz per spec */
2535 break;
2536 case 3120:
2537 nss = 4;
2538 break;
2539 default:
2540 nss = 1;
2541 }
2542
2543 return nss;
2544 }
2545
2546 static void ath10k_peer_assoc_h_vht(struct ath10k *ar,
2547 struct ieee80211_vif *vif,
2548 struct ieee80211_sta *sta,
2549 struct wmi_peer_assoc_complete_arg *arg)
2550 {
2551 const struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
2552 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2553 struct ath10k_hw_params *hw = &ar->hw_params;
2554 struct cfg80211_chan_def def;
2555 enum nl80211_band band;
2556 const u16 *vht_mcs_mask;
2557 u8 ampdu_factor;
2558 u8 max_nss, vht_mcs;
2559 int i;
2560
2561 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2562 return;
2563
2564 if (!vht_cap->vht_supported)
2565 return;
2566
2567 band = def.chan->band;
2568 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2569
2570 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask))
2571 return;
2572
2573 arg->peer_flags |= ar->wmi.peer_flags->vht;
2574
2575 if (def.chan->band == NL80211_BAND_2GHZ)
2576 arg->peer_flags |= ar->wmi.peer_flags->vht_2g;
2577
2578 arg->peer_vht_caps = vht_cap->cap;
2579
2580 ampdu_factor = (vht_cap->cap &
2581 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >>
2582 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
2583
2584 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to
2585 * zero in VHT IE. Using it would result in degraded throughput.
2586 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep
2587 * it if VHT max_mpdu is smaller.
2588 */
2589 arg->peer_max_mpdu = max(arg->peer_max_mpdu,
2590 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR +
2591 ampdu_factor)) - 1);
2592
2593 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
2594 arg->peer_flags |= ar->wmi.peer_flags->bw80;
2595
2596 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160)
2597 arg->peer_flags |= ar->wmi.peer_flags->bw160;
2598
2599 /* Calculate peer NSS capability from VHT capabilities if STA
2600 * supports VHT.
2601 */
2602 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) {
2603 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >>
2604 (2 * i) & 3;
2605
2606 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) &&
2607 vht_mcs_mask[i])
2608 max_nss = i + 1;
2609 }
2610 arg->peer_num_spatial_streams = min(sta->deflink.rx_nss, max_nss);
2611 arg->peer_vht_rates.rx_max_rate =
2612 __le16_to_cpu(vht_cap->vht_mcs.rx_highest);
2613 arg->peer_vht_rates.rx_mcs_set =
2614 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map);
2615 arg->peer_vht_rates.tx_max_rate =
2616 __le16_to_cpu(vht_cap->vht_mcs.tx_highest);
2617 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit(
2618 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask);
2619
2620 	/* Configure the bandwidth-NSS mapping in firmware
2621 	 * for the chip's TX chain setting at 160 MHz bandwidth.
2622 */
2623 if (arg->peer_phymode == MODE_11AC_VHT160 ||
2624 arg->peer_phymode == MODE_11AC_VHT80_80) {
2625 u32 rx_nss;
2626 u32 max_rate;
2627
2628 max_rate = arg->peer_vht_rates.rx_max_rate;
2629 rx_nss = get_160mhz_nss_from_maxrate(max_rate);
2630
2631 if (rx_nss == 0)
2632 rx_nss = arg->peer_num_spatial_streams;
2633 else
2634 rx_nss = min(arg->peer_num_spatial_streams, rx_nss);
2635
2636 max_rate = hw->vht160_mcs_tx_highest;
2637 rx_nss = min(rx_nss, get_160mhz_nss_from_maxrate(max_rate));
2638
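		/* The override field encodes (NSS - 1), i.e. a value of 0
		 * means one spatial stream at 160 MHz.
		 */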
2639 arg->peer_bw_rxnss_override =
2640 FIELD_PREP(WMI_PEER_NSS_MAP_ENABLE, 1) |
2641 FIELD_PREP(WMI_PEER_NSS_160MHZ_MASK, (rx_nss - 1));
2642
2643 if (arg->peer_phymode == MODE_11AC_VHT80_80) {
2644 arg->peer_bw_rxnss_override |=
2645 FIELD_PREP(WMI_PEER_NSS_80_80MHZ_MASK, (rx_nss - 1));
2646 }
2647 }
2648 ath10k_dbg(ar, ATH10K_DBG_MAC,
2649 "mac vht peer %pM max_mpdu %d flags 0x%x peer_rx_nss_override 0x%x\n",
2650 sta->addr, arg->peer_max_mpdu,
2651 arg->peer_flags, arg->peer_bw_rxnss_override);
2652 }
2653
2654 static void ath10k_peer_assoc_h_qos(struct ath10k *ar,
2655 struct ieee80211_vif *vif,
2656 struct ieee80211_sta *sta,
2657 struct wmi_peer_assoc_complete_arg *arg)
2658 {
2659 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2660
2661 switch (arvif->vdev_type) {
2662 case WMI_VDEV_TYPE_AP:
2663 if (sta->wme)
2664 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2665
2666 if (sta->wme && sta->uapsd_queues) {
2667 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd;
2668 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG;
2669 }
2670 break;
2671 case WMI_VDEV_TYPE_STA:
2672 if (sta->wme)
2673 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2674 break;
2675 case WMI_VDEV_TYPE_IBSS:
2676 if (sta->wme)
2677 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos;
2678 break;
2679 default:
2680 break;
2681 }
2682
2683 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n",
2684 sta->addr, !!(arg->peer_flags &
2685 arvif->ar->wmi.peer_flags->qos));
2686 }
2687
2688 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta)
2689 {
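	/* Shifting the 2 GHz supported-rates bitmap past the CCK entries
	 * leaves only the OFDM bits; a non-zero result means the station
	 * supports at least one OFDM rate.
	 */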
2690 return sta->deflink.supp_rates[NL80211_BAND_2GHZ] >>
2691 ATH10K_MAC_FIRST_OFDM_RATE_IDX;
2692 }
2693
2694 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar,
2695 struct ieee80211_sta *sta)
2696 {
2697 struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
2698
2699 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_160) {
2700 switch (vht_cap->cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) {
2701 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ:
2702 return MODE_11AC_VHT160;
2703 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ:
2704 return MODE_11AC_VHT80_80;
2705 default:
2706 /* not sure if this is a valid case? */
2707 return MODE_11AC_VHT160;
2708 }
2709 }
2710
2711 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_80)
2712 return MODE_11AC_VHT80;
2713
2714 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2715 return MODE_11AC_VHT40;
2716
2717 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_20)
2718 return MODE_11AC_VHT20;
2719
2720 return MODE_UNKNOWN;
2721 }
2722
2723 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar,
2724 struct ieee80211_vif *vif,
2725 struct ieee80211_sta *sta,
2726 struct wmi_peer_assoc_complete_arg *arg)
2727 {
2728 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2729 struct cfg80211_chan_def def;
2730 enum nl80211_band band;
2731 const u8 *ht_mcs_mask;
2732 const u16 *vht_mcs_mask;
2733 enum wmi_phy_mode phymode = MODE_UNKNOWN;
2734
2735 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
2736 return;
2737
2738 band = def.chan->band;
2739 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
2740 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
2741
2742 switch (band) {
2743 case NL80211_BAND_2GHZ:
2744 if (sta->deflink.vht_cap.vht_supported &&
2745 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2746 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2747 phymode = MODE_11AC_VHT40;
2748 else
2749 phymode = MODE_11AC_VHT20;
2750 } else if (sta->deflink.ht_cap.ht_supported &&
2751 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2752 if (sta->deflink.bandwidth == IEEE80211_STA_RX_BW_40)
2753 phymode = MODE_11NG_HT40;
2754 else
2755 phymode = MODE_11NG_HT20;
2756 } else if (ath10k_mac_sta_has_ofdm_only(sta)) {
2757 phymode = MODE_11G;
2758 } else {
2759 phymode = MODE_11B;
2760 }
2761
2762 break;
2763 case NL80211_BAND_5GHZ:
2764 /*
2765 * Check VHT first.
2766 */
2767 if (sta->deflink.vht_cap.vht_supported &&
2768 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) {
2769 phymode = ath10k_mac_get_phymode_vht(ar, sta);
2770 } else if (sta->deflink.ht_cap.ht_supported &&
2771 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) {
2772 if (sta->deflink.bandwidth >= IEEE80211_STA_RX_BW_40)
2773 phymode = MODE_11NA_HT40;
2774 else
2775 phymode = MODE_11NA_HT20;
2776 } else {
2777 phymode = MODE_11A;
2778 }
2779
2780 break;
2781 default:
2782 break;
2783 }
2784
2785 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n",
2786 sta->addr, ath10k_wmi_phymode_str(phymode));
2787
2788 arg->peer_phymode = phymode;
2789 WARN_ON(phymode == MODE_UNKNOWN);
2790 }
2791
2792 static int ath10k_peer_assoc_prepare(struct ath10k *ar,
2793 struct ieee80211_vif *vif,
2794 struct ieee80211_sta *sta,
2795 struct wmi_peer_assoc_complete_arg *arg)
2796 {
2797 lockdep_assert_held(&ar->conf_mutex);
2798
2799 memset(arg, 0, sizeof(*arg));
2800
2801 ath10k_peer_assoc_h_basic(ar, vif, sta, arg);
2802 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg);
2803 ath10k_peer_assoc_h_rates(ar, vif, sta, arg);
2804 ath10k_peer_assoc_h_ht(ar, vif, sta, arg);
2805 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg);
2806 ath10k_peer_assoc_h_vht(ar, vif, sta, arg);
2807 ath10k_peer_assoc_h_qos(ar, vif, sta, arg);
2808
2809 return 0;
2810 }
2811
2812 static const u32 ath10k_smps_map[] = {
2813 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC,
2814 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC,
2815 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE,
2816 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE,
2817 };
2818
2819 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif,
2820 const u8 *addr,
2821 const struct ieee80211_sta_ht_cap *ht_cap)
2822 {
2823 int smps;
2824
2825 if (!ht_cap->ht_supported)
2826 return 0;
2827
2828 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS;
2829 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT;
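	/* The SM power save field is two bits wide; after the shift it indexes
	 * directly into ath10k_smps_map above.
	 */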
2830
2831 if (smps >= ARRAY_SIZE(ath10k_smps_map))
2832 return -EINVAL;
2833
2834 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr,
2835 ar->wmi.peer_param->smps_state,
2836 ath10k_smps_map[smps]);
2837 }
2838
2839 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar,
2840 struct ieee80211_vif *vif,
2841 struct ieee80211_sta_vht_cap vht_cap)
2842 {
2843 struct ath10k_vif *arvif = (void *)vif->drv_priv;
2844 int ret;
2845 u32 param;
2846 u32 value;
2847
2848 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC)
2849 return 0;
2850
2851 if (!(ar->vht_cap_info &
2852 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2853 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE |
2854 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2855 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)))
2856 return 0;
2857
2858 param = ar->wmi.vdev_param->txbf;
2859 value = 0;
2860
2861 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED))
2862 return 0;
2863
2864 /* The following logic is correct. If a remote STA advertises support
2865 	 * for being a beamformer then we should enable beamformee operation on our side.
2866 */
2867
2868 if (ar->vht_cap_info &
2869 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
2870 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
2871 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
2872 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2873
2874 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
2875 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE;
2876 }
2877
2878 if (ar->vht_cap_info &
2879 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
2880 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
2881 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
2882 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2883
2884 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
2885 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER;
2886 }
2887
2888 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE)
2889 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
2890
2891 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER)
2892 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
2893
2894 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value);
2895 if (ret) {
2896 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n",
2897 value, ret);
2898 return ret;
2899 }
2900
2901 return 0;
2902 }
2903
2904 static bool ath10k_mac_is_connected(struct ath10k *ar)
2905 {
2906 struct ath10k_vif *arvif;
2907
2908 list_for_each_entry(arvif, &ar->arvifs, list) {
2909 if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
2910 return true;
2911 }
2912
2913 return false;
2914 }
2915
2916 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower)
2917 {
2918 int ret;
2919 u32 param;
2920 int tx_power_2g, tx_power_5g;
2921 bool connected;
2922
2923 lockdep_assert_held(&ar->conf_mutex);
2924
2925 /* ath10k internally uses unit of 0.5 dBm so multiply by 2 */
2926 tx_power_2g = txpower * 2;
2927 tx_power_5g = txpower * 2;
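	/* e.g. a 20 dBm request becomes 40 in firmware units */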
2928
2929 connected = ath10k_mac_is_connected(ar);
2930
2931 if (connected && ar->tx_power_2g_limit)
2932 if (tx_power_2g > ar->tx_power_2g_limit)
2933 tx_power_2g = ar->tx_power_2g_limit;
2934
2935 if (connected && ar->tx_power_5g_limit)
2936 if (tx_power_5g > ar->tx_power_5g_limit)
2937 tx_power_5g = ar->tx_power_5g_limit;
2938
2939 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower 2g: %d, 5g: %d\n",
2940 tx_power_2g, tx_power_5g);
2941
2942 param = ar->wmi.pdev_param->txpower_limit2g;
2943 ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_2g);
2944 if (ret) {
2945 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n",
2946 tx_power_2g, ret);
2947 return ret;
2948 }
2949
2950 param = ar->wmi.pdev_param->txpower_limit5g;
2951 ret = ath10k_wmi_pdev_set_param(ar, param, tx_power_5g);
2952 if (ret) {
2953 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n",
2954 tx_power_5g, ret);
2955 return ret;
2956 }
2957
2958 return 0;
2959 }
2960
2961 static int ath10k_mac_txpower_recalc(struct ath10k *ar)
2962 {
2963 struct ath10k_vif *arvif;
2964 int ret, txpower = -1;
2965
2966 lockdep_assert_held(&ar->conf_mutex);
2967
2968 list_for_each_entry(arvif, &ar->arvifs, list) {
2969 /* txpower not initialized yet? */
2970 if (arvif->txpower == INT_MIN)
2971 continue;
2972
2973 if (txpower == -1)
2974 txpower = arvif->txpower;
2975 else
2976 txpower = min(txpower, arvif->txpower);
2977 }
2978
2979 if (txpower == -1)
2980 return 0;
2981
2982 ret = ath10k_mac_txpower_setup(ar, txpower);
2983 if (ret) {
2984 ath10k_warn(ar, "failed to setup tx power %d: %d\n",
2985 txpower, ret);
2986 return ret;
2987 }
2988
2989 return 0;
2990 }
2991
2992 static int ath10k_mac_set_sar_power(struct ath10k *ar)
2993 {
2994 if (!ar->hw_params.dynamic_sar_support)
2995 return -EOPNOTSUPP;
2996
2997 if (!ath10k_mac_is_connected(ar))
2998 return 0;
2999
3000 /* if connected, then arvif->txpower must be valid */
3001 return ath10k_mac_txpower_recalc(ar);
3002 }
3003
3004 static int ath10k_mac_set_sar_specs(struct ieee80211_hw *hw,
3005 const struct cfg80211_sar_specs *sar)
3006 {
3007 const struct cfg80211_sar_sub_specs *sub_specs;
3008 struct ath10k *ar = hw->priv;
3009 u32 i;
3010 int ret;
3011
3012 mutex_lock(&ar->conf_mutex);
3013
3014 if (!ar->hw_params.dynamic_sar_support) {
3015 ret = -EOPNOTSUPP;
3016 goto err;
3017 }
3018
3019 if (!sar || sar->type != NL80211_SAR_TYPE_POWER ||
3020 sar->num_sub_specs == 0) {
3021 ret = -EINVAL;
3022 goto err;
3023 }
3024
3025 sub_specs = sar->sub_specs;
3026
3027 	/* 0 dBm is not a practical limit for ath10k, so treat 0
3028 	 * as meaning no SAR limitation.
3029 */
3030 ar->tx_power_2g_limit = 0;
3031 ar->tx_power_5g_limit = 0;
3032
3033 	/* Note: the SAR power is in 0.25 dBm units, while ath10k uses
3034 	 * 0.5 dBm units.
3035 */
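	/* e.g. a sub-spec power of 80 (20 dBm in 0.25 dBm steps) becomes a
	 * limit of 40 in ath10k's 0.5 dBm units.
	 */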
3036 for (i = 0; i < sar->num_sub_specs; i++) {
3037 if (sub_specs->freq_range_index == 0)
3038 ar->tx_power_2g_limit = sub_specs->power / 2;
3039 else if (sub_specs->freq_range_index == 1)
3040 ar->tx_power_5g_limit = sub_specs->power / 2;
3041
3042 sub_specs++;
3043 }
3044
3045 ret = ath10k_mac_set_sar_power(ar);
3046 if (ret) {
3047 ath10k_warn(ar, "failed to set sar power: %d", ret);
3048 goto err;
3049 }
3050
3051 err:
3052 mutex_unlock(&ar->conf_mutex);
3053 return ret;
3054 }
3055
3056 /* can be called only in mac80211 callbacks due to `key_count` usage */
3057 static void ath10k_bss_assoc(struct ieee80211_hw *hw,
3058 struct ieee80211_vif *vif,
3059 struct ieee80211_bss_conf *bss_conf)
3060 {
3061 struct ath10k *ar = hw->priv;
3062 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3063 struct ieee80211_sta_ht_cap ht_cap;
3064 struct ieee80211_sta_vht_cap vht_cap;
3065 struct wmi_peer_assoc_complete_arg peer_arg;
3066 struct ieee80211_sta *ap_sta;
3067 int ret;
3068
3069 lockdep_assert_held(&ar->conf_mutex);
3070
3071 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n",
3072 arvif->vdev_id, arvif->bssid, arvif->aid);
3073
3074 rcu_read_lock();
3075
3076 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid);
3077 if (!ap_sta) {
3078 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n",
3079 bss_conf->bssid, arvif->vdev_id);
3080 rcu_read_unlock();
3081 return;
3082 }
3083
3084 /* ap_sta must be accessed only within rcu section which must be left
3085 * before calling ath10k_setup_peer_smps() which might sleep.
3086 */
3087 ht_cap = ap_sta->deflink.ht_cap;
3088 vht_cap = ap_sta->deflink.vht_cap;
3089
3090 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg);
3091 if (ret) {
3092 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n",
3093 bss_conf->bssid, arvif->vdev_id, ret);
3094 rcu_read_unlock();
3095 return;
3096 }
3097
3098 rcu_read_unlock();
3099
3100 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
3101 if (ret) {
3102 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n",
3103 bss_conf->bssid, arvif->vdev_id, ret);
3104 return;
3105 }
3106
3107 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap);
3108 if (ret) {
3109 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n",
3110 arvif->vdev_id, ret);
3111 return;
3112 }
3113
3114 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
3115 if (ret) {
3116 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n",
3117 arvif->vdev_id, bss_conf->bssid, ret);
3118 return;
3119 }
3120
3121 ath10k_dbg(ar, ATH10K_DBG_MAC,
3122 "mac vdev %d up (associated) bssid %pM aid %d\n",
3123 arvif->vdev_id, bss_conf->bssid, vif->cfg.aid);
3124
3125 WARN_ON(arvif->is_up);
3126
3127 arvif->aid = vif->cfg.aid;
3128 ether_addr_copy(arvif->bssid, bss_conf->bssid);
3129
3130 ret = ath10k_wmi_pdev_set_param(ar,
3131 ar->wmi.pdev_param->peer_stats_info_enable, 1);
3132 if (ret)
3133 ath10k_warn(ar, "failed to enable peer stats info: %d\n", ret);
3134
3135 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid);
3136 if (ret) {
3137 ath10k_warn(ar, "failed to set vdev %d up: %d\n",
3138 arvif->vdev_id, ret);
3139 return;
3140 }
3141
3142 arvif->is_up = true;
3143
3144 ath10k_mac_set_sar_power(ar);
3145
3146 /* Workaround: Some firmware revisions (tested with qca6174
3147 	 * WLAN.RM.2.0-00073) have a buggy powersave state machine and must be
3148 	 * poked with a peer param command.
3149 */
3150 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid,
3151 ar->wmi.peer_param->dummy_var, 1);
3152 if (ret) {
3153 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n",
3154 arvif->bssid, arvif->vdev_id, ret);
3155 return;
3156 }
3157 }
3158
3159 static void ath10k_bss_disassoc(struct ieee80211_hw *hw,
3160 struct ieee80211_vif *vif)
3161 {
3162 struct ath10k *ar = hw->priv;
3163 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3164 struct ieee80211_sta_vht_cap vht_cap = {};
3165 int ret;
3166
3167 lockdep_assert_held(&ar->conf_mutex);
3168
3169 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n",
3170 arvif->vdev_id, arvif->bssid);
3171
3172 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
3173 if (ret)
3174 ath10k_warn(ar, "failed to down vdev %i: %d\n",
3175 arvif->vdev_id, ret);
3176
3177 arvif->def_wep_key_idx = -1;
3178
3179 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap);
3180 if (ret) {
3181 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n",
3182 arvif->vdev_id, ret);
3183 return;
3184 }
3185
3186 arvif->is_up = false;
3187
3188 ath10k_mac_txpower_recalc(ar);
3189
3190 cancel_delayed_work_sync(&arvif->connection_loss_work);
3191 }
3192
3193 static int ath10k_new_peer_tid_config(struct ath10k *ar,
3194 struct ieee80211_sta *sta,
3195 struct ath10k_vif *arvif)
3196 {
3197 struct wmi_per_peer_per_tid_cfg_arg arg = {};
3198 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
3199 bool config_apply;
3200 int ret, i;
3201
3202 for (i = 0; i < ATH10K_TID_MAX; i++) {
3203 config_apply = false;
3204 if (arvif->retry_long[i] || arvif->ampdu[i] ||
3205 arvif->rate_ctrl[i] || arvif->rtscts[i]) {
3206 config_apply = true;
3207 arg.tid = i;
3208 arg.vdev_id = arvif->vdev_id;
3209 arg.retry_count = arvif->retry_long[i];
3210 arg.aggr_control = arvif->ampdu[i];
3211 arg.rate_ctrl = arvif->rate_ctrl[i];
3212 arg.rcode_flags = arvif->rate_code[i];
3213
3214 if (arvif->rtscts[i])
3215 arg.ext_tid_cfg_bitmap =
3216 WMI_EXT_TID_RTS_CTS_CONFIG;
3217 else
3218 arg.ext_tid_cfg_bitmap = 0;
3219
3220 arg.rtscts_ctrl = arvif->rtscts[i];
3221 }
3222
3223 if (arvif->noack[i]) {
3224 arg.ack_policy = arvif->noack[i];
3225 arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
3226 arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
3227 config_apply = true;
3228 }
3229
3230 		/* Assign a default value (-1) to a newly connected station.
3231 		 * This is used to identify TIDs for which no station-specific
3232 		 * configuration has been applied.
3233 */
3234 arsta->retry_long[i] = -1;
3235 arsta->noack[i] = -1;
3236 arsta->ampdu[i] = -1;
3237
3238 if (!config_apply)
3239 continue;
3240
3241 ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
3242
3243 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
3244 if (ret) {
3245 ath10k_warn(ar, "failed to set per tid retry/aggr config for sta %pM: %d\n",
3246 sta->addr, ret);
3247 return ret;
3248 }
3249
3250 memset(&arg, 0, sizeof(arg));
3251 }
3252
3253 return 0;
3254 }
3255
3256 static int ath10k_station_assoc(struct ath10k *ar,
3257 struct ieee80211_vif *vif,
3258 struct ieee80211_sta *sta,
3259 bool reassoc)
3260 {
3261 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3262 struct wmi_peer_assoc_complete_arg peer_arg;
3263 int ret = 0;
3264
3265 lockdep_assert_held(&ar->conf_mutex);
3266
3267 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg);
3268 if (ret) {
3269 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n",
3270 sta->addr, arvif->vdev_id, ret);
3271 return ret;
3272 }
3273
3274 ret = ath10k_wmi_peer_assoc(ar, &peer_arg);
3275 if (ret) {
3276 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n",
3277 sta->addr, arvif->vdev_id, ret);
3278 return ret;
3279 }
3280
3281 	/* Re-assoc is run only to update supported rates for a given station. It
3282 * doesn't make much sense to reconfigure the peer completely.
3283 */
3284 if (!reassoc) {
3285 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr,
3286 &sta->deflink.ht_cap);
3287 if (ret) {
3288 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n",
3289 arvif->vdev_id, ret);
3290 return ret;
3291 }
3292
3293 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta);
3294 if (ret) {
3295 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n",
3296 sta->addr, arvif->vdev_id, ret);
3297 return ret;
3298 }
3299
3300 if (!sta->wme) {
3301 arvif->num_legacy_stations++;
3302 ret = ath10k_recalc_rtscts_prot(arvif);
3303 if (ret) {
3304 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3305 arvif->vdev_id, ret);
3306 return ret;
3307 }
3308 }
3309
3310 /* Plumb cached keys only for static WEP */
3311 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) {
3312 ret = ath10k_install_peer_wep_keys(arvif, sta->addr);
3313 if (ret) {
3314 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n",
3315 arvif->vdev_id, ret);
3316 return ret;
3317 }
3318 }
3319 }
3320
3321 if (!test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map))
3322 return ret;
3323
3324 return ath10k_new_peer_tid_config(ar, sta, arvif);
3325 }
3326
3327 static int ath10k_station_disassoc(struct ath10k *ar,
3328 struct ieee80211_vif *vif,
3329 struct ieee80211_sta *sta)
3330 {
3331 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3332 int ret = 0;
3333
3334 lockdep_assert_held(&ar->conf_mutex);
3335
3336 if (!sta->wme) {
3337 arvif->num_legacy_stations--;
3338 ret = ath10k_recalc_rtscts_prot(arvif);
3339 if (ret) {
3340 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
3341 arvif->vdev_id, ret);
3342 return ret;
3343 }
3344 }
3345
3346 ret = ath10k_clear_peer_keys(arvif, sta->addr);
3347 if (ret) {
3348 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n",
3349 arvif->vdev_id, ret);
3350 return ret;
3351 }
3352
3353 return ret;
3354 }
3355
3356 /**************/
3357 /* Regulatory */
3358 /**************/
3359
3360 static int ath10k_update_channel_list(struct ath10k *ar)
3361 {
3362 struct ieee80211_hw *hw = ar->hw;
3363 struct ieee80211_supported_band **bands;
3364 enum nl80211_band band;
3365 struct ieee80211_channel *channel;
3366 struct wmi_scan_chan_list_arg arg = {0};
3367 struct wmi_channel_arg *ch;
3368 bool passive;
3369 int len;
3370 int ret;
3371 int i;
3372
3373 lockdep_assert_held(&ar->conf_mutex);
3374
3375 bands = hw->wiphy->bands;
3376 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3377 if (!bands[band])
3378 continue;
3379
3380 for (i = 0; i < bands[band]->n_channels; i++) {
3381 if (bands[band]->channels[i].flags &
3382 IEEE80211_CHAN_DISABLED)
3383 continue;
3384
3385 arg.n_channels++;
3386 }
3387 }
3388
3389 len = sizeof(struct wmi_channel_arg) * arg.n_channels;
3390 arg.channels = kzalloc(len, GFP_KERNEL);
3391 if (!arg.channels)
3392 return -ENOMEM;
3393
3394 ch = arg.channels;
3395 for (band = 0; band < NUM_NL80211_BANDS; band++) {
3396 if (!bands[band])
3397 continue;
3398
3399 for (i = 0; i < bands[band]->n_channels; i++) {
3400 channel = &bands[band]->channels[i];
3401
3402 if (channel->flags & IEEE80211_CHAN_DISABLED)
3403 continue;
3404
3405 ch->allow_ht = true;
3406
3407 /* FIXME: when should we really allow VHT? */
3408 ch->allow_vht = true;
3409
3410 ch->allow_ibss =
3411 !(channel->flags & IEEE80211_CHAN_NO_IR);
3412
3413 ch->ht40plus =
3414 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS);
3415
3416 ch->chan_radar =
3417 !!(channel->flags & IEEE80211_CHAN_RADAR);
3418
3419 passive = channel->flags & IEEE80211_CHAN_NO_IR;
3420 ch->passive = passive;
3421
3422 /* the firmware is ignoring the "radar" flag of the
3423 * channel and is scanning actively using Probe Requests
3424 * on "Radar detection"/DFS channels which are not
3425 * marked as "available"
3426 */
3427 ch->passive |= ch->chan_radar;
3428
3429 ch->freq = channel->center_freq;
3430 ch->band_center_freq1 = channel->center_freq;
3431 ch->min_power = 0;
3432 ch->max_power = channel->max_power * 2;
3433 ch->max_reg_power = channel->max_reg_power * 2;
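			/* channel powers from mac80211 are in dBm; the target
			 * expects 0.5 dBm units, hence the doubling above.
			 */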
3434 ch->max_antenna_gain = channel->max_antenna_gain;
3435 ch->reg_class_id = 0; /* FIXME */
3436
3437 /* FIXME: why use only legacy modes, why not any
3438 * HT/VHT modes? Would that even make any
3439 * difference?
3440 */
3441 if (channel->band == NL80211_BAND_2GHZ)
3442 ch->mode = MODE_11G;
3443 else
3444 ch->mode = MODE_11A;
3445
3446 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN))
3447 continue;
3448
3449 ath10k_dbg(ar, ATH10K_DBG_WMI,
3450 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n",
3451 ch - arg.channels, arg.n_channels,
3452 ch->freq, ch->max_power, ch->max_reg_power,
3453 ch->max_antenna_gain, ch->mode);
3454
3455 ch++;
3456 }
3457 }
3458
3459 ret = ath10k_wmi_scan_chan_list(ar, &arg);
3460 kfree(arg.channels);
3461
3462 return ret;
3463 }
3464
3465 static enum wmi_dfs_region
3466 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region)
3467 {
3468 switch (dfs_region) {
3469 case NL80211_DFS_UNSET:
3470 return WMI_UNINIT_DFS_DOMAIN;
3471 case NL80211_DFS_FCC:
3472 return WMI_FCC_DFS_DOMAIN;
3473 case NL80211_DFS_ETSI:
3474 return WMI_ETSI_DFS_DOMAIN;
3475 case NL80211_DFS_JP:
3476 return WMI_MKK4_DFS_DOMAIN;
3477 }
3478 return WMI_UNINIT_DFS_DOMAIN;
3479 }
3480
3481 static void ath10k_regd_update(struct ath10k *ar)
3482 {
3483 struct reg_dmn_pair_mapping *regpair;
3484 int ret;
3485 enum wmi_dfs_region wmi_dfs_reg;
3486 enum nl80211_dfs_regions nl_dfs_reg;
3487
3488 lockdep_assert_held(&ar->conf_mutex);
3489
3490 ret = ath10k_update_channel_list(ar);
3491 if (ret)
3492 ath10k_warn(ar, "failed to update channel list: %d\n", ret);
3493
3494 regpair = ar->ath_common.regulatory.regpair;
3495
3496 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3497 nl_dfs_reg = ar->dfs_detector->region;
3498 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg);
3499 } else {
3500 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN;
3501 }
3502
3503 /* Target allows setting up per-band regdomain but ath_common provides
3504 	 * only a combined one.
3505 */
3506 ret = ath10k_wmi_pdev_set_regdomain(ar,
3507 regpair->reg_domain,
3508 regpair->reg_domain, /* 2ghz */
3509 regpair->reg_domain, /* 5ghz */
3510 regpair->reg_2ghz_ctl,
3511 regpair->reg_5ghz_ctl,
3512 wmi_dfs_reg);
3513 if (ret)
3514 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret);
3515 }
3516
3517 static void ath10k_mac_update_channel_list(struct ath10k *ar,
3518 struct ieee80211_supported_band *band)
3519 {
3520 int i;
3521
3522 if (ar->low_5ghz_chan && ar->high_5ghz_chan) {
3523 for (i = 0; i < band->n_channels; i++) {
3524 if (band->channels[i].center_freq < ar->low_5ghz_chan ||
3525 band->channels[i].center_freq > ar->high_5ghz_chan)
3526 band->channels[i].flags |=
3527 IEEE80211_CHAN_DISABLED;
3528 }
3529 }
3530 }
3531
3532 static void ath10k_reg_notifier(struct wiphy *wiphy,
3533 struct regulatory_request *request)
3534 {
3535 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
3536 struct ath10k *ar = hw->priv;
3537 bool result;
3538
3539 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory);
3540
3541 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) {
3542 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n",
3543 request->dfs_region);
3544 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector,
3545 request->dfs_region);
3546 if (!result)
3547 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n",
3548 request->dfs_region);
3549 }
3550
3551 mutex_lock(&ar->conf_mutex);
3552 if (ar->state == ATH10K_STATE_ON)
3553 ath10k_regd_update(ar);
3554 mutex_unlock(&ar->conf_mutex);
3555
3556 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
3557 ath10k_mac_update_channel_list(ar,
3558 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]);
3559 }
3560
3561 static void ath10k_stop_radar_confirmation(struct ath10k *ar)
3562 {
3563 spin_lock_bh(&ar->data_lock);
3564 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED;
3565 spin_unlock_bh(&ar->data_lock);
3566
3567 cancel_work_sync(&ar->radar_confirmation_work);
3568 }
3569
3570 /***************/
3571 /* TX handlers */
3572 /***************/
3573
3574 enum ath10k_mac_tx_path {
3575 ATH10K_MAC_TX_HTT,
3576 ATH10K_MAC_TX_HTT_MGMT,
3577 ATH10K_MAC_TX_WMI_MGMT,
3578 ATH10K_MAC_TX_UNKNOWN,
3579 };
3580
3581 void ath10k_mac_tx_lock(struct ath10k *ar, int reason)
3582 {
3583 lockdep_assert_held(&ar->htt.tx_lock);
3584
3585 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3586 ar->tx_paused |= BIT(reason);
3587 ieee80211_stop_queues(ar->hw);
3588 }
3589
3590 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac,
3591 struct ieee80211_vif *vif)
3592 {
3593 struct ath10k *ar = data;
3594 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3595
3596 if (arvif->tx_paused)
3597 return;
3598
3599 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3600 }
3601
3602 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason)
3603 {
3604 lockdep_assert_held(&ar->htt.tx_lock);
3605
3606 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX);
3607 ar->tx_paused &= ~BIT(reason);
3608
3609 if (ar->tx_paused)
3610 return;
3611
3612 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3613 ATH10K_ITER_RESUME_FLAGS,
3614 ath10k_mac_tx_unlock_iter,
3615 ar);
3616
3617 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue);
3618 }
3619
3620 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason)
3621 {
3622 struct ath10k *ar = arvif->ar;
3623
3624 lockdep_assert_held(&ar->htt.tx_lock);
3625
3626 WARN_ON(reason >= BITS_PER_LONG);
3627 arvif->tx_paused |= BIT(reason);
3628 ieee80211_stop_queue(ar->hw, arvif->vdev_id);
3629 }
3630
3631 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason)
3632 {
3633 struct ath10k *ar = arvif->ar;
3634
3635 lockdep_assert_held(&ar->htt.tx_lock);
3636
3637 WARN_ON(reason >= BITS_PER_LONG);
3638 arvif->tx_paused &= ~BIT(reason);
3639
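	/* Wake the vdev queue only once both the global pause bitmap and this
	 * vif's own pause bitmap are fully cleared.
	 */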
3640 if (ar->tx_paused)
3641 return;
3642
3643 if (arvif->tx_paused)
3644 return;
3645
3646 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
3647 }
3648
3649 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif,
3650 enum wmi_tlv_tx_pause_id pause_id,
3651 enum wmi_tlv_tx_pause_action action)
3652 {
3653 struct ath10k *ar = arvif->ar;
3654
3655 lockdep_assert_held(&ar->htt.tx_lock);
3656
3657 switch (action) {
3658 case WMI_TLV_TX_PAUSE_ACTION_STOP:
3659 ath10k_mac_vif_tx_lock(arvif, pause_id);
3660 break;
3661 case WMI_TLV_TX_PAUSE_ACTION_WAKE:
3662 ath10k_mac_vif_tx_unlock(arvif, pause_id);
3663 break;
3664 default:
3665 ath10k_dbg(ar, ATH10K_DBG_BOOT,
3666 "received unknown tx pause action %d on vdev %i, ignoring\n",
3667 action, arvif->vdev_id);
3668 break;
3669 }
3670 }
3671
3672 struct ath10k_mac_tx_pause {
3673 u32 vdev_id;
3674 enum wmi_tlv_tx_pause_id pause_id;
3675 enum wmi_tlv_tx_pause_action action;
3676 };
3677
3678 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac,
3679 struct ieee80211_vif *vif)
3680 {
3681 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3682 struct ath10k_mac_tx_pause *arg = data;
3683
3684 if (arvif->vdev_id != arg->vdev_id)
3685 return;
3686
3687 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action);
3688 }
3689
3690 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id,
3691 enum wmi_tlv_tx_pause_id pause_id,
3692 enum wmi_tlv_tx_pause_action action)
3693 {
3694 struct ath10k_mac_tx_pause arg = {
3695 .vdev_id = vdev_id,
3696 .pause_id = pause_id,
3697 .action = action,
3698 };
3699
3700 spin_lock_bh(&ar->htt.tx_lock);
3701 ieee80211_iterate_active_interfaces_atomic(ar->hw,
3702 ATH10K_ITER_RESUME_FLAGS,
3703 ath10k_mac_handle_tx_pause_iter,
3704 &arg);
3705 spin_unlock_bh(&ar->htt.tx_lock);
3706 }
3707
3708 static enum ath10k_hw_txrx_mode
3709 ath10k_mac_tx_h_get_txmode(struct ath10k *ar,
3710 struct ieee80211_vif *vif,
3711 struct ieee80211_sta *sta,
3712 struct sk_buff *skb)
3713 {
3714 const struct ieee80211_hdr *hdr = (void *)skb->data;
3715 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
3716 __le16 fc = hdr->frame_control;
3717
3718 if (IEEE80211_SKB_CB(skb)->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
3719 return ATH10K_HW_TXRX_ETHERNET;
3720
3721 if (!vif || vif->type == NL80211_IFTYPE_MONITOR)
3722 return ATH10K_HW_TXRX_RAW;
3723
3724 if (ieee80211_is_mgmt(fc))
3725 return ATH10K_HW_TXRX_MGMT;
3726
3727 /* Workaround:
3728 *
3729 * NullFunc frames are mostly used to ping if a client or AP are still
3730 * reachable and responsive. This implies tx status reports must be
3731 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can
3732 * come to a conclusion that the other end disappeared and tear down
3733 * BSS connection or it can never disconnect from BSS/client (which is
3734 * the case).
3735 *
3736 * Firmware with HTT older than 3.0 delivers incorrect tx status for
3737 * NullFunc frames to driver. However there's a HTT Mgmt Tx command
3738 * which seems to deliver correct tx reports for NullFunc frames. The
3739 * downside of using it is it ignores client powersave state so it can
3740 * end up disconnecting sleeping clients in AP mode. It should fix STA
3741 	 * mode though because APs don't sleep.
3742 */
3743 if (ar->htt.target_version_major < 3 &&
3744 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) &&
3745 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3746 ar->running_fw->fw_file.fw_features))
3747 return ATH10K_HW_TXRX_MGMT;
3748
3749 /* Workaround:
3750 *
3751 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for
3752 * NativeWifi txmode - it selects AP key instead of peer key. It seems
3753 * to work with Ethernet txmode so use it.
3754 *
3755 * FIXME: Check if raw mode works with TDLS.
3756 */
3757 if (ieee80211_is_data_present(fc) && sta && sta->tdls)
3758 return ATH10K_HW_TXRX_ETHERNET;
3759
3760 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) ||
3761 skb_cb->flags & ATH10K_SKB_F_RAW_TX)
3762 return ATH10K_HW_TXRX_RAW;
3763
3764 return ATH10K_HW_TXRX_NATIVE_WIFI;
3765 }
3766
3767 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif,
3768 struct sk_buff *skb)
3769 {
3770 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3771 const struct ieee80211_hdr *hdr = (void *)skb->data;
3772 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT |
3773 IEEE80211_TX_CTL_INJECTED;
3774
3775 if (!ieee80211_has_protected(hdr->frame_control))
3776 return false;
3777
3778 if ((info->flags & mask) == mask)
3779 return false;
3780
3781 if (vif)
3782 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt;
3783
3784 return true;
3785 }
3786
3787 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS
3788 * Control in the header.
3789 */
3790 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb)
3791 {
3792 struct ieee80211_hdr *hdr = (void *)skb->data;
3793 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3794 u8 *qos_ctl;
3795
3796 if (!ieee80211_is_data_qos(hdr->frame_control))
3797 return;
3798
3799 qos_ctl = ieee80211_get_qos_ctl(hdr);
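/* Shift the 802.11 header forward by IEEE80211_QOS_CTL_LEN so that it
 * overwrites the QoS Control field, then trim the now-duplicated leading
 * bytes off the skb.
 */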
3800 memmove(skb->data + IEEE80211_QOS_CTL_LEN,
3801 skb->data, (void *)qos_ctl - (void *)skb->data);
3802 skb_pull(skb, IEEE80211_QOS_CTL_LEN);
3803
3804 /* Some firmware revisions don't handle sending QoS NullFunc well.
3805 * These frames are mainly used for CQM purposes so it doesn't really
3806 * matter whether a QoS NullFunc or a plain NullFunc is sent.
3807 */
3808 hdr = (void *)skb->data;
3809 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
3810 cb->flags &= ~ATH10K_SKB_F_QOS;
3811
3812 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
3813 }
3814
3815 static void ath10k_tx_h_8023(struct sk_buff *skb)
3816 {
3817 struct ieee80211_hdr *hdr;
3818 struct rfc1042_hdr *rfc1042;
3819 struct ethhdr *eth;
3820 size_t hdrlen;
3821 u8 da[ETH_ALEN];
3822 u8 sa[ETH_ALEN];
3823 __be16 type;
3824
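/* Strip the 802.11 and RFC1042/LLC-SNAP headers and rebuild a plain 802.3
 * Ethernet header in front of the payload, preserving DA, SA and the SNAP
 * ethertype.
 */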
3825 hdr = (void *)skb->data;
3826 hdrlen = ieee80211_hdrlen(hdr->frame_control);
3827 rfc1042 = (void *)skb->data + hdrlen;
3828
3829 ether_addr_copy(da, ieee80211_get_DA(hdr));
3830 ether_addr_copy(sa, ieee80211_get_SA(hdr));
3831 type = rfc1042->snap_type;
3832
3833 skb_pull(skb, hdrlen + sizeof(*rfc1042));
3834 skb_push(skb, sizeof(*eth));
3835
3836 eth = (void *)skb->data;
3837 ether_addr_copy(eth->h_dest, da);
3838 ether_addr_copy(eth->h_source, sa);
3839 eth->h_proto = type;
3840 }
3841
3842 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar,
3843 struct ieee80211_vif *vif,
3844 struct sk_buff *skb)
3845 {
3846 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
3847 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3848
3849 /* This case applies only to P2P_GO */
3850 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p)
3851 return;
3852
3853 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
3854 spin_lock_bh(&ar->data_lock);
3855 if (arvif->u.ap.noa_data)
3856 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len,
3857 GFP_ATOMIC))
3858 skb_put_data(skb, arvif->u.ap.noa_data,
3859 arvif->u.ap.noa_len);
3860 spin_unlock_bh(&ar->data_lock);
3861 }
3862 }
3863
3864 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar,
3865 struct ieee80211_vif *vif,
3866 struct ieee80211_txq *txq,
3867 struct ieee80211_sta *sta,
3868 struct sk_buff *skb, u16 airtime)
3869 {
3870 struct ieee80211_hdr *hdr = (void *)skb->data;
3871 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
3872 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
3873 bool is_data = ieee80211_is_data(hdr->frame_control) ||
3874 ieee80211_is_data_qos(hdr->frame_control);
3875 struct ath10k_vif *arvif = (void *)vif->drv_priv;
3876 struct ath10k_sta *arsta;
3877 u8 tid, *qos_ctl;
3878 bool noack = false;
3879
3880 cb->flags = 0;
3881
3882 if (info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) {
3883 cb->flags |= ATH10K_SKB_F_QOS; /* Assume data frames are QoS */
3884 goto finish_cb_fill;
3885 }
3886
3887 if (!ath10k_tx_h_use_hwcrypto(vif, skb))
3888 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3889
3890 if (ieee80211_is_mgmt(hdr->frame_control))
3891 cb->flags |= ATH10K_SKB_F_MGMT;
3892
3893 if (ieee80211_is_data_qos(hdr->frame_control)) {
3894 cb->flags |= ATH10K_SKB_F_QOS;
3895 qos_ctl = ieee80211_get_qos_ctl(hdr);
3896 tid = (*qos_ctl) & IEEE80211_QOS_CTL_TID_MASK;
3897
3898 if (arvif->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
3899 noack = true;
3900
3901 if (sta) {
3902 arsta = (struct ath10k_sta *)sta->drv_priv;
3903
3904 if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_NOACK)
3905 noack = true;
3906
3907 if (arsta->noack[tid] == WMI_PEER_TID_CONFIG_ACK)
3908 noack = false;
3909 }
3910
3911 if (noack)
3912 cb->flags |= ATH10K_SKB_F_NOACK_TID;
3913 }
3914
3915 /* Data frames encrypted in software will be posted to firmware
3916 * with tx encap mode set to RAW. Ex: Multicast traffic generated
3917 * for a specific VLAN group will always be encrypted in software.
3918 */
3919 if (is_data && ieee80211_has_protected(hdr->frame_control) &&
3920 !info->control.hw_key) {
3921 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT;
3922 cb->flags |= ATH10K_SKB_F_RAW_TX;
3923 }
3924
3925 finish_cb_fill:
3926 cb->vif = vif;
3927 cb->txq = txq;
3928 cb->airtime_est = airtime;
3929 if (sta) {
3930 arsta = (struct ath10k_sta *)sta->drv_priv;
3931 spin_lock_bh(&ar->data_lock);
3932 cb->ucast_cipher = arsta->ucast_cipher;
3933 spin_unlock_bh(&ar->data_lock);
3934 }
3935 }
3936
3937 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar)
3938 {
3939 /* FIXME: Not really sure since when the behaviour changed. At some
3940 * point new firmware stopped requiring creation of peer entries for
3941 * offchannel tx (and actually creating them causes issues with wmi-htc
3942 * tx credit replenishment and reliability). Assuming it's at least 3.4
3943 * because that's when the `freq` was introduced to TX_FRM HTT command.
3944 */
3945 return (ar->htt.target_version_major >= 3 &&
3946 ar->htt.target_version_minor >= 4 &&
3947 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV);
3948 }
3949
3950 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb)
3951 {
3952 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue;
3953
3954 if (skb_queue_len_lockless(q) >= ATH10K_MAX_NUM_MGMT_PENDING) {
3955 ath10k_warn(ar, "wmi mgmt tx queue is full\n");
3956 return -ENOSPC;
3957 }
3958
3959 skb_queue_tail(q, skb);
3960 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work);
3961
3962 return 0;
3963 }
3964
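/* Select the delivery path for an already-classified frame: data frames
 * (raw, native-wifi or ethernet txmode) always go over HTT, while
 * management frames go over WMI when the firmware advertises
 * management-over-WMI, over plain HTT tx on HTT >= 3.0, or over the
 * dedicated HTT mgmt tx command on older firmware.
 */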
3965 static enum ath10k_mac_tx_path
3966 ath10k_mac_tx_h_get_txpath(struct ath10k *ar,
3967 struct sk_buff *skb,
3968 enum ath10k_hw_txrx_mode txmode)
3969 {
3970 switch (txmode) {
3971 case ATH10K_HW_TXRX_RAW:
3972 case ATH10K_HW_TXRX_NATIVE_WIFI:
3973 case ATH10K_HW_TXRX_ETHERNET:
3974 return ATH10K_MAC_TX_HTT;
3975 case ATH10K_HW_TXRX_MGMT:
3976 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX,
3977 ar->running_fw->fw_file.fw_features) ||
3978 test_bit(WMI_SERVICE_MGMT_TX_WMI,
3979 ar->wmi.svc_map))
3980 return ATH10K_MAC_TX_WMI_MGMT;
3981 else if (ar->htt.target_version_major >= 3)
3982 return ATH10K_MAC_TX_HTT;
3983 else
3984 return ATH10K_MAC_TX_HTT_MGMT;
3985 }
3986
3987 return ATH10K_MAC_TX_UNKNOWN;
3988 }
3989
3990 static int ath10k_mac_tx_submit(struct ath10k *ar,
3991 enum ath10k_hw_txrx_mode txmode,
3992 enum ath10k_mac_tx_path txpath,
3993 struct sk_buff *skb)
3994 {
3995 struct ath10k_htt *htt = &ar->htt;
3996 int ret = -EINVAL;
3997
3998 switch (txpath) {
3999 case ATH10K_MAC_TX_HTT:
4000 ret = ath10k_htt_tx(htt, txmode, skb);
4001 break;
4002 case ATH10K_MAC_TX_HTT_MGMT:
4003 ret = ath10k_htt_mgmt_tx(htt, skb);
4004 break;
4005 case ATH10K_MAC_TX_WMI_MGMT:
4006 ret = ath10k_mac_tx_wmi_mgmt(ar, skb);
4007 break;
4008 case ATH10K_MAC_TX_UNKNOWN:
4009 WARN_ON_ONCE(1);
4010 ret = -EINVAL;
4011 break;
4012 }
4013
4014 if (ret) {
4015 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n",
4016 ret);
4017 ieee80211_free_txskb(ar->hw, skb);
4018 }
4019
4020 return ret;
4021 }
4022
4023 /* This function consumes the sk_buff regardless of return value as far as
4024 * caller is concerned so no freeing is necessary afterwards.
4025 */
4026 static int ath10k_mac_tx(struct ath10k *ar,
4027 struct ieee80211_vif *vif,
4028 enum ath10k_hw_txrx_mode txmode,
4029 enum ath10k_mac_tx_path txpath,
4030 struct sk_buff *skb, bool noque_offchan)
4031 {
4032 struct ieee80211_hw *hw = ar->hw;
4033 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4034 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb);
4035 int ret;
4036
4037 /* We should disable CCK RATE due to P2P */
4038 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)
4039 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n");
4040
4041 switch (txmode) {
4042 case ATH10K_HW_TXRX_MGMT:
4043 case ATH10K_HW_TXRX_NATIVE_WIFI:
4044 ath10k_tx_h_nwifi(hw, skb);
4045 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb);
4046 ath10k_tx_h_seq_no(vif, skb);
4047 break;
4048 case ATH10K_HW_TXRX_ETHERNET:
4049 /* Convert 802.11->802.3 header only if the frame was earlier
4050 * encapsulated to 802.11 by mac80211. Otherwise pass it as is.
4051 */
4052 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP))
4053 ath10k_tx_h_8023(skb);
4054 break;
4055 case ATH10K_HW_TXRX_RAW:
4056 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) &&
4057 !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) {
4058 WARN_ON_ONCE(1);
4059 ieee80211_free_txskb(hw, skb);
4060 return -EOPNOTSUPP;
4061 }
4062 }
4063
4064 if (!noque_offchan && info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
4065 if (!ath10k_mac_tx_frm_has_freq(ar)) {
4066 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac queued offchannel skb %pK len %d\n",
4067 skb, skb->len);
4068
4069 skb_queue_tail(&ar->offchan_tx_queue, skb);
4070 ieee80211_queue_work(hw, &ar->offchan_tx_work);
4071 return 0;
4072 }
4073 }
4074
4075 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb);
4076 if (ret) {
4077 ath10k_warn(ar, "failed to submit frame: %d\n", ret);
4078 return ret;
4079 }
4080
4081 return 0;
4082 }
4083
4084 void ath10k_offchan_tx_purge(struct ath10k *ar)
4085 {
4086 struct sk_buff *skb;
4087
4088 for (;;) {
4089 skb = skb_dequeue(&ar->offchan_tx_queue);
4090 if (!skb)
4091 break;
4092
4093 ieee80211_free_txskb(ar->hw, skb);
4094 }
4095 }
4096
4097 void ath10k_offchan_tx_work(struct work_struct *work)
4098 {
4099 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work);
4100 struct ath10k_peer *peer;
4101 struct ath10k_vif *arvif;
4102 enum ath10k_hw_txrx_mode txmode;
4103 enum ath10k_mac_tx_path txpath;
4104 struct ieee80211_hdr *hdr;
4105 struct ieee80211_vif *vif;
4106 struct ieee80211_sta *sta;
4107 struct sk_buff *skb;
4108 const u8 *peer_addr;
4109 int vdev_id;
4110 int ret;
4111 unsigned long time_left;
4112 bool tmp_peer_created = false;
4113
4114 /* FW requirement: We must create a peer before FW will send out
4115 * an offchannel frame. Otherwise the frame will be stuck and
4116 * never transmitted. We delete the peer upon tx completion.
4117 * It is unlikely that a peer for offchannel tx will already be
4118 * present. However it may be in some rare cases so account for that.
4119 * Otherwise we might remove a legitimate peer and break stuff.
4120 */
4121
4122 for (;;) {
4123 skb = skb_dequeue(&ar->offchan_tx_queue);
4124 if (!skb)
4125 break;
4126
4127 mutex_lock(&ar->conf_mutex);
4128
4129 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK len %d\n",
4130 skb, skb->len);
4131
4132 hdr = (struct ieee80211_hdr *)skb->data;
4133 peer_addr = ieee80211_get_DA(hdr);
4134
4135 spin_lock_bh(&ar->data_lock);
4136 vdev_id = ar->scan.vdev_id;
4137 peer = ath10k_peer_find(ar, vdev_id, peer_addr);
4138 spin_unlock_bh(&ar->data_lock);
4139
4140 if (peer) {
4141 ath10k_warn(ar, "peer %pM on vdev %d already present\n",
4142 peer_addr, vdev_id);
4143 } else {
4144 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id,
4145 peer_addr,
4146 WMI_PEER_TYPE_DEFAULT);
4147 if (ret)
4148 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n",
4149 peer_addr, vdev_id, ret);
4150 tmp_peer_created = (ret == 0);
4151 }
4152
4153 spin_lock_bh(&ar->data_lock);
4154 reinit_completion(&ar->offchan_tx_completed);
4155 ar->offchan_tx_skb = skb;
4156 spin_unlock_bh(&ar->data_lock);
4157
4158 /* It's safe to access vif and sta - conf_mutex guarantees that
4159 * sta_state() and remove_interface() are locked out exclusively
4160 * with respect to this offchannel worker.
4161 */
4162 arvif = ath10k_get_arvif(ar, vdev_id);
4163 if (arvif) {
4164 vif = arvif->vif;
4165 sta = ieee80211_find_sta(vif, peer_addr);
4166 } else {
4167 vif = NULL;
4168 sta = NULL;
4169 }
4170
4171 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4172 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4173
4174 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, true);
4175 if (ret) {
4176 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n",
4177 ret);
4178 /* not serious */
4179 }
4180
4181 time_left =
4182 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ);
4183 if (time_left == 0)
4184 ath10k_warn(ar, "timed out waiting for offchannel skb %pK, len: %d\n",
4185 skb, skb->len);
4186
4187 if (!peer && tmp_peer_created) {
4188 ret = ath10k_peer_delete(ar, vdev_id, peer_addr);
4189 if (ret)
4190 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n",
4191 peer_addr, vdev_id, ret);
4192 }
4193
4194 mutex_unlock(&ar->conf_mutex);
4195 }
4196 }
4197
4198 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar)
4199 {
4200 struct sk_buff *skb;
4201
4202 for (;;) {
4203 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
4204 if (!skb)
4205 break;
4206
4207 ieee80211_free_txskb(ar->hw, skb);
4208 }
4209 }
4210
4211 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work)
4212 {
4213 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work);
4214 struct sk_buff *skb;
4215 dma_addr_t paddr;
4216 int ret;
4217
4218 for (;;) {
4219 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue);
4220 if (!skb)
4221 break;
4222
4223 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF,
4224 ar->running_fw->fw_file.fw_features)) {
4225 paddr = dma_map_single(ar->dev, skb->data,
4226 skb->len, DMA_TO_DEVICE);
4227 if (dma_mapping_error(ar->dev, paddr)) {
4228 ieee80211_free_txskb(ar->hw, skb);
4229 continue;
4230 }
4231 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr);
4232 if (ret) {
4233 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n",
4234 ret);
4235 /* remove this msdu from idr tracking */
4236 ath10k_wmi_cleanup_mgmt_tx_send(ar, skb);
4237
4238 dma_unmap_single(ar->dev, paddr, skb->len,
4239 DMA_TO_DEVICE);
4240 ieee80211_free_txskb(ar->hw, skb);
4241 }
4242 } else {
4243 ret = ath10k_wmi_mgmt_tx(ar, skb);
4244 if (ret) {
4245 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n",
4246 ret);
4247 ieee80211_free_txskb(ar->hw, skb);
4248 }
4249 }
4250 }
4251 }
4252
4253 static void ath10k_mac_txq_init(struct ieee80211_txq *txq)
4254 {
4255 struct ath10k_txq *artxq;
4256
4257 if (!txq)
4258 return;
4259
4260 artxq = (void *)txq->drv_priv;
4261 INIT_LIST_HEAD(&artxq->list);
4262 }
4263
4264 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq)
4265 {
4266 struct ath10k_skb_cb *cb;
4267 struct sk_buff *msdu;
4268 int msdu_id;
4269
4270 if (!txq)
4271 return;
4272
4273 spin_lock_bh(&ar->htt.tx_lock);
4274 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) {
4275 cb = ATH10K_SKB_CB(msdu);
4276 if (cb->txq == txq)
4277 cb->txq = NULL;
4278 }
4279 spin_unlock_bh(&ar->htt.tx_lock);
4280 }
4281
4282 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar,
4283 u16 peer_id,
4284 u8 tid)
4285 {
4286 struct ath10k_peer *peer;
4287
4288 lockdep_assert_held(&ar->data_lock);
4289
4290 peer = ar->peer_map[peer_id];
4291 if (!peer)
4292 return NULL;
4293
4294 if (peer->removed)
4295 return NULL;
4296
4297 if (peer->sta)
4298 return peer->sta->txq[tid];
4299 else if (peer->vif)
4300 return peer->vif->txq;
4301 else
4302 return NULL;
4303 }
4304
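/* Decide whether another frame may be pushed to firmware for this txq.
 * In pure push mode anything may be pushed; otherwise pushing is allowed
 * only while either the global pending-tx count or this txq's firmware
 * queue depth is still below its push threshold.
 */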
4305 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw,
4306 struct ieee80211_txq *txq)
4307 {
4308 struct ath10k *ar = hw->priv;
4309 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4310
4311 /* No need to get locks */
4312 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH)
4313 return true;
4314
4315 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed)
4316 return true;
4317
4318 if (artxq->num_fw_queued < artxq->num_push_allowed)
4319 return true;
4320
4321 return false;
4322 }
4323
4324 /* Return estimated airtime in microsecond, which is calculated using last
4325 * reported TX rate. This is just a rough estimation because host driver has no
4326 * knowledge of the actual transmit rate, retries or aggregation. If actual
4327 * airtime can be reported by firmware, then delta between estimated and actual
4328 * airtime can be adjusted from deficit.
4329 */
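/* Illustrative numbers only: a 1500-byte MSDU (pktlen = 1538 with the
 * assumed 38-byte MAC + SNAP overhead below) at last_tx_bitrate = 3000
 * (300 Mbps in 100 kbps units) gives (1538 * 8 * 10) / 3000 ~= 41 us,
 * plus IEEE80211_ATF_OVERHEAD_IFS for a total of about 57 us.
 */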
4330 #define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */
4331 #define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */
4332 static u16 ath10k_mac_update_airtime(struct ath10k *ar,
4333 struct ieee80211_txq *txq,
4334 struct sk_buff *skb)
4335 {
4336 struct ath10k_sta *arsta;
4337 u32 pktlen;
4338 u16 airtime = 0;
4339
4340 if (!txq || !txq->sta)
4341 return airtime;
4342
4343 if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
4344 return airtime;
4345
4346 spin_lock_bh(&ar->data_lock);
4347 arsta = (struct ath10k_sta *)txq->sta->drv_priv;
4348
4349 pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */
4350 if (arsta->last_tx_bitrate) {
4351 /* airtime in us, last_tx_bitrate in 100kbps */
4352 airtime = (pktlen * 8 * (1000 / 100))
4353 / arsta->last_tx_bitrate;
4354 /* overhead for media access time and IFS */
4355 airtime += IEEE80211_ATF_OVERHEAD_IFS;
4356 } else {
4357 /* This is mostly for throttling excessive BC/MC frames, and the
4358 * airtime/rate doesn't need to be exact. Airtime of BC/MC frames
4359 * in 2G get some discount, which helps prevent very low rate
4360 * frames from being blocked for too long.
4361 */
4362 airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */
4363 airtime += IEEE80211_ATF_OVERHEAD;
4364 }
4365 spin_unlock_bh(&ar->data_lock);
4366
4367 return airtime;
4368 }
4369
4370 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw,
4371 struct ieee80211_txq *txq)
4372 {
4373 struct ath10k *ar = hw->priv;
4374 struct ath10k_htt *htt = &ar->htt;
4375 struct ath10k_txq *artxq = (void *)txq->drv_priv;
4376 struct ieee80211_vif *vif = txq->vif;
4377 struct ieee80211_sta *sta = txq->sta;
4378 enum ath10k_hw_txrx_mode txmode;
4379 enum ath10k_mac_tx_path txpath;
4380 struct sk_buff *skb;
4381 struct ieee80211_hdr *hdr;
4382 size_t skb_len;
4383 bool is_mgmt, is_presp;
4384 int ret;
4385 u16 airtime;
4386
4387 spin_lock_bh(&ar->htt.tx_lock);
4388 ret = ath10k_htt_tx_inc_pending(htt);
4389 spin_unlock_bh(&ar->htt.tx_lock);
4390
4391 if (ret)
4392 return ret;
4393
4394 skb = ieee80211_tx_dequeue_ni(hw, txq);
4395 if (!skb) {
4396 spin_lock_bh(&ar->htt.tx_lock);
4397 ath10k_htt_tx_dec_pending(htt);
4398 spin_unlock_bh(&ar->htt.tx_lock);
4399
4400 return -ENOENT;
4401 }
4402
4403 airtime = ath10k_mac_update_airtime(ar, txq, skb);
4404 ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
4405
4406 skb_len = skb->len;
4407 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4408 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4409 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4410
4411 if (is_mgmt) {
4412 hdr = (struct ieee80211_hdr *)skb->data;
4413 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4414
4415 spin_lock_bh(&ar->htt.tx_lock);
4416 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4417
4418 if (ret) {
4419 ath10k_htt_tx_dec_pending(htt);
4420 spin_unlock_bh(&ar->htt.tx_lock);
4421 return ret;
4422 }
4423 spin_unlock_bh(&ar->htt.tx_lock);
4424 }
4425
4426 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
4427 if (unlikely(ret)) {
4428 ath10k_warn(ar, "failed to push frame: %d\n", ret);
4429
4430 spin_lock_bh(&ar->htt.tx_lock);
4431 ath10k_htt_tx_dec_pending(htt);
4432 if (is_mgmt)
4433 ath10k_htt_tx_mgmt_dec_pending(htt);
4434 spin_unlock_bh(&ar->htt.tx_lock);
4435
4436 return ret;
4437 }
4438
4439 spin_lock_bh(&ar->htt.tx_lock);
4440 artxq->num_fw_queued++;
4441 spin_unlock_bh(&ar->htt.tx_lock);
4442
4443 return skb_len;
4444 }
4445
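/* Walk mac80211's txq scheduler for one AC: keep pushing frames from each
 * returned txq while the push thresholds allow it, and stop early if the
 * tx path reports -EBUSY.
 */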
4446 static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac)
4447 {
4448 struct ieee80211_txq *txq;
4449 int ret = 0;
4450
4451 ieee80211_txq_schedule_start(hw, ac);
4452 while ((txq = ieee80211_next_txq(hw, ac))) {
4453 while (ath10k_mac_tx_can_push(hw, txq)) {
4454 ret = ath10k_mac_tx_push_txq(hw, txq);
4455 if (ret < 0)
4456 break;
4457 }
4458 ieee80211_return_txq(hw, txq, false);
4459 ath10k_htt_tx_txq_update(hw, txq);
4460 if (ret == -EBUSY)
4461 break;
4462 }
4463 ieee80211_txq_schedule_end(hw, ac);
4464
4465 return ret;
4466 }
4467
4468 void ath10k_mac_tx_push_pending(struct ath10k *ar)
4469 {
4470 struct ieee80211_hw *hw = ar->hw;
4471 u32 ac;
4472
4473 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
4474 return;
4475
4476 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2))
4477 return;
4478
4479 rcu_read_lock();
4480 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) {
4481 if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY)
4482 break;
4483 }
4484 rcu_read_unlock();
4485 }
4486 EXPORT_SYMBOL(ath10k_mac_tx_push_pending);
4487
4488 /************/
4489 /* Scanning */
4490 /************/
4491
4492 void __ath10k_scan_finish(struct ath10k *ar)
4493 {
4494 lockdep_assert_held(&ar->data_lock);
4495
4496 switch (ar->scan.state) {
4497 case ATH10K_SCAN_IDLE:
4498 break;
4499 case ATH10K_SCAN_RUNNING:
4500 case ATH10K_SCAN_ABORTING:
4501 if (ar->scan.is_roc && ar->scan.roc_notify)
4502 ieee80211_remain_on_channel_expired(ar->hw);
4503 fallthrough;
4504 case ATH10K_SCAN_STARTING:
4505 if (!ar->scan.is_roc) {
4506 struct cfg80211_scan_info info = {
4507 .aborted = ((ar->scan.state ==
4508 ATH10K_SCAN_ABORTING) ||
4509 (ar->scan.state ==
4510 ATH10K_SCAN_STARTING)),
4511 };
4512
4513 ieee80211_scan_completed(ar->hw, &info);
4514 }
4515
4516 ar->scan.state = ATH10K_SCAN_IDLE;
4517 ar->scan_channel = NULL;
4518 ar->scan.roc_freq = 0;
4519 ath10k_offchan_tx_purge(ar);
4520 cancel_delayed_work(&ar->scan.timeout);
4521 complete(&ar->scan.completed);
4522 break;
4523 }
4524 }
4525
4526 void ath10k_scan_finish(struct ath10k *ar)
4527 {
4528 spin_lock_bh(&ar->data_lock);
4529 __ath10k_scan_finish(ar);
4530 spin_unlock_bh(&ar->data_lock);
4531 }
4532
4533 static int ath10k_scan_stop(struct ath10k *ar)
4534 {
4535 struct wmi_stop_scan_arg arg = {
4536 .req_id = 1, /* FIXME */
4537 .req_type = WMI_SCAN_STOP_ONE,
4538 .u.scan_id = ATH10K_SCAN_ID,
4539 };
4540 int ret;
4541
4542 lockdep_assert_held(&ar->conf_mutex);
4543
4544 ret = ath10k_wmi_stop_scan(ar, &arg);
4545 if (ret) {
4546 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret);
4547 goto out;
4548 }
4549
4550 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ);
4551 if (ret == 0) {
4552 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n");
4553 ret = -ETIMEDOUT;
4554 } else if (ret > 0) {
4555 ret = 0;
4556 }
4557
4558 out:
4559 /* Scan state should be updated upon scan completion but in case
4560 * firmware fails to deliver the event (for whatever reason) it is
4561 * desired to clean up scan state anyway. Firmware may have just
4562 * dropped the scan completion event delivery due to the transport pipe
4563 * being overrun with data and/or it can recover on its own before
4564 * next scan request is submitted.
4565 */
4566 spin_lock_bh(&ar->data_lock);
4567 if (ar->scan.state != ATH10K_SCAN_IDLE)
4568 __ath10k_scan_finish(ar);
4569 spin_unlock_bh(&ar->data_lock);
4570
4571 return ret;
4572 }
4573
4574 static void ath10k_scan_abort(struct ath10k *ar)
4575 {
4576 int ret;
4577
4578 lockdep_assert_held(&ar->conf_mutex);
4579
4580 spin_lock_bh(&ar->data_lock);
4581
4582 switch (ar->scan.state) {
4583 case ATH10K_SCAN_IDLE:
4584 /* This can happen if the timeout worker kicked in and requested an
4585 * abort while the scan completion was being processed.
4586 */
4587 break;
4588 case ATH10K_SCAN_STARTING:
4589 case ATH10K_SCAN_ABORTING:
4590 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n",
4591 ath10k_scan_state_str(ar->scan.state),
4592 ar->scan.state);
4593 break;
4594 case ATH10K_SCAN_RUNNING:
4595 ar->scan.state = ATH10K_SCAN_ABORTING;
4596 spin_unlock_bh(&ar->data_lock);
4597
4598 ret = ath10k_scan_stop(ar);
4599 if (ret)
4600 ath10k_warn(ar, "failed to abort scan: %d\n", ret);
4601
4602 spin_lock_bh(&ar->data_lock);
4603 break;
4604 }
4605
4606 spin_unlock_bh(&ar->data_lock);
4607 }
4608
4609 void ath10k_scan_timeout_work(struct work_struct *work)
4610 {
4611 struct ath10k *ar = container_of(work, struct ath10k,
4612 scan.timeout.work);
4613
4614 mutex_lock(&ar->conf_mutex);
4615 ath10k_scan_abort(ar);
4616 mutex_unlock(&ar->conf_mutex);
4617 }
4618
4619 static int ath10k_start_scan(struct ath10k *ar,
4620 const struct wmi_start_scan_arg *arg)
4621 {
4622 int ret;
4623
4624 lockdep_assert_held(&ar->conf_mutex);
4625
4626 ret = ath10k_wmi_start_scan(ar, arg);
4627 if (ret)
4628 return ret;
4629
4630 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ);
4631 if (ret == 0) {
4632 ret = ath10k_scan_stop(ar);
4633 if (ret)
4634 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
4635
4636 return -ETIMEDOUT;
4637 }
4638
4639 /* If we failed to start the scan, return error code at
4640 * this point. This is probably due to some issue in the
4641 * firmware, but no need to wedge the driver due to that...
4642 */
4643 spin_lock_bh(&ar->data_lock);
4644 if (ar->scan.state == ATH10K_SCAN_IDLE) {
4645 spin_unlock_bh(&ar->data_lock);
4646 return -EINVAL;
4647 }
4648 spin_unlock_bh(&ar->data_lock);
4649
4650 return 0;
4651 }
4652
4653 /**********************/
4654 /* mac80211 callbacks */
4655 /**********************/
4656
4657 static void ath10k_mac_op_tx(struct ieee80211_hw *hw,
4658 struct ieee80211_tx_control *control,
4659 struct sk_buff *skb)
4660 {
4661 struct ath10k *ar = hw->priv;
4662 struct ath10k_htt *htt = &ar->htt;
4663 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
4664 struct ieee80211_vif *vif = info->control.vif;
4665 struct ieee80211_sta *sta = control->sta;
4666 struct ieee80211_txq *txq = NULL;
4667 enum ath10k_hw_txrx_mode txmode;
4668 enum ath10k_mac_tx_path txpath;
4669 bool is_htt;
4670 bool is_mgmt;
4671 int ret;
4672 u16 airtime;
4673
4674 airtime = ath10k_mac_update_airtime(ar, txq, skb);
4675 ath10k_mac_tx_h_fill_cb(ar, vif, txq, sta, skb, airtime);
4676
4677 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb);
4678 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode);
4679 is_htt = (txpath == ATH10K_MAC_TX_HTT ||
4680 txpath == ATH10K_MAC_TX_HTT_MGMT);
4681 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT);
4682
4683 if (is_htt) {
4684 bool is_presp = false;
4685
4686 spin_lock_bh(&ar->htt.tx_lock);
4687 if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)) {
4688 struct ieee80211_hdr *hdr = (void *)skb->data;
4689
4690 is_presp = ieee80211_is_probe_resp(hdr->frame_control);
4691 }
4692
4693 ret = ath10k_htt_tx_inc_pending(htt);
4694 if (ret) {
4695 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n",
4696 ret);
4697 spin_unlock_bh(&ar->htt.tx_lock);
4698 ieee80211_free_txskb(ar->hw, skb);
4699 return;
4700 }
4701
4702 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp);
4703 if (ret) {
4704 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n",
4705 ret);
4706 ath10k_htt_tx_dec_pending(htt);
4707 spin_unlock_bh(&ar->htt.tx_lock);
4708 ieee80211_free_txskb(ar->hw, skb);
4709 return;
4710 }
4711 spin_unlock_bh(&ar->htt.tx_lock);
4712 }
4713
4714 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb, false);
4715 if (ret) {
4716 ath10k_warn(ar, "failed to transmit frame: %d\n", ret);
4717 if (is_htt) {
4718 spin_lock_bh(&ar->htt.tx_lock);
4719 ath10k_htt_tx_dec_pending(htt);
4720 if (is_mgmt)
4721 ath10k_htt_tx_mgmt_dec_pending(htt);
4722 spin_unlock_bh(&ar->htt.tx_lock);
4723 }
4724 return;
4725 }
4726 }
4727
4728 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw,
4729 struct ieee80211_txq *txq)
4730 {
4731 struct ath10k *ar = hw->priv;
4732 int ret;
4733 u8 ac = txq->ac;
4734
4735 ath10k_htt_tx_txq_update(hw, txq);
4736 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH)
4737 return;
4738
4739 spin_lock_bh(&ar->queue_lock[ac]);
4740
4741 ieee80211_txq_schedule_start(hw, ac);
4742 txq = ieee80211_next_txq(hw, ac);
4743 if (!txq)
4744 goto out;
4745
4746 while (ath10k_mac_tx_can_push(hw, txq)) {
4747 ret = ath10k_mac_tx_push_txq(hw, txq);
4748 if (ret < 0)
4749 break;
4750 }
4751 ieee80211_return_txq(hw, txq, false);
4752 ath10k_htt_tx_txq_update(hw, txq);
4753 out:
4754 ieee80211_txq_schedule_end(hw, ac);
4755 spin_unlock_bh(&ar->queue_lock[ac]);
4756 }
4757
4758 /* Must not be called with conf_mutex held as workers can use that also. */
4759 void ath10k_drain_tx(struct ath10k *ar)
4760 {
4761 lockdep_assert_not_held(&ar->conf_mutex);
4762
4763 /* make sure rcu-protected mac80211 tx path itself is drained */
4764 synchronize_net();
4765
4766 ath10k_offchan_tx_purge(ar);
4767 ath10k_mgmt_over_wmi_tx_purge(ar);
4768
4769 cancel_work_sync(&ar->offchan_tx_work);
4770 cancel_work_sync(&ar->wmi_mgmt_tx_work);
4771 }
4772
4773 void ath10k_halt(struct ath10k *ar)
4774 {
4775 struct ath10k_vif *arvif;
4776
4777 lockdep_assert_held(&ar->conf_mutex);
4778
4779 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
4780 ar->filter_flags = 0;
4781 ar->monitor = false;
4782 ar->monitor_arvif = NULL;
4783
4784 if (ar->monitor_started)
4785 ath10k_monitor_stop(ar);
4786
4787 ar->monitor_started = false;
4788 ar->tx_paused = 0;
4789
4790 ath10k_scan_finish(ar);
4791 ath10k_peer_cleanup_all(ar);
4792 ath10k_stop_radar_confirmation(ar);
4793 ath10k_core_stop(ar);
4794 ath10k_hif_power_down(ar);
4795
4796 spin_lock_bh(&ar->data_lock);
4797 list_for_each_entry(arvif, &ar->arvifs, list)
4798 ath10k_mac_vif_beacon_cleanup(arvif);
4799 spin_unlock_bh(&ar->data_lock);
4800 }
4801
4802 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant)
4803 {
4804 struct ath10k *ar = hw->priv;
4805
4806 mutex_lock(&ar->conf_mutex);
4807
4808 *tx_ant = ar->cfg_tx_chainmask;
4809 *rx_ant = ar->cfg_rx_chainmask;
4810
4811 mutex_unlock(&ar->conf_mutex);
4812
4813 return 0;
4814 }
4815
4816 static bool ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg)
4817 {
4818 /* It is not clear that allowing gaps in chainmask
4819 * is helpful. Probably it will not do what the user
4820 * is hoping for, so warn in that case.
4821 */
4822 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0)
4823 return true;
4824
4825 ath10k_warn(ar, "mac %s antenna chainmask is invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n",
4826 dbg, cm);
4827 return false;
4828 }
4829
4830 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar)
4831 {
4832 int nsts = ar->vht_cap_info;
4833
4834 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4835 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4836
4837 /* If firmware does not deliver to host the number of space-time
4838 * streams supported, assume it supports up to 4 BF STS and return
4839 * the value for VHT CAP (nsts - 1).
4840 */
4841 if (nsts == 0)
4842 return 3;
4843
4844 return nsts;
4845 }
4846
4847 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar)
4848 {
4849 int sound_dim = ar->vht_cap_info;
4850
4851 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4852 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4853
4854 /* If the sounding dimension is not advertised by the firmware,
4855 * let's use a default value of 1
4856 */
4857 if (sound_dim == 0)
4858 return 1;
4859
4860 return sound_dim;
4861 }
4862
4863 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar)
4864 {
4865 struct ieee80211_sta_vht_cap vht_cap = {0};
4866 struct ath10k_hw_params *hw = &ar->hw_params;
4867 u16 mcs_map;
4868 u32 val;
4869 int i;
4870
4871 vht_cap.vht_supported = 1;
4872 vht_cap.cap = ar->vht_cap_info;
4873
4874 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
4875 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) {
4876 val = ath10k_mac_get_vht_cap_bf_sts(ar);
4877 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT;
4878 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK;
4879
4880 vht_cap.cap |= val;
4881 }
4882
4883 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
4884 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) {
4885 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
4886 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT;
4887 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK;
4888
4889 vht_cap.cap |= val;
4890 }
4891
4892 mcs_map = 0;
4893 for (i = 0; i < 8; i++) {
4894 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i)))
4895 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2);
4896 else
4897 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2);
4898 }
4899
4900 if (ar->cfg_tx_chainmask <= 1)
4901 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC;
4902
4903 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map);
4904 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map);
4905
4906 /* If we are supporting 160 MHz or 80+80, then the NIC may be able to do
4907 * a restricted NSS for 160 or 80+80 vs what it can do for 80 MHz. Give
4908 * user-space a clue if that is the case.
4909 */
4910 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) &&
4911 (hw->vht160_mcs_rx_highest != 0 ||
4912 hw->vht160_mcs_tx_highest != 0)) {
4913 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest);
4914 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest);
4915 }
4916
4917 return vht_cap;
4918 }
4919
4920 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar)
4921 {
4922 int i;
4923 struct ieee80211_sta_ht_cap ht_cap = {0};
4924
4925 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED))
4926 return ht_cap;
4927
4928 ht_cap.ht_supported = 1;
4929 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K;
4930 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8;
4931 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40;
4932 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40;
4933 ht_cap.cap |=
4934 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT;
4935
4936 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI)
4937 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20;
4938
4939 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI)
4940 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40;
4941
4942 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) {
4943 u32 smps;
4944
4945 smps = WLAN_HT_CAP_SM_PS_DYNAMIC;
4946 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT;
4947
4948 ht_cap.cap |= smps;
4949 }
4950
4951 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1))
4952 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC;
4953
4954 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) {
4955 u32 stbc;
4956
4957 stbc = ar->ht_cap_info;
4958 stbc &= WMI_HT_CAP_RX_STBC;
4959 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT;
4960 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT;
4961 stbc &= IEEE80211_HT_CAP_RX_STBC;
4962
4963 ht_cap.cap |= stbc;
4964 }
4965
4966 if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info &
4967 WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC)))
4968 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING;
4969
4970 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT)
4971 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT;
4972
4973 /* max AMSDU is implicitly taken from vht_cap_info */
4974 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK)
4975 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU;
4976
4977 for (i = 0; i < ar->num_rf_chains; i++) {
4978 if (ar->cfg_rx_chainmask & BIT(i))
4979 ht_cap.mcs.rx_mask[i] = 0xFF;
4980 }
4981
4982 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED;
4983
4984 return ht_cap;
4985 }
4986
4987 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar)
4988 {
4989 struct ieee80211_supported_band *band;
4990 struct ieee80211_sta_vht_cap vht_cap;
4991 struct ieee80211_sta_ht_cap ht_cap;
4992
4993 ht_cap = ath10k_get_ht_cap(ar);
4994 vht_cap = ath10k_create_vht_cap(ar);
4995
4996 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
4997 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
4998 band->ht_cap = ht_cap;
4999 }
5000 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
5001 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
5002 band->ht_cap = ht_cap;
5003 band->vht_cap = vht_cap;
5004 }
5005 }
5006
5007 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant)
5008 {
5009 int ret;
5010 bool is_valid_tx_chain_mask, is_valid_rx_chain_mask;
5011
5012 lockdep_assert_held(&ar->conf_mutex);
5013
5014 is_valid_tx_chain_mask = ath10k_check_chain_mask(ar, tx_ant, "tx");
5015 is_valid_rx_chain_mask = ath10k_check_chain_mask(ar, rx_ant, "rx");
5016
5017 if (!is_valid_tx_chain_mask || !is_valid_rx_chain_mask)
5018 return -EINVAL;
5019
5020 ar->cfg_tx_chainmask = tx_ant;
5021 ar->cfg_rx_chainmask = rx_ant;
5022
5023 if ((ar->state != ATH10K_STATE_ON) &&
5024 (ar->state != ATH10K_STATE_RESTARTED))
5025 return 0;
5026
5027 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask,
5028 tx_ant);
5029 if (ret) {
5030 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
5031 ret, tx_ant);
5032 return ret;
5033 }
5034
5035 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
5036 rx_ant);
5037 if (ret) {
5038 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
5039 ret, rx_ant);
5040 return ret;
5041 }
5042
5043 /* Reload HT/VHT capability */
5044 ath10k_mac_setup_ht_vht_cap(ar);
5045
5046 return 0;
5047 }
5048
5049 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
5050 {
5051 struct ath10k *ar = hw->priv;
5052 int ret;
5053
5054 mutex_lock(&ar->conf_mutex);
5055 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
5056 mutex_unlock(&ar->conf_mutex);
5057 return ret;
5058 }
5059
5060 static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
5061 struct wmi_bb_timing_cfg_arg *bb_timing)
5062 {
5063 struct device_node *node;
5064 const char *fem_name;
5065 int ret;
5066
5067 node = ar->dev->of_node;
5068 if (!node)
5069 return -ENOENT;
5070
5071 ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
5072 if (ret)
5073 return -ENOENT;
5074
5075 /*
5076 * If an external front-end module is used in the hardware, the default
5077 * baseband timing parameters cannot be used since they were fine-tuned
5078 * for the reference hardware, so choose different values suitable for that external FEM.
5079 */
5080 if (!strcmp("microsemi-lx5586", fem_name)) {
5081 bb_timing->bb_tx_timing = 0x00;
5082 bb_timing->bb_xpa_timing = 0x0101;
5083 } else {
5084 return -ENOENT;
5085 }
5086
5087 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
5088 bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing);
5089 return 0;
5090 }
5091
5092 static int ath10k_mac_rfkill_config(struct ath10k *ar)
5093 {
5094 u32 param;
5095 int ret;
5096
5097 if (ar->hw_values->rfkill_pin == 0) {
5098 ath10k_warn(ar, "ath10k does not support hardware rfkill with this device\n");
5099 return -EOPNOTSUPP;
5100 }
5101
5102 ath10k_dbg(ar, ATH10K_DBG_MAC,
5103 "mac rfkill_pin %d rfkill_cfg %d rfkill_on_level %d",
5104 ar->hw_values->rfkill_pin, ar->hw_values->rfkill_cfg,
5105 ar->hw_values->rfkill_on_level);
5106
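/* Pack the rfkill polarity (on level), GPIO pin number and pin
 * configuration into a single WMI pdev parameter word.
 */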
5107 param = FIELD_PREP(WMI_TLV_RFKILL_CFG_RADIO_LEVEL,
5108 ar->hw_values->rfkill_on_level) |
5109 FIELD_PREP(WMI_TLV_RFKILL_CFG_GPIO_PIN_NUM,
5110 ar->hw_values->rfkill_pin) |
5111 FIELD_PREP(WMI_TLV_RFKILL_CFG_PIN_AS_GPIO,
5112 ar->hw_values->rfkill_cfg);
5113
5114 ret = ath10k_wmi_pdev_set_param(ar,
5115 ar->wmi.pdev_param->rfkill_config,
5116 param);
5117 if (ret) {
5118 ath10k_warn(ar,
5119 "failed to set rfkill config 0x%x: %d\n",
5120 param, ret);
5121 return ret;
5122 }
5123 return 0;
5124 }
5125
5126 int ath10k_mac_rfkill_enable_radio(struct ath10k *ar, bool enable)
5127 {
5128 enum wmi_tlv_rfkill_enable_radio param;
5129 int ret;
5130
5131 if (enable)
5132 param = WMI_TLV_RFKILL_ENABLE_RADIO_ON;
5133 else
5134 param = WMI_TLV_RFKILL_ENABLE_RADIO_OFF;
5135
5136 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac rfkill enable %d", param);
5137
5138 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rfkill_enable,
5139 param);
5140 if (ret) {
5141 ath10k_warn(ar, "failed to set rfkill enable param %d: %d\n",
5142 param, ret);
5143 return ret;
5144 }
5145
5146 return 0;
5147 }
5148
5149 static int ath10k_start(struct ieee80211_hw *hw)
5150 {
5151 struct ath10k *ar = hw->priv;
5152 u32 param;
5153 int ret = 0;
5154 struct wmi_bb_timing_cfg_arg bb_timing = {0};
5155
5156 /*
5157 * This makes sense only when restarting hw. It is harmless to call
5158 * unconditionally. This is necessary to make sure no HTT/WMI tx
5159 * commands will be submitted while restarting.
5160 */
5161 ath10k_drain_tx(ar);
5162
5163 mutex_lock(&ar->conf_mutex);
5164
5165 switch (ar->state) {
5166 case ATH10K_STATE_OFF:
5167 ar->state = ATH10K_STATE_ON;
5168 break;
5169 case ATH10K_STATE_RESTARTING:
5170 ar->state = ATH10K_STATE_RESTARTED;
5171 break;
5172 case ATH10K_STATE_ON:
5173 case ATH10K_STATE_RESTARTED:
5174 case ATH10K_STATE_WEDGED:
5175 WARN_ON(1);
5176 ret = -EINVAL;
5177 goto err;
5178 case ATH10K_STATE_UTF:
5179 ret = -EBUSY;
5180 goto err;
5181 }
5182
5183 spin_lock_bh(&ar->data_lock);
5184
5185 if (ar->hw_rfkill_on) {
5186 ar->hw_rfkill_on = false;
5187 spin_unlock_bh(&ar->data_lock);
5188 goto err;
5189 }
5190
5191 spin_unlock_bh(&ar->data_lock);
5192
5193 ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
5194 if (ret) {
5195 ath10k_err(ar, "Could not init hif: %d\n", ret);
5196 goto err_off;
5197 }
5198
5199 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
5200 &ar->normal_mode_fw);
5201 if (ret) {
5202 ath10k_err(ar, "Could not init core: %d\n", ret);
5203 goto err_power_down;
5204 }
5205
5206 if (ar->sys_cap_info & WMI_TLV_SYS_CAP_INFO_RFKILL) {
5207 ret = ath10k_mac_rfkill_config(ar);
5208 if (ret && ret != -EOPNOTSUPP) {
5209 ath10k_warn(ar, "failed to configure rfkill: %d", ret);
5210 goto err_core_stop;
5211 }
5212 }
5213
5214 param = ar->wmi.pdev_param->pmf_qos;
5215 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
5216 if (ret) {
5217 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
5218 goto err_core_stop;
5219 }
5220
5221 param = ar->wmi.pdev_param->dynamic_bw;
5222 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
5223 if (ret) {
5224 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
5225 goto err_core_stop;
5226 }
5227
5228 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
5229 ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
5230 if (ret) {
5231 ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
5232 goto err_core_stop;
5233 }
5234 }
5235
5236 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
5237 ret = ath10k_wmi_adaptive_qcs(ar, true);
5238 if (ret) {
5239 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
5240 ret);
5241 goto err_core_stop;
5242 }
5243 }
5244
5245 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
5246 param = ar->wmi.pdev_param->burst_enable;
5247 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
5248 if (ret) {
5249 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
5250 goto err_core_stop;
5251 }
5252 }
5253
5254 param = ar->wmi.pdev_param->idle_ps_config;
5255 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
5256 if (ret && ret != -EOPNOTSUPP) {
5257 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
5258 goto err_core_stop;
5259 }
5260
5261 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
5262
5263 /*
5264 * By default the FW sets the ARP frame AC to voice (6). In that case the
5265 * ARP exchange does not work properly with a UAPSD-enabled AP. ARP
5266 * requests that arrive with access category 0 are processed by the
5267 * network stack and sent back with access category 0, but the FW changes
5268 * the access category to 6. Setting the ARP frame access category to
5269 * best effort (0) solves this problem.
5270 */
5271
5272 param = ar->wmi.pdev_param->arp_ac_override;
5273 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
5274 if (ret) {
5275 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n",
5276 ret);
5277 goto err_core_stop;
5278 }
5279
5280 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA,
5281 ar->running_fw->fw_file.fw_features)) {
5282 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1,
5283 WMI_CCA_DETECT_LEVEL_AUTO,
5284 WMI_CCA_DETECT_MARGIN_AUTO);
5285 if (ret) {
5286 ath10k_warn(ar, "failed to enable adaptive cca: %d\n",
5287 ret);
5288 goto err_core_stop;
5289 }
5290 }
5291
5292 param = ar->wmi.pdev_param->ani_enable;
5293 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
5294 if (ret) {
5295 ath10k_warn(ar, "failed to enable ani by default: %d\n",
5296 ret);
5297 goto err_core_stop;
5298 }
5299
5300 ar->ani_enabled = true;
5301
5302 if (ath10k_peer_stats_enabled(ar)) {
5303 param = ar->wmi.pdev_param->peer_stats_update_period;
5304 ret = ath10k_wmi_pdev_set_param(ar, param,
5305 PEER_DEFAULT_STATS_UPDATE_PERIOD);
5306 if (ret) {
5307 ath10k_warn(ar,
5308 "failed to set peer stats period : %d\n",
5309 ret);
5310 goto err_core_stop;
5311 }
5312 }
5313
5314 param = ar->wmi.pdev_param->enable_btcoex;
5315 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) &&
5316 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM,
5317 ar->running_fw->fw_file.fw_features) &&
5318 ar->coex_support) {
5319 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
5320 if (ret) {
5321 ath10k_warn(ar,
5322 "failed to set btcoex param: %d\n", ret);
5323 goto err_core_stop;
5324 }
5325 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags);
5326 }
5327
5328 if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) {
5329 ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing);
5330 if (!ret) {
5331 ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing);
5332 if (ret) {
5333 ath10k_warn(ar,
5334 "failed to set bb timings: %d\n",
5335 ret);
5336 goto err_core_stop;
5337 }
5338 }
5339 }
5340
5341 ar->num_started_vdevs = 0;
5342 ath10k_regd_update(ar);
5343
5344 ath10k_spectral_start(ar);
5345 ath10k_thermal_set_throttling(ar);
5346
5347 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE;
5348
5349 mutex_unlock(&ar->conf_mutex);
5350 return 0;
5351
5352 err_core_stop:
5353 ath10k_core_stop(ar);
5354
5355 err_power_down:
5356 ath10k_hif_power_down(ar);
5357
5358 err_off:
5359 ar->state = ATH10K_STATE_OFF;
5360
5361 err:
5362 mutex_unlock(&ar->conf_mutex);
5363 return ret;
5364 }
5365
5366 static void ath10k_stop(struct ieee80211_hw *hw, bool suspend)
5367 {
5368 struct ath10k *ar = hw->priv;
5369 u32 opt;
5370
5371 ath10k_drain_tx(ar);
5372
5373 mutex_lock(&ar->conf_mutex);
5374 if (ar->state != ATH10K_STATE_OFF) {
5375 if (!ar->hw_rfkill_on) {
5376 /* If the current driver state is RESTARTING but not yet
5377 * fully RESTARTED because of incoming suspend event,
5378 * then ath10k_halt() is already called via
5379 * ath10k_core_restart() and should not be called here.
5380 */
5381 if (ar->state != ATH10K_STATE_RESTARTING) {
5382 ath10k_halt(ar);
5383 } else {
5384 /* Suspending here, because when in RESTARTING
5385 * state, ath10k_core_stop() skips
5386 * ath10k_wait_for_suspend().
5387 */
5388 opt = WMI_PDEV_SUSPEND_AND_DISABLE_INTR;
5389 ath10k_wait_for_suspend(ar, opt);
5390 }
5391 }
5392 ar->state = ATH10K_STATE_OFF;
5393 }
5394 mutex_unlock(&ar->conf_mutex);
5395
5396 cancel_work_sync(&ar->set_coverage_class_work);
5397 cancel_delayed_work_sync(&ar->scan.timeout);
5398 cancel_work_sync(&ar->restart_work);
5399 }
5400
5401 static int ath10k_config_ps(struct ath10k *ar)
5402 {
5403 struct ath10k_vif *arvif;
5404 int ret = 0;
5405
5406 lockdep_assert_held(&ar->conf_mutex);
5407
5408 list_for_each_entry(arvif, &ar->arvifs, list) {
5409 ret = ath10k_mac_vif_setup_ps(arvif);
5410 if (ret) {
5411 ath10k_warn(ar, "failed to setup powersave: %d\n", ret);
5412 break;
5413 }
5414 }
5415
5416 return ret;
5417 }
5418
5419 static int ath10k_config(struct ieee80211_hw *hw, u32 changed)
5420 {
5421 struct ath10k *ar = hw->priv;
5422 struct ieee80211_conf *conf = &hw->conf;
5423 int ret = 0;
5424
5425 mutex_lock(&ar->conf_mutex);
5426
5427 if (changed & IEEE80211_CONF_CHANGE_PS)
5428 ath10k_config_ps(ar);
5429
5430 if (changed & IEEE80211_CONF_CHANGE_MONITOR) {
5431 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR;
5432 ret = ath10k_monitor_recalc(ar);
5433 if (ret)
5434 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5435 }
5436
5437 mutex_unlock(&ar->conf_mutex);
5438 return ret;
5439 }
5440
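/* For example, a chainmask of 0x7 (three contiguous chains) maps to
 * NSS 3, while a sparse mask such as 0x5 falls through to NSS 1.
 */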
5441 static u32 get_nss_from_chainmask(u16 chain_mask)
5442 {
5443 if ((chain_mask & 0xf) == 0xf)
5444 return 4;
5445 else if ((chain_mask & 0x7) == 0x7)
5446 return 3;
5447 else if ((chain_mask & 0x3) == 0x3)
5448 return 2;
5449 return 1;
5450 }
5451
5452 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif)
5453 {
5454 u32 value = 0;
5455 struct ath10k *ar = arvif->ar;
5456 int nsts;
5457 int sound_dim;
5458
5459 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC)
5460 return 0;
5461
5462 nsts = ath10k_mac_get_vht_cap_bf_sts(ar);
5463 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE |
5464 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE))
5465 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET);
5466
5467 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar);
5468 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE |
5469 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))
5470 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET);
5471
5472 if (!value)
5473 return 0;
5474
5475 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE)
5476 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER;
5477
5478 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)
5479 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER |
5480 WMI_VDEV_PARAM_TXBF_SU_TX_BFER);
5481
5482 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE)
5483 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE;
5484
5485 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)
5486 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE |
5487 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE);
5488
5489 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
5490 ar->wmi.vdev_param->txbf, value);
5491 }
5492
5493 static void ath10k_update_vif_offload(struct ieee80211_hw *hw,
5494 struct ieee80211_vif *vif)
5495 {
5496 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5497 struct ath10k *ar = hw->priv;
5498 u32 vdev_param;
5499 int ret;
5500
5501 if (ath10k_frame_mode != ATH10K_HW_TXRX_ETHERNET ||
5502 ar->wmi.vdev_param->tx_encap_type == WMI_VDEV_PARAM_UNSUPPORTED ||
5503 (vif->type != NL80211_IFTYPE_STATION &&
5504 vif->type != NL80211_IFTYPE_AP))
5505 vif->offload_flags &= ~IEEE80211_OFFLOAD_ENCAP_ENABLED;
5506
5507 vdev_param = ar->wmi.vdev_param->tx_encap_type;
5508 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5509 ATH10K_HW_TXRX_NATIVE_WIFI);
5510 /* 10.X firmware does not support this VDEV parameter. Do not warn */
5511 if (ret && ret != -EOPNOTSUPP) {
5512 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
5513 arvif->vdev_id, ret);
5514 }
5515 }
5516
5517 /*
5518 * TODO:
5519 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE,
5520 * because we will send mgmt frames without CCK. This requirement
5521 * for P2P_FIND/GO_NEG should be handled by checking CCK flag
5522 * in the TX packet.
5523 */
5524 static int ath10k_add_interface(struct ieee80211_hw *hw,
5525 struct ieee80211_vif *vif)
5526 {
5527 struct ath10k *ar = hw->priv;
5528 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5529 struct ath10k_peer *peer;
5530 enum wmi_sta_powersave_param param;
5531 int ret = 0;
5532 u32 value;
5533 int bit;
5534 int i;
5535 u32 vdev_param;
5536
5537 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD;
5538
5539 mutex_lock(&ar->conf_mutex);
5540
5541 memset(arvif, 0, sizeof(*arvif));
5542 ath10k_mac_txq_init(vif->txq);
5543
5544 arvif->ar = ar;
5545 arvif->vif = vif;
5546
5547 INIT_LIST_HEAD(&arvif->list);
5548 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work);
5549 INIT_DELAYED_WORK(&arvif->connection_loss_work,
5550 ath10k_mac_vif_sta_connection_loss_work);
5551
5552 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) {
5553 arvif->bitrate_mask.control[i].legacy = 0xffffffff;
5554 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff,
5555 sizeof(arvif->bitrate_mask.control[i].ht_mcs));
5556 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff,
5557 sizeof(arvif->bitrate_mask.control[i].vht_mcs));
5558 }
5559
5560 if (ar->num_peers >= ar->max_num_peers) {
5561 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n");
5562 ret = -ENOBUFS;
5563 goto err;
5564 }
5565
5566 if (ar->free_vdev_map == 0) {
5567 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n");
5568 ret = -EBUSY;
5569 goto err;
5570 }
5571 bit = __ffs64(ar->free_vdev_map);
5572
5573 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n",
5574 bit, ar->free_vdev_map);
5575
5576 arvif->vdev_id = bit;
5577 arvif->vdev_subtype =
5578 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE);
5579
5580 switch (vif->type) {
5581 case NL80211_IFTYPE_P2P_DEVICE:
5582 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5583 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5584 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE);
5585 break;
5586 case NL80211_IFTYPE_UNSPECIFIED:
5587 case NL80211_IFTYPE_STATION:
5588 arvif->vdev_type = WMI_VDEV_TYPE_STA;
5589 if (vif->p2p)
5590 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5591 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT);
5592 break;
5593 case NL80211_IFTYPE_ADHOC:
5594 arvif->vdev_type = WMI_VDEV_TYPE_IBSS;
5595 break;
5596 case NL80211_IFTYPE_MESH_POINT:
5597 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) {
5598 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5599 (ar, WMI_VDEV_SUBTYPE_MESH_11S);
5600 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5601 ret = -EINVAL;
5602 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n");
5603 goto err;
5604 }
5605 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5606 break;
5607 case NL80211_IFTYPE_AP:
5608 arvif->vdev_type = WMI_VDEV_TYPE_AP;
5609
5610 if (vif->p2p)
5611 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype
5612 (ar, WMI_VDEV_SUBTYPE_P2P_GO);
5613 break;
5614 case NL80211_IFTYPE_MONITOR:
5615 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR;
5616 break;
5617 default:
5618 WARN_ON(1);
5619 break;
5620 }
5621
5622 /* Using vdev_id as queue number will make it very easy to do per-vif
5623 * tx queue locking. This shouldn't wrap due to interface combinations
5624 * but do a modulo for correctness sake and prevent using offchannel tx
5625 * queues for regular vif tx.
5626 */
5627 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5628 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
5629 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
5630
5631 /* Some firmware revisions don't wait for beacon tx completion before
5632 * sending another SWBA event. This could lead to hardware using old
5633 * (freed) beacon data in some cases, e.g. tx credit starvation
5634 * combined with missed TBTT. This is very rare.
5635 *
5636 * On non-IOMMU-enabled hosts this could be a possible security issue
5637 * because hw could beacon some random data on the air. On
5638 * IOMMU-enabled hosts DMAR faults would occur in most cases and target
5639 * device would crash.
5640 *
5641 * Since there are no beacon tx completions (implicit or explicit)
5642 * propagated to host the only workaround for this is to allocate a
5643 * DMA-coherent buffer for a lifetime of a vif and use it for all
5644 * beacon tx commands. Worst case for this approach is some beacons may
5645 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap.
5646 */
5647 if (vif->type == NL80211_IFTYPE_ADHOC ||
5648 vif->type == NL80211_IFTYPE_MESH_POINT ||
5649 vif->type == NL80211_IFTYPE_AP) {
5650 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL) {
5651 arvif->beacon_buf = kmalloc(IEEE80211_MAX_FRAME_LEN,
5652 GFP_KERNEL);
5653
5654 /* Using a kernel pointer in place of a dma_addr_t
5655 * token can lead to undefined behavior if that
5656 * makes it into cache management functions. Use a
5657 * known-invalid address token instead, which
5658 * avoids the warning and makes it easier to catch
5659 * bugs if it does end up getting used.
5660 */
5661 arvif->beacon_paddr = DMA_MAPPING_ERROR;
5662 } else {
5663 arvif->beacon_buf =
5664 dma_alloc_coherent(ar->dev,
5665 IEEE80211_MAX_FRAME_LEN,
5666 &arvif->beacon_paddr,
5667 GFP_ATOMIC);
5668 }
5669 if (!arvif->beacon_buf) {
5670 ret = -ENOMEM;
5671 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
5672 ret);
5673 goto err;
5674 }
5675 }
5676 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
5677 arvif->nohwcrypt = true;
5678
5679 if (arvif->nohwcrypt &&
5680 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
5681 ret = -EINVAL;
5682 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
5683 goto err;
5684 }
5685
5686 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
5687 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
5688 arvif->beacon_buf ? "single-buf" : "per-skb");
5689
5690 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
5691 arvif->vdev_subtype, vif->addr);
5692 if (ret) {
5693 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
5694 arvif->vdev_id, ret);
5695 goto err;
5696 }
5697
5698 if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT,
5699 ar->wmi.svc_map)) {
5700 vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn;
5701 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5702 WMI_VDEV_DISABLE_4_ADDR_SRC_LRN);
5703 if (ret && ret != -EOPNOTSUPP) {
5704 ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n",
5705 arvif->vdev_id, ret);
5706 }
5707 }
5708
5709 ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
5710 spin_lock_bh(&ar->data_lock);
5711 list_add(&arvif->list, &ar->arvifs);
5712 spin_unlock_bh(&ar->data_lock);
5713
5714 /* It makes no sense to have firmware do keepalives. mac80211 already
5715 * takes care of this with idle connection polling.
5716 */
5717 ret = ath10k_mac_vif_disable_keepalive(arvif);
5718 if (ret) {
5719 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
5720 arvif->vdev_id, ret);
5721 goto err_vdev_delete;
5722 }
5723
5724 arvif->def_wep_key_idx = -1;
5725
5726 ath10k_update_vif_offload(hw, vif);
5727
5728 /* Configuring the number of spatial streams for a monitor interface
5729 * causes a target assert in qca9888 and qca6174.
5730 */
5731 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) {
5732 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask);
5733
5734 vdev_param = ar->wmi.vdev_param->nss;
5735 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5736 nss);
5737 if (ret) {
5738 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n",
5739 arvif->vdev_id, ar->cfg_tx_chainmask, nss,
5740 ret);
5741 goto err_vdev_delete;
5742 }
5743 }
5744
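/* AP and IBSS vdevs need a self-peer keyed on the vif MAC address;
 * look it up right after creation so the HTT peer id can be cached
 * in arvif->peer_id.
 */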
5745 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5746 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5747 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id,
5748 vif->addr, WMI_PEER_TYPE_DEFAULT);
5749 if (ret) {
5750 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n",
5751 arvif->vdev_id, ret);
5752 goto err_vdev_delete;
5753 }
5754
5755 spin_lock_bh(&ar->data_lock);
5756
5757 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr);
5758 if (!peer) {
5759 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
5760 vif->addr, arvif->vdev_id);
5761 spin_unlock_bh(&ar->data_lock);
5762 ret = -ENOENT;
5763 goto err_peer_delete;
5764 }
5765
5766 arvif->peer_id = find_first_bit(peer->peer_ids,
5767 ATH10K_MAX_NUM_PEER_IDS);
5768
5769 spin_unlock_bh(&ar->data_lock);
5770 } else {
5771 arvif->peer_id = HTT_INVALID_PEERID;
5772 }
5773
5774 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
5775 ret = ath10k_mac_set_kickout(arvif);
5776 if (ret) {
5777 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n",
5778 arvif->vdev_id, ret);
5779 goto err_peer_delete;
5780 }
5781 }
5782
5783 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
5784 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY;
5785 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
5786 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
5787 param, value);
5788 if (ret) {
5789 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n",
5790 arvif->vdev_id, ret);
5791 goto err_peer_delete;
5792 }
5793
5794 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
5795 if (ret) {
5796 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
5797 arvif->vdev_id, ret);
5798 goto err_peer_delete;
5799 }
5800
5801 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
5802 if (ret) {
5803 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
5804 arvif->vdev_id, ret);
5805 goto err_peer_delete;
5806 }
5807 }
5808
5809 ret = ath10k_mac_set_txbf_conf(arvif);
5810 if (ret) {
5811 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n",
5812 arvif->vdev_id, ret);
5813 goto err_peer_delete;
5814 }
5815
5816 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold);
5817 if (ret) {
5818 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
5819 arvif->vdev_id, ret);
5820 goto err_peer_delete;
5821 }
5822
5823 arvif->txpower = vif->bss_conf.txpower;
5824 ret = ath10k_mac_txpower_recalc(ar);
5825 if (ret) {
5826 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
5827 goto err_peer_delete;
5828 }
5829
5830 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
5831 vdev_param = ar->wmi.vdev_param->rtt_responder_role;
5832 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
5833 arvif->ftm_responder);
5834
5835 /* It is harmless to not set FTM role. Do not warn */
5836 if (ret && ret != -EOPNOTSUPP)
5837 ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n",
5838 arvif->vdev_id, ret);
5839 }
5840
5841 if (vif->type == NL80211_IFTYPE_MONITOR) {
5842 ar->monitor_arvif = arvif;
5843 ret = ath10k_monitor_recalc(ar);
5844 if (ret) {
5845 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5846 goto err_peer_delete;
5847 }
5848 }
5849
5850 spin_lock_bh(&ar->htt.tx_lock);
5851 if (!ar->tx_paused)
5852 ieee80211_wake_queue(ar->hw, arvif->vdev_id);
5853 spin_unlock_bh(&ar->htt.tx_lock);
5854
5855 mutex_unlock(&ar->conf_mutex);
5856 return 0;
5857
5858 err_peer_delete:
5859 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5860 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5861 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr);
5862 ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
5863 vif->addr);
5864 }
5865
5866 err_vdev_delete:
5867 ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5868 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5869 spin_lock_bh(&ar->data_lock);
5870 list_del(&arvif->list);
5871 spin_unlock_bh(&ar->data_lock);
5872
5873 err:
5874 if (arvif->beacon_buf) {
5875 if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
5876 kfree(arvif->beacon_buf);
5877 else
5878 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
5879 arvif->beacon_buf,
5880 arvif->beacon_paddr);
5881 arvif->beacon_buf = NULL;
5882 }
5883
5884 mutex_unlock(&ar->conf_mutex);
5885
5886 return ret;
5887 }
5888
5889 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif)
5890 {
5891 int i;
5892
5893 for (i = 0; i < BITS_PER_LONG; i++)
5894 ath10k_mac_vif_tx_unlock(arvif, i);
5895 }
5896
5897 static void ath10k_remove_interface(struct ieee80211_hw *hw,
5898 struct ieee80211_vif *vif)
5899 {
5900 struct ath10k *ar = hw->priv;
5901 struct ath10k_vif *arvif = (void *)vif->drv_priv;
5902 struct ath10k_peer *peer;
5903 unsigned long time_left;
5904 int ret;
5905 int i;
5906
5907 cancel_work_sync(&arvif->ap_csa_work);
5908 cancel_delayed_work_sync(&arvif->connection_loss_work);
5909
5910 mutex_lock(&ar->conf_mutex);
5911
5912 ret = ath10k_spectral_vif_stop(arvif);
5913 if (ret)
5914 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n",
5915 arvif->vdev_id, ret);
5916
5917 ar->free_vdev_map |= 1LL << arvif->vdev_id;
5918 spin_lock_bh(&ar->data_lock);
5919 list_del(&arvif->list);
5920 spin_unlock_bh(&ar->data_lock);
5921
5922 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5923 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5924 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id,
5925 vif->addr);
5926 if (ret)
5927 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n",
5928 arvif->vdev_id, ret);
5929
5930 ath10k_wait_for_peer_delete_done(ar, arvif->vdev_id,
5931 vif->addr);
5932 kfree(arvif->u.ap.noa_data);
5933 }
5934
5935 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n",
5936 arvif->vdev_id);
5937
5938 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id);
5939 if (ret)
5940 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n",
5941 arvif->vdev_id, ret);
5942
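/* Firmware that advertises synchronous delete commands sends an
 * explicit vdev delete response; wait for it before tearing down the
 * remaining state.
 */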
5943 if (test_bit(WMI_SERVICE_SYNC_DELETE_CMDS, ar->wmi.svc_map)) {
5944 time_left = wait_for_completion_timeout(&ar->vdev_delete_done,
5945 ATH10K_VDEV_DELETE_TIMEOUT_HZ);
5946 if (time_left == 0) {
5947 ath10k_warn(ar, "Timeout in receiving vdev delete response\n");
5948 goto out;
5949 }
5950 }
5951
5952 /* Some firmware revisions don't notify host about self-peer removal
5953 * until after associated vdev is deleted.
5954 */
5955 if (arvif->vdev_type == WMI_VDEV_TYPE_AP ||
5956 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
5957 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id,
5958 vif->addr);
5959 if (ret)
5960 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n",
5961 arvif->vdev_id, ret);
5962
5963 spin_lock_bh(&ar->data_lock);
5964 ar->num_peers--;
5965 spin_unlock_bh(&ar->data_lock);
5966 }
5967
5968 spin_lock_bh(&ar->data_lock);
5969 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
5970 peer = ar->peer_map[i];
5971 if (!peer)
5972 continue;
5973
5974 if (peer->vif == vif) {
5975 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n",
5976 vif->addr, arvif->vdev_id);
5977 peer->vif = NULL;
5978 }
5979 }
5980
5981 /* Clean this up late to reduce the opportunity for firmware to
5982 * access DMA memory we have already freed.
5983 */
5984 ath10k_mac_vif_beacon_cleanup(arvif);
5985 spin_unlock_bh(&ar->data_lock);
5986
5987 ath10k_peer_cleanup(ar, arvif->vdev_id);
5988 ath10k_mac_txq_unref(ar, vif->txq);
5989
5990 if (vif->type == NL80211_IFTYPE_MONITOR) {
5991 ar->monitor_arvif = NULL;
5992 ret = ath10k_monitor_recalc(ar);
5993 if (ret)
5994 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
5995 }
5996
5997 ret = ath10k_mac_txpower_recalc(ar);
5998 if (ret)
5999 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
6000
6001 spin_lock_bh(&ar->htt.tx_lock);
6002 ath10k_mac_vif_tx_unlock_all(arvif);
6003 spin_unlock_bh(&ar->htt.tx_lock);
6004
6005 ath10k_mac_txq_unref(ar, vif->txq);
6006
6007 out:
6008 mutex_unlock(&ar->conf_mutex);
6009 }
6010
6011 /*
6012 * FIXME: Has to be verified.
6013 */
6014 #define SUPPORTED_FILTERS \
6015 (FIF_ALLMULTI | \
6016 FIF_CONTROL | \
6017 FIF_PSPOLL | \
6018 FIF_OTHER_BSS | \
6019 FIF_BCN_PRBRESP_PROMISC | \
6020 FIF_PROBE_REQ | \
6021 FIF_FCSFAIL)
6022
6023 static void ath10k_configure_filter(struct ieee80211_hw *hw,
6024 unsigned int changed_flags,
6025 unsigned int *total_flags,
6026 u64 multicast)
6027 {
6028 struct ath10k *ar = hw->priv;
6029 int ret;
6030 unsigned int supported = SUPPORTED_FILTERS;
6031
6032 mutex_lock(&ar->conf_mutex);
6033
6034 if (ar->hw_params.mcast_frame_registration)
6035 supported |= FIF_MCAST_ACTION;
6036
6037 *total_flags &= supported;
6038
6039 ar->filter_flags = *total_flags;
6040
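/* The requested filter flags are only cached here;
 * ath10k_monitor_recalc() then decides whether a monitor vdev needs
 * to be (re)started to honour them.
 */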
6041 ret = ath10k_monitor_recalc(ar);
6042 if (ret)
6043 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret);
6044
6045 mutex_unlock(&ar->conf_mutex);
6046 }
6047
6048 static void ath10k_recalculate_mgmt_rate(struct ath10k *ar,
6049 struct ieee80211_vif *vif,
6050 struct cfg80211_chan_def *def)
6051 {
6052 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6053 const struct ieee80211_supported_band *sband;
6054 u8 basic_rate_idx;
6055 int hw_rate_code;
6056 u32 vdev_param;
6057 u16 bitrate;
6058 int ret;
6059
6060 lockdep_assert_held(&ar->conf_mutex);
6061
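/* Management frames are sent at the lowest basic rate configured for
 * the BSS.
 */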
6062 sband = ar->hw->wiphy->bands[def->chan->band];
6063 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1;
6064 bitrate = sband->bitrates[basic_rate_idx].bitrate;
6065
6066 hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate);
6067 if (hw_rate_code < 0) {
6068 ath10k_warn(ar, "bitrate not supported %d\n", bitrate);
6069 return;
6070 }
6071
6072 vdev_param = ar->wmi.vdev_param->mgmt_rate;
6073 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6074 hw_rate_code);
6075 if (ret)
6076 ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret);
6077 }
6078
6079 static void ath10k_bss_info_changed(struct ieee80211_hw *hw,
6080 struct ieee80211_vif *vif,
6081 struct ieee80211_bss_conf *info,
6082 u64 changed)
6083 {
6084 struct ath10k *ar = hw->priv;
6085 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6086 struct cfg80211_chan_def def;
6087 u32 vdev_param, pdev_param, slottime, preamble;
6088 u16 bitrate, hw_value;
6089 u8 rate, rateidx;
6090 int ret = 0, mcast_rate;
6091 enum nl80211_band band;
6092
6093 mutex_lock(&ar->conf_mutex);
6094
6095 if (changed & BSS_CHANGED_IBSS)
6096 ath10k_control_ibss(arvif, vif);
6097
6098 if (changed & BSS_CHANGED_BEACON_INT) {
6099 arvif->beacon_interval = info->beacon_int;
6100 vdev_param = ar->wmi.vdev_param->beacon_interval;
6101 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6102 arvif->beacon_interval);
6103 ath10k_dbg(ar, ATH10K_DBG_MAC,
6104 "mac vdev %d beacon_interval %d\n",
6105 arvif->vdev_id, arvif->beacon_interval);
6106
6107 if (ret)
6108 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n",
6109 arvif->vdev_id, ret);
6110 }
6111
6112 if (changed & BSS_CHANGED_BEACON) {
6113 ath10k_dbg(ar, ATH10K_DBG_MAC,
6114 "vdev %d set beacon tx mode to staggered\n",
6115 arvif->vdev_id);
6116
6117 pdev_param = ar->wmi.pdev_param->beacon_tx_mode;
6118 ret = ath10k_wmi_pdev_set_param(ar, pdev_param,
6119 WMI_BEACON_STAGGERED_MODE);
6120 if (ret)
6121 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n",
6122 arvif->vdev_id, ret);
6123
6124 ret = ath10k_mac_setup_bcn_tmpl(arvif);
6125 if (ret)
6126 ath10k_warn(ar, "failed to update beacon template: %d\n",
6127 ret);
6128
6129 if (ieee80211_vif_is_mesh(vif)) {
6130 /* mesh doesn't use SSID but firmware needs it */
6131 arvif->u.ap.ssid_len = 4;
6132 memcpy(arvif->u.ap.ssid, "mesh", arvif->u.ap.ssid_len);
6133 }
6134 }
6135
6136 if (changed & BSS_CHANGED_AP_PROBE_RESP) {
6137 ret = ath10k_mac_setup_prb_tmpl(arvif);
6138 if (ret)
6139 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
6140 arvif->vdev_id, ret);
6141 }
6142
6143 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
6144 arvif->dtim_period = info->dtim_period;
6145
6146 ath10k_dbg(ar, ATH10K_DBG_MAC,
6147 "mac vdev %d dtim_period %d\n",
6148 arvif->vdev_id, arvif->dtim_period);
6149
6150 vdev_param = ar->wmi.vdev_param->dtim_period;
6151 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6152 arvif->dtim_period);
6153 if (ret)
6154 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
6155 arvif->vdev_id, ret);
6156 }
6157
6158 if (changed & BSS_CHANGED_SSID &&
6159 vif->type == NL80211_IFTYPE_AP) {
6160 arvif->u.ap.ssid_len = vif->cfg.ssid_len;
6161 if (vif->cfg.ssid_len)
6162 memcpy(arvif->u.ap.ssid, vif->cfg.ssid,
6163 vif->cfg.ssid_len);
6164 arvif->u.ap.hidden_ssid = info->hidden_ssid;
6165 }
6166
6167 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
6168 ether_addr_copy(arvif->bssid, info->bssid);
6169
6170 if (changed & BSS_CHANGED_FTM_RESPONDER &&
6171 arvif->ftm_responder != info->ftm_responder &&
6172 test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) {
6173 arvif->ftm_responder = info->ftm_responder;
6174
6175 vdev_param = ar->wmi.vdev_param->rtt_responder_role;
6176 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6177 arvif->ftm_responder);
6178
6179 ath10k_dbg(ar, ATH10K_DBG_MAC,
6180 "mac vdev %d ftm_responder %d:ret %d\n",
6181 arvif->vdev_id, arvif->ftm_responder, ret);
6182 }
6183
6184 if (changed & BSS_CHANGED_BEACON_ENABLED)
6185 ath10k_control_beaconing(arvif, info);
6186
6187 if (changed & BSS_CHANGED_ERP_CTS_PROT) {
6188 arvif->use_cts_prot = info->use_cts_prot;
6189
6190 ret = ath10k_recalc_rtscts_prot(arvif);
6191 if (ret)
6192 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
6193 arvif->vdev_id, ret);
6194
6195 if (ath10k_mac_can_set_cts_prot(arvif)) {
6196 ret = ath10k_mac_set_cts_prot(arvif);
6197 if (ret)
6198 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
6199 arvif->vdev_id, ret);
6200 }
6201 }
6202
6203 if (changed & BSS_CHANGED_ERP_SLOT) {
6204 if (info->use_short_slot)
6205 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
6207 else
6208 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */
6209
6210 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
6211 arvif->vdev_id, slottime);
6212
6213 vdev_param = ar->wmi.vdev_param->slot_time;
6214 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6215 slottime);
6216 if (ret)
6217 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
6218 arvif->vdev_id, ret);
6219 }
6220
6221 if (changed & BSS_CHANGED_ERP_PREAMBLE) {
6222 if (info->use_short_preamble)
6223 preamble = WMI_VDEV_PREAMBLE_SHORT;
6224 else
6225 preamble = WMI_VDEV_PREAMBLE_LONG;
6226
6227 ath10k_dbg(ar, ATH10K_DBG_MAC,
6228 "mac vdev %d preamble %dn",
6229 arvif->vdev_id, preamble);
6230
6231 vdev_param = ar->wmi.vdev_param->preamble;
6232 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6233 preamble);
6234 if (ret)
6235 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
6236 arvif->vdev_id, ret);
6237 }
6238
6239 if (changed & BSS_CHANGED_ASSOC) {
6240 if (vif->cfg.assoc) {
6241 /* Workaround: Make sure monitor vdev is not running
6242 * when associating to prevent some firmware revisions
6243 * (e.g. 10.1 and 10.2) from crashing.
6244 */
6245 if (ar->monitor_started)
6246 ath10k_monitor_stop(ar);
6247 ath10k_bss_assoc(hw, vif, info);
6248 ath10k_monitor_recalc(ar);
6249 } else {
6250 ath10k_bss_disassoc(hw, vif);
6251 }
6252 }
6253
6254 if (changed & BSS_CHANGED_TXPOWER) {
6255 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n",
6256 arvif->vdev_id, info->txpower);
6257
6258 arvif->txpower = info->txpower;
6259 ret = ath10k_mac_txpower_recalc(ar);
6260 if (ret)
6261 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret);
6262 }
6263
6264 if (changed & BSS_CHANGED_PS) {
6265 arvif->ps = vif->cfg.ps;
6266
6267 ret = ath10k_config_ps(ar);
6268 if (ret)
6269 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n",
6270 arvif->vdev_id, ret);
6271 }
6272
6273 if (changed & BSS_CHANGED_MCAST_RATE &&
6274 !ath10k_mac_vif_chan(arvif->vif, &def)) {
6275 band = def.chan->band;
6276 mcast_rate = vif->bss_conf.mcast_rate[band];
6277 if (mcast_rate > 0)
6278 rateidx = mcast_rate - 1;
6279 else
6280 rateidx = ffs(vif->bss_conf.basic_rates) - 1;
6281
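/* ath10k_wmi_legacy_rates lists the CCK rates first, followed by the
 * OFDM rates; on 11a-capable PHYs the index is offset past the CCK
 * entries (ATH10K_MAC_FIRST_OFDM_RATE_IDX).
 */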
6282 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
6283 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
6284
6285 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate;
6286 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value;
6287 if (ath10k_mac_bitrate_is_cck(bitrate))
6288 preamble = WMI_RATE_PREAMBLE_CCK;
6289 else
6290 preamble = WMI_RATE_PREAMBLE_OFDM;
6291
6292 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble);
6293
6294 ath10k_dbg(ar, ATH10K_DBG_MAC,
6295 "mac vdev %d mcast_rate %x\n",
6296 arvif->vdev_id, rate);
6297
6298 vdev_param = ar->wmi.vdev_param->mcast_data_rate;
6299 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6300 vdev_param, rate);
6301 if (ret)
6302 ath10k_warn(ar,
6303 "failed to set mcast rate on vdev %i: %d\n",
6304 arvif->vdev_id, ret);
6305
6306 vdev_param = ar->wmi.vdev_param->bcast_data_rate;
6307 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
6308 vdev_param, rate);
6309 if (ret)
6310 ath10k_warn(ar,
6311 "failed to set bcast rate on vdev %i: %d\n",
6312 arvif->vdev_id, ret);
6313 }
6314
6315 if (changed & BSS_CHANGED_BASIC_RATES &&
6316 !ath10k_mac_vif_chan(arvif->vif, &def))
6317 ath10k_recalculate_mgmt_rate(ar, vif, &def);
6318
6319 mutex_unlock(&ar->conf_mutex);
6320 }
6321
6322 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value)
6323 {
6324 struct ath10k *ar = hw->priv;
6325
6326 /* This function should never be called if setting the coverage class
6327 * is not supported on this hardware.
6328 */
6329 if (!ar->hw_params.hw_ops->set_coverage_class) {
6330 WARN_ON_ONCE(1);
6331 return;
6332 }
6333 ar->hw_params.hw_ops->set_coverage_class(ar, value);
6334 }
6335
6336 struct ath10k_mac_tdls_iter_data {
6337 u32 num_tdls_stations;
6338 struct ieee80211_vif *curr_vif;
6339 };
6340
6341 static void ath10k_mac_tdls_vif_stations_count_iter(void *data,
6342 struct ieee80211_sta *sta)
6343 {
6344 struct ath10k_mac_tdls_iter_data *iter_data = data;
6345 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
6346 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
6347
6348 if (sta->tdls && sta_vif == iter_data->curr_vif)
6349 iter_data->num_tdls_stations++;
6350 }
6351
6352 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw,
6353 struct ieee80211_vif *vif)
6354 {
6355 struct ath10k_mac_tdls_iter_data data = {};
6356
6357 data.curr_vif = vif;
6358
6359 ieee80211_iterate_stations_atomic(hw,
6360 ath10k_mac_tdls_vif_stations_count_iter,
6361 &data);
6362 return data.num_tdls_stations;
6363 }
6364
6365 static int ath10k_hw_scan(struct ieee80211_hw *hw,
6366 struct ieee80211_vif *vif,
6367 struct ieee80211_scan_request *hw_req)
6368 {
6369 struct ath10k *ar = hw->priv;
6370 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6371 struct cfg80211_scan_request *req = &hw_req->req;
6372 struct wmi_start_scan_arg arg;
6373 int ret = 0;
6374 int i;
6375 u32 scan_timeout;
6376
6377 mutex_lock(&ar->conf_mutex);
6378
6379 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
6380 ret = -EBUSY;
6381 goto exit;
6382 }
6383
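/* Only one scan (or remain-on-channel) can be in flight; claim the
 * scan state machine here or bail out with -EBUSY.
 */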
6384 spin_lock_bh(&ar->data_lock);
6385 switch (ar->scan.state) {
6386 case ATH10K_SCAN_IDLE:
6387 reinit_completion(&ar->scan.started);
6388 reinit_completion(&ar->scan.completed);
6389 ar->scan.state = ATH10K_SCAN_STARTING;
6390 ar->scan.is_roc = false;
6391 ar->scan.vdev_id = arvif->vdev_id;
6392 ret = 0;
6393 break;
6394 case ATH10K_SCAN_STARTING:
6395 case ATH10K_SCAN_RUNNING:
6396 case ATH10K_SCAN_ABORTING:
6397 ret = -EBUSY;
6398 break;
6399 }
6400 spin_unlock_bh(&ar->data_lock);
6401
6402 if (ret)
6403 goto exit;
6404
6405 memset(&arg, 0, sizeof(arg));
6406 ath10k_wmi_start_scan_init(ar, &arg);
6407 arg.vdev_id = arvif->vdev_id;
6408 arg.scan_id = ATH10K_SCAN_ID;
6409
6410 if (req->ie_len) {
6411 arg.ie_len = req->ie_len;
6412 memcpy(arg.ie, req->ie, arg.ie_len);
6413 }
6414
6415 if (req->n_ssids) {
6416 arg.n_ssids = req->n_ssids;
6417 for (i = 0; i < arg.n_ssids; i++) {
6418 arg.ssids[i].len = req->ssids[i].ssid_len;
6419 arg.ssids[i].ssid = req->ssids[i].ssid;
6420 }
6421 } else {
6422 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
6423 }
6424
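/* Scan MAC address randomization: ask firmware to use a spoofed
 * source address (addr/mask as provided by cfg80211) in probe
 * requests.
 */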
6425 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) {
6426 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ;
6427 ether_addr_copy(arg.mac_addr.addr, req->mac_addr);
6428 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask);
6429 }
6430
6431 if (req->n_channels) {
6432 arg.n_channels = req->n_channels;
6433 for (i = 0; i < arg.n_channels; i++)
6434 arg.channels[i] = req->channels[i]->center_freq;
6435 }
6436
6437 /* if duration is set, default dwell times will be overwritten */
6438 if (req->duration) {
6439 arg.dwell_time_active = req->duration;
6440 arg.dwell_time_passive = req->duration;
6441 arg.burst_duration_ms = req->duration;
6442
6443 scan_timeout = min_t(u32, arg.max_rest_time *
6444 (arg.n_channels - 1) + (req->duration +
6445 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) *
6446 arg.n_channels, arg.max_scan_time);
6447 } else {
6448 scan_timeout = arg.max_scan_time;
6449 }
6450
6451 /* Add a 200ms margin to account for event/command processing */
6452 scan_timeout += 200;
6453
6454 ret = ath10k_start_scan(ar, &arg);
6455 if (ret) {
6456 ath10k_warn(ar, "failed to start hw scan: %d\n", ret);
6457 spin_lock_bh(&ar->data_lock);
6458 ar->scan.state = ATH10K_SCAN_IDLE;
6459 spin_unlock_bh(&ar->data_lock);
6460 }
6461
6462 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
6463 msecs_to_jiffies(scan_timeout));
6464
6465 exit:
6466 mutex_unlock(&ar->conf_mutex);
6467 return ret;
6468 }
6469
6470 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw,
6471 struct ieee80211_vif *vif)
6472 {
6473 struct ath10k *ar = hw->priv;
6474
6475 mutex_lock(&ar->conf_mutex);
6476 ath10k_scan_abort(ar);
6477 mutex_unlock(&ar->conf_mutex);
6478
6479 cancel_delayed_work_sync(&ar->scan.timeout);
6480 }
6481
6482 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar,
6483 struct ath10k_vif *arvif,
6484 enum set_key_cmd cmd,
6485 struct ieee80211_key_conf *key)
6486 {
6487 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid;
6488 int ret;
6489
6490 /* 10.1 firmware branch requires default key index to be set to group
6491 * key index after installing it. Otherwise FW/HW transmits corrupted
6492 * frames with multi-vif APs. This is not required for the main firmware
6493 * branch (e.g. 636).
6494 *
6495 * This is also needed for 636 fw for IBSS-RSN to work more reliably.
6496 *
6497 * FIXME: It remains unknown if this is required for multi-vif STA
6498 * interfaces on 10.1.
6499 */
6500
6501 if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
6502 arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
6503 return;
6504
6505 if (key->cipher == WLAN_CIPHER_SUITE_WEP40)
6506 return;
6507
6508 if (key->cipher == WLAN_CIPHER_SUITE_WEP104)
6509 return;
6510
6511 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
6512 return;
6513
6514 if (cmd != SET_KEY)
6515 return;
6516
6517 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
6518 key->keyidx);
6519 if (ret)
6520 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n",
6521 arvif->vdev_id, ret);
6522 }
6523
6524 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
6525 struct ieee80211_vif *vif, struct ieee80211_sta *sta,
6526 struct ieee80211_key_conf *key)
6527 {
6528 struct ath10k *ar = hw->priv;
6529 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6530 struct ath10k_sta *arsta;
6531 struct ath10k_peer *peer;
6532 const u8 *peer_addr;
6533 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 ||
6534 key->cipher == WLAN_CIPHER_SUITE_WEP104;
6535 int ret = 0;
6536 int ret2;
6537 u32 flags = 0;
6538 u32 flags2;
6539
6540 /* this one needs to be done in software */
6541 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
6542 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
6543 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 ||
6544 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256)
6545 return 1;
6546
6547 if (arvif->nohwcrypt)
6548 return 1;
6549
6550 if (key->keyidx > WMI_MAX_KEY_INDEX)
6551 return -ENOSPC;
6552
6553 mutex_lock(&ar->conf_mutex);
6554
6555 if (sta) {
6556 arsta = (struct ath10k_sta *)sta->drv_priv;
6557 peer_addr = sta->addr;
6558 spin_lock_bh(&ar->data_lock);
6559 arsta->ucast_cipher = key->cipher;
6560 spin_unlock_bh(&ar->data_lock);
6561 } else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) {
6562 peer_addr = vif->bss_conf.bssid;
6563 } else {
6564 peer_addr = vif->addr;
6565 }
6566
6567 key->hw_key_idx = key->keyidx;
6568
6569 if (is_wep) {
6570 if (cmd == SET_KEY)
6571 arvif->wep_keys[key->keyidx] = key;
6572 else
6573 arvif->wep_keys[key->keyidx] = NULL;
6574 }
6575
6576 /* The peer should not disappear midway (unless FW goes awry) since
6577 * we already hold conf_mutex. We just make sure it's there now.
6578 */
6579 spin_lock_bh(&ar->data_lock);
6580 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
6581 spin_unlock_bh(&ar->data_lock);
6582
6583 if (!peer) {
6584 if (cmd == SET_KEY) {
6585 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
6586 peer_addr);
6587 ret = -EOPNOTSUPP;
6588 goto exit;
6589 } else {
6590 /* if the peer doesn't exist there is no key to disable anymore */
6591 goto exit;
6592 }
6593 }
6594
6595 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
6596 flags |= WMI_KEY_PAIRWISE;
6597 else
6598 flags |= WMI_KEY_GROUP;
6599
6600 if (is_wep) {
6601 if (cmd == DISABLE_KEY)
6602 ath10k_clear_vdev_key(arvif, key);
6603
6604 /* When WEP keys are uploaded it's possible that there are
6605 * stations associated already (e.g. when merging) without any
6606 * keys. Static WEP needs an explicit per-peer key upload.
6607 */
6608 if (vif->type == NL80211_IFTYPE_ADHOC &&
6609 cmd == SET_KEY)
6610 ath10k_mac_vif_update_wep_key(arvif, key);
6611
6612 /* 802.1x never sets the def_wep_key_idx so each set_key()
6613 * call changes default tx key.
6614 *
6615 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
6616 * after first set_key().
6617 */
6618 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
6619 flags |= WMI_KEY_TX_USAGE;
6620 }
6621
6622 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
6623 if (ret) {
6624 WARN_ON(ret > 0);
6625 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
6626 arvif->vdev_id, peer_addr, ret);
6627 goto exit;
6628 }
6629
6630 /* mac80211 sets static WEP keys as groupwise while firmware requires
6631 * them to be installed twice as both pairwise and groupwise.
6632 */
6633 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) {
6634 flags2 = flags;
6635 flags2 &= ~WMI_KEY_GROUP;
6636 flags2 |= WMI_KEY_PAIRWISE;
6637
6638 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2);
6639 if (ret) {
6640 WARN_ON(ret > 0);
6641 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n",
6642 arvif->vdev_id, peer_addr, ret);
6643 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY,
6644 peer_addr, flags);
6645 if (ret2) {
6646 WARN_ON(ret2 > 0);
6647 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n",
6648 arvif->vdev_id, peer_addr, ret2);
6649 }
6650 goto exit;
6651 }
6652 }
6653
6654 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key);
6655
6656 spin_lock_bh(&ar->data_lock);
6657 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
6658 if (peer && cmd == SET_KEY)
6659 peer->keys[key->keyidx] = key;
6660 else if (peer && cmd == DISABLE_KEY)
6661 peer->keys[key->keyidx] = NULL;
6662 else if (peer == NULL)
6663 /* impossible unless FW goes crazy */
6664 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr);
6665 spin_unlock_bh(&ar->data_lock);
6666
6667 if (sta && sta->tdls)
6668 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6669 ar->wmi.peer_param->authorize, 1);
6670 else if (sta && cmd == SET_KEY && (key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
6671 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, peer_addr,
6672 ar->wmi.peer_param->authorize, 1);
6673
6674 exit:
6675 mutex_unlock(&ar->conf_mutex);
6676 return ret;
6677 }
6678
6679 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw,
6680 struct ieee80211_vif *vif,
6681 int keyidx)
6682 {
6683 struct ath10k *ar = hw->priv;
6684 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6685 int ret;
6686
6687 mutex_lock(&arvif->ar->conf_mutex);
6688
6689 if (arvif->ar->state != ATH10K_STATE_ON)
6690 goto unlock;
6691
6692 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n",
6693 arvif->vdev_id, keyidx);
6694
6695 ret = ath10k_wmi_vdev_set_param(arvif->ar,
6696 arvif->vdev_id,
6697 arvif->ar->wmi.vdev_param->def_keyid,
6698 keyidx);
6699
6700 if (ret) {
6701 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n",
6702 arvif->vdev_id,
6703 ret);
6704 goto unlock;
6705 }
6706
6707 arvif->def_wep_key_idx = keyidx;
6708
6709 unlock:
6710 mutex_unlock(&arvif->ar->conf_mutex);
6711 }
6712
6713 static void ath10k_sta_rc_update_wk(struct work_struct *wk)
6714 {
6715 struct ath10k *ar;
6716 struct ath10k_vif *arvif;
6717 struct ath10k_sta *arsta;
6718 struct ieee80211_sta *sta;
6719 struct cfg80211_chan_def def;
6720 enum nl80211_band band;
6721 const u8 *ht_mcs_mask;
6722 const u16 *vht_mcs_mask;
6723 u32 changed, bw, nss, smps;
6724 int err;
6725
6726 arsta = container_of(wk, struct ath10k_sta, update_wk);
6727 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
6728 arvif = arsta->arvif;
6729 ar = arvif->ar;
6730
6731 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
6732 return;
6733
6734 band = def.chan->band;
6735 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs;
6736 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs;
6737
6738 spin_lock_bh(&ar->data_lock);
6739
6740 changed = arsta->changed;
6741 arsta->changed = 0;
6742
6743 bw = arsta->bw;
6744 nss = arsta->nss;
6745 smps = arsta->smps;
6746
6747 spin_unlock_bh(&ar->data_lock);
6748
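/* The values above were snapshotted under data_lock; the WMI updates
 * below may sleep, so they are done under conf_mutex instead.
 */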
6749 mutex_lock(&ar->conf_mutex);
6750
6751 nss = max_t(u32, 1, nss);
6752 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask),
6753 ath10k_mac_max_vht_nss(vht_mcs_mask)));
6754
6755 if (changed & IEEE80211_RC_BW_CHANGED) {
6756 enum wmi_phy_mode mode;
6757
6758 mode = chan_to_phymode(&def);
6759 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM peer bw %d phymode %d\n",
6760 sta->addr, bw, mode);
6761
6762 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6763 ar->wmi.peer_param->phymode, mode);
6764 if (err) {
6765 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n",
6766 sta->addr, mode, err);
6767 goto exit;
6768 }
6769
6770 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6771 ar->wmi.peer_param->chan_width, bw);
6772 if (err)
6773 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n",
6774 sta->addr, bw, err);
6775 }
6776
6777 if (changed & IEEE80211_RC_NSS_CHANGED) {
6778 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM nss %d\n",
6779 sta->addr, nss);
6780
6781 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6782 ar->wmi.peer_param->nss, nss);
6783 if (err)
6784 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n",
6785 sta->addr, nss, err);
6786 }
6787
6788 if (changed & IEEE80211_RC_SMPS_CHANGED) {
6789 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM smps %d\n",
6790 sta->addr, smps);
6791
6792 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6793 ar->wmi.peer_param->smps_state, smps);
6794 if (err)
6795 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n",
6796 sta->addr, smps, err);
6797 }
6798
6799 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) {
6800 ath10k_dbg(ar, ATH10K_DBG_STA, "mac update sta %pM supp rates\n",
6801 sta->addr);
6802
6803 err = ath10k_station_assoc(ar, arvif->vif, sta, true);
6804 if (err)
6805 ath10k_warn(ar, "failed to reassociate station: %pM\n",
6806 sta->addr);
6807 }
6808
6809 exit:
6810 mutex_unlock(&ar->conf_mutex);
6811 }
6812
6813 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif,
6814 struct ieee80211_sta *sta)
6815 {
6816 struct ath10k *ar = arvif->ar;
6817
6818 lockdep_assert_held(&ar->conf_mutex);
6819
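/* On a STA vdev only TDLS peers count against the firmware station
 * limit; the AP peer itself is not counted.
 */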
6820 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6821 return 0;
6822
6823 if (ar->num_stations >= ar->max_num_stations)
6824 return -ENOBUFS;
6825
6826 ar->num_stations++;
6827
6828 return 0;
6829 }
6830
6831 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif,
6832 struct ieee80211_sta *sta)
6833 {
6834 struct ath10k *ar = arvif->ar;
6835
6836 lockdep_assert_held(&ar->conf_mutex);
6837
6838 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls)
6839 return;
6840
6841 ar->num_stations--;
6842 }
6843
6844 static int ath10k_sta_set_txpwr(struct ieee80211_hw *hw,
6845 struct ieee80211_vif *vif,
6846 struct ieee80211_sta *sta)
6847 {
6848 struct ath10k *ar = hw->priv;
6849 struct ath10k_vif *arvif = (void *)vif->drv_priv;
6850 int ret = 0;
6851 s16 txpwr;
6852
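/* NL80211_TX_POWER_AUTOMATIC is conveyed to firmware as 0, i.e. no
 * fixed per-station tx power.
 */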
6853 if (sta->deflink.txpwr.type == NL80211_TX_POWER_AUTOMATIC) {
6854 txpwr = 0;
6855 } else {
6856 txpwr = sta->deflink.txpwr.power;
6857 if (!txpwr)
6858 return -EINVAL;
6859 }
6860
6861 if (txpwr > ATH10K_TX_POWER_MAX_VAL || txpwr < ATH10K_TX_POWER_MIN_VAL)
6862 return -EINVAL;
6863
6864 mutex_lock(&ar->conf_mutex);
6865
6866 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
6867 ar->wmi.peer_param->use_fixed_power, txpwr);
6868 if (ret) {
6869 ath10k_warn(ar, "failed to set tx power for station ret: %d\n",
6870 ret);
6871 goto out;
6872 }
6873
6874 out:
6875 mutex_unlock(&ar->conf_mutex);
6876 return ret;
6877 }
6878
6879 struct ath10k_mac_iter_tid_conf_data {
6880 struct ieee80211_vif *curr_vif;
6881 struct ath10k *ar;
6882 bool reset_config;
6883 };
6884
6885 static bool
6886 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar,
6887 enum nl80211_band band,
6888 const struct cfg80211_bitrate_mask *mask,
6889 int *vht_num_rates)
6890 {
6891 int num_rates = 0;
6892 int i, tmp;
6893
6894 num_rates += hweight32(mask->control[band].legacy);
6895
6896 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++)
6897 num_rates += hweight8(mask->control[band].ht_mcs[i]);
6898
6899 *vht_num_rates = 0;
6900 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6901 tmp = hweight16(mask->control[band].vht_mcs[i]);
6902 num_rates += tmp;
6903 *vht_num_rates += tmp;
6904 }
6905
6906 return num_rates == 1;
6907 }
6908
6909 static int
6910 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar,
6911 enum nl80211_band band,
6912 const struct cfg80211_bitrate_mask *mask,
6913 u8 *rate, u8 *nss, bool vht_only)
6914 {
6915 int rate_idx;
6916 int i;
6917 u16 bitrate;
6918 u8 preamble;
6919 u8 hw_rate;
6920
6921 if (vht_only)
6922 goto next;
6923
6924 if (hweight32(mask->control[band].legacy) == 1) {
6925 rate_idx = ffs(mask->control[band].legacy) - 1;
6926
6927 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY)
6928 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX;
6929
6930 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value;
6931 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate;
6932
6933 if (ath10k_mac_bitrate_is_cck(bitrate))
6934 preamble = WMI_RATE_PREAMBLE_CCK;
6935 else
6936 preamble = WMI_RATE_PREAMBLE_OFDM;
6937
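/* Build the WMI rate code: bits [7:6] preamble, [5:4] NSS - 1,
 * [3:0] hardware rate / MCS index.
 */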
6938 *nss = 1;
6939 *rate = preamble << 6 |
6940 (*nss - 1) << 4 |
6941 hw_rate << 0;
6942
6943 return 0;
6944 }
6945
6946 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
6947 if (hweight8(mask->control[band].ht_mcs[i]) == 1) {
6948 *nss = i + 1;
6949 *rate = WMI_RATE_PREAMBLE_HT << 6 |
6950 (*nss - 1) << 4 |
6951 (ffs(mask->control[band].ht_mcs[i]) - 1);
6952
6953 return 0;
6954 }
6955 }
6956
6957 next:
6958 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
6959 if (hweight16(mask->control[band].vht_mcs[i]) == 1) {
6960 *nss = i + 1;
6961 *rate = WMI_RATE_PREAMBLE_VHT << 6 |
6962 (*nss - 1) << 4 |
6963 (ffs(mask->control[band].vht_mcs[i]) - 1);
6964
6965 return 0;
6966 }
6967 }
6968
6969 return -EINVAL;
6970 }
6971
6972 static int ath10k_mac_validate_rate_mask(struct ath10k *ar,
6973 struct ieee80211_sta *sta,
6974 u32 rate_ctrl_flag, u8 nss)
6975 {
6976 struct ieee80211_sta_ht_cap *ht_cap = &sta->deflink.ht_cap;
6977 struct ieee80211_sta_vht_cap *vht_cap = &sta->deflink.vht_cap;
6978
6979 if (nss > sta->deflink.rx_nss) {
6980 ath10k_warn(ar, "Invalid nss field, configured %u limit %u\n",
6981 nss, sta->deflink.rx_nss);
6982 return -EINVAL;
6983 }
6984
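/* The preamble encoded in the rate code must match the peer's
 * capabilities: VHT rates need a VHT peer, HT rates an HT-only peer,
 * and legacy rates a peer with neither HT nor VHT.
 */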
6985 if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_VHT) {
6986 if (!vht_cap->vht_supported) {
6987 ath10k_warn(ar, "Invalid VHT rate for sta %pM\n",
6988 sta->addr);
6989 return -EINVAL;
6990 }
6991 } else if (ATH10K_HW_PREAMBLE(rate_ctrl_flag) == WMI_RATE_PREAMBLE_HT) {
6992 if (!ht_cap->ht_supported || vht_cap->vht_supported) {
6993 ath10k_warn(ar, "Invalid HT rate for sta %pM\n",
6994 sta->addr);
6995 return -EINVAL;
6996 }
6997 } else {
6998 if (ht_cap->ht_supported || vht_cap->vht_supported)
6999 return -EINVAL;
7000 }
7001
7002 return 0;
7003 }
7004
7005 static int
7006 ath10k_mac_tid_bitrate_config(struct ath10k *ar,
7007 struct ieee80211_vif *vif,
7008 struct ieee80211_sta *sta,
7009 u32 *rate_ctrl_flag, u8 *rate_ctrl,
7010 enum nl80211_tx_rate_setting txrate_type,
7011 const struct cfg80211_bitrate_mask *mask)
7012 {
7013 struct cfg80211_chan_def def;
7014 enum nl80211_band band;
7015 u8 nss, rate;
7016 int vht_num_rates, ret;
7017
7018 if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
7019 return -EINVAL;
7020
7021 if (txrate_type == NL80211_TX_RATE_AUTOMATIC) {
7022 *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7023 *rate_ctrl_flag = 0;
7024 return 0;
7025 }
7026
7027 band = def.chan->band;
7028
7029 if (!ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
7030 &vht_num_rates)) {
7031 return -EINVAL;
7032 }
7033
7034 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
7035 &rate, &nss, false);
7036 if (ret) {
7037 ath10k_warn(ar, "failed to get single rate: %d\n",
7038 ret);
7039 return ret;
7040 }
7041
7042 *rate_ctrl_flag = rate;
7043
7044 if (sta && ath10k_mac_validate_rate_mask(ar, sta, *rate_ctrl_flag, nss))
7045 return -EINVAL;
7046
7047 if (txrate_type == NL80211_TX_RATE_FIXED)
7048 *rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_FIXED_RATE;
7049 else if (txrate_type == NL80211_TX_RATE_LIMITED &&
7050 (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
7051 ar->wmi.svc_map)))
7052 *rate_ctrl = WMI_PEER_TID_CONFIG_RATE_UPPER_CAP;
7053 else
7054 return -EOPNOTSUPP;
7055
7056 return 0;
7057 }
7058
7059 static int ath10k_mac_set_tid_config(struct ath10k *ar, struct ieee80211_sta *sta,
7060 struct ieee80211_vif *vif, u32 changed,
7061 struct wmi_per_peer_per_tid_cfg_arg *arg)
7062 {
7063 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7064 struct ath10k_sta *arsta;
7065 int ret;
7066
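/* With a station this pushes the per-peer TID configuration to
 * firmware right away; without one the values are only cached in the
 * vif as defaults and applied to matching stations later (see
 * ath10k_sta_tid_cfg_wk).
 */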
7067 if (sta) {
7068 if (!sta->wme)
7069 return -EOPNOTSUPP;
7070
7071 arsta = (struct ath10k_sta *)sta->drv_priv;
7072
7073 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7074 if ((arsta->retry_long[arg->tid] > 0 ||
7075 arsta->rate_code[arg->tid] > 0 ||
7076 arsta->ampdu[arg->tid] ==
7077 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
7078 arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
7079 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
7080 arg->ack_policy = 0;
7081 arg->aggr_control = 0;
7082 arg->rate_ctrl = 0;
7083 arg->rcode_flags = 0;
7084 }
7085 }
7086
7087 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7088 if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
7089 arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7090 arg->aggr_control = 0;
7091 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
7092 }
7093 }
7094
7095 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7096 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7097 if (arsta->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK ||
7098 arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7099 arg->rate_ctrl = 0;
7100 arg->rcode_flags = 0;
7101 }
7102 }
7103
7104 ether_addr_copy(arg->peer_macaddr.addr, sta->addr);
7105
7106 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, arg);
7107 if (ret)
7108 return ret;
7109
7110 /* Store the configured parameters in success case */
7111 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7112 arsta->noack[arg->tid] = arg->ack_policy;
7113 arg->ack_policy = 0;
7114 arg->aggr_control = 0;
7115 arg->rate_ctrl = 0;
7116 arg->rcode_flags = 0;
7117 }
7118
7119 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7120 arsta->retry_long[arg->tid] = arg->retry_count;
7121 arg->retry_count = 0;
7122 }
7123
7124 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7125 arsta->ampdu[arg->tid] = arg->aggr_control;
7126 arg->aggr_control = 0;
7127 }
7128
7129 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7130 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7131 arsta->rate_ctrl[arg->tid] = arg->rate_ctrl;
7132 arg->rate_ctrl = 0;
7133 arg->rcode_flags = 0;
7134 }
7135
7136 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7137 arsta->rtscts[arg->tid] = arg->rtscts_ctrl;
7138 arg->ext_tid_cfg_bitmap = 0;
7139 }
7140 } else {
7141 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7142 if ((arvif->retry_long[arg->tid] ||
7143 arvif->rate_code[arg->tid] ||
7144 arvif->ampdu[arg->tid] ==
7145 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE) &&
7146 arg->ack_policy == WMI_PEER_TID_CONFIG_NOACK) {
7147 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_NOACK);
7148 } else {
7149 arvif->noack[arg->tid] = arg->ack_policy;
7150 arvif->ampdu[arg->tid] = arg->aggr_control;
7151 arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
7152 }
7153 }
7154
7155 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7156 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
7157 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG);
7158 else
7159 arvif->retry_long[arg->tid] = arg->retry_count;
7160 }
7161
7162 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7163 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK)
7164 changed &= ~BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL);
7165 else
7166 arvif->ampdu[arg->tid] = arg->aggr_control;
7167 }
7168
7169 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7170 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7171 if (arvif->noack[arg->tid] == WMI_PEER_TID_CONFIG_NOACK) {
7172 changed &= ~(BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7173 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE));
7174 } else {
7175 arvif->rate_ctrl[arg->tid] = arg->rate_ctrl;
7176 arvif->rate_code[arg->tid] = arg->rcode_flags;
7177 }
7178 }
7179
7180 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7181 arvif->rtscts[arg->tid] = arg->rtscts_ctrl;
7182 arg->ext_tid_cfg_bitmap = 0;
7183 }
7184
7185 if (changed)
7186 arvif->tid_conf_changed[arg->tid] |= changed;
7187 }
7188
7189 return 0;
7190 }
7191
7192 static int
7193 ath10k_mac_parse_tid_config(struct ath10k *ar,
7194 struct ieee80211_sta *sta,
7195 struct ieee80211_vif *vif,
7196 struct cfg80211_tid_cfg *tid_conf,
7197 struct wmi_per_peer_per_tid_cfg_arg *arg)
7198 {
7199 u32 changed = tid_conf->mask;
7200 int ret = 0, i = 0;
7201
7202 if (!changed)
7203 return -EINVAL;
7204
7205 while (i < ATH10K_TID_MAX) {
7206 if (!(tid_conf->tids & BIT(i))) {
7207 i++;
7208 continue;
7209 }
7210
7211 arg->tid = i;
7212
7213 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7214 if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE) {
7215 arg->ack_policy = WMI_PEER_TID_CONFIG_NOACK;
7216 arg->rate_ctrl =
7217 WMI_TID_CONFIG_RATE_CONTROL_DEFAULT_LOWEST_RATE;
7218 arg->aggr_control =
7219 WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
7220 } else {
7221 arg->ack_policy =
7222 WMI_PEER_TID_CONFIG_ACK;
7223 arg->rate_ctrl =
7224 WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7225 arg->aggr_control =
7226 WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7227 }
7228 }
7229
7230 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG))
7231 arg->retry_count = tid_conf->retry_long;
7232
7233 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7234 if (tid_conf->noack == NL80211_TID_CONFIG_ENABLE)
7235 arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7236 else
7237 arg->aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_DISABLE;
7238 }
7239
7240 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7241 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7242 ret = ath10k_mac_tid_bitrate_config(ar, vif, sta,
7243 &arg->rcode_flags,
7244 &arg->rate_ctrl,
7245 tid_conf->txrate_type,
7246 &tid_conf->txrate_mask);
7247 if (ret) {
7248 ath10k_warn(ar, "failed to configure bitrate mask %d\n",
7249 ret);
7250 arg->rcode_flags = 0;
7251 arg->rate_ctrl = 0;
7252 }
7253 }
7254
7255 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7256 if (tid_conf->rtscts)
7257 arg->rtscts_ctrl = tid_conf->rtscts;
7258
7259 arg->ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
7260 }
7261
7262 ret = ath10k_mac_set_tid_config(ar, sta, vif, changed, arg);
7263 if (ret)
7264 return ret;
7265 i++;
7266 }
7267
7268 return ret;
7269 }
7270
7271 static int ath10k_mac_reset_tid_config(struct ath10k *ar,
7272 struct ieee80211_sta *sta,
7273 struct ath10k_vif *arvif,
7274 u8 tids)
7275 {
7276 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7277 struct wmi_per_peer_per_tid_cfg_arg arg;
7278 int ret = 0, i = 0;
7279
7280 arg.vdev_id = arvif->vdev_id;
7281 while (i < ATH10K_TID_MAX) {
7282 if (!(tids & BIT(i))) {
7283 i++;
7284 continue;
7285 }
7286
7287 arg.tid = i;
7288 arg.ack_policy = WMI_PEER_TID_CONFIG_ACK;
7289 arg.retry_count = ATH10K_MAX_RETRY_COUNT;
7290 arg.rate_ctrl = WMI_TID_CONFIG_RATE_CONTROL_AUTO;
7291 arg.aggr_control = WMI_TID_CONFIG_AGGR_CONTROL_ENABLE;
7292 arg.rtscts_ctrl = WMI_TID_CONFIG_RTSCTS_CONTROL_ENABLE;
7293 arg.ext_tid_cfg_bitmap = WMI_EXT_TID_RTS_CTS_CONFIG;
7294
7295 ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
7296
7297 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
7298 if (ret)
7299 return ret;
7300
7301 if (!arvif->tids_rst) {
7302 arsta->retry_long[i] = -1;
7303 arsta->noack[i] = -1;
7304 arsta->ampdu[i] = -1;
7305 arsta->rate_code[i] = -1;
7306 arsta->rate_ctrl[i] = 0;
7307 arsta->rtscts[i] = -1;
7308 } else {
7309 arvif->retry_long[i] = 0;
7310 arvif->noack[i] = 0;
7311 arvif->ampdu[i] = 0;
7312 arvif->rate_code[i] = 0;
7313 arvif->rate_ctrl[i] = 0;
7314 arvif->rtscts[i] = 0;
7315 }
7316
7317 i++;
7318 }
7319
7320 return ret;
7321 }
7322
7323 static void ath10k_sta_tid_cfg_wk(struct work_struct *wk)
7324 {
7325 struct wmi_per_peer_per_tid_cfg_arg arg = {};
7326 struct ieee80211_sta *sta;
7327 struct ath10k_sta *arsta;
7328 struct ath10k_vif *arvif;
7329 struct ath10k *ar;
7330 bool config_apply;
7331 int ret, i;
7332 u32 changed;
7333 u8 nss;
7334
7335 arsta = container_of(wk, struct ath10k_sta, tid_config_wk);
7336 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv);
7337 arvif = arsta->arvif;
7338 ar = arvif->ar;
7339
7340 mutex_lock(&ar->conf_mutex);
7341
7342 if (arvif->tids_rst) {
7343 ret = ath10k_mac_reset_tid_config(ar, sta, arvif,
7344 arvif->tids_rst);
7345 goto exit;
7346 }
7347
7348 ether_addr_copy(arg.peer_macaddr.addr, sta->addr);
7349
7350 for (i = 0; i < ATH10K_TID_MAX; i++) {
7351 config_apply = false;
7352 changed = arvif->tid_conf_changed[i];
7353
7354 if (changed & BIT(NL80211_TID_CONFIG_ATTR_NOACK)) {
7355 if (arsta->noack[i] != -1) {
7356 arg.ack_policy = 0;
7357 } else {
7358 config_apply = true;
7359 arg.ack_policy = arvif->noack[i];
7360 arg.aggr_control = arvif->ampdu[i];
7361 arg.rate_ctrl = arvif->rate_ctrl[i];
7362 }
7363 }
7364
7365 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG)) {
7366 if (arsta->retry_long[i] != -1 ||
7367 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7368 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7369 arg.retry_count = 0;
7370 } else {
7371 arg.retry_count = arvif->retry_long[i];
7372 config_apply = true;
7373 }
7374 }
7375
7376 if (changed & BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL)) {
7377 if (arsta->ampdu[i] != -1 ||
7378 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7379 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7380 arg.aggr_control = 0;
7381 } else {
7382 arg.aggr_control = arvif->ampdu[i];
7383 config_apply = true;
7384 }
7385 }
7386
7387 if (changed & (BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
7388 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE))) {
7389 nss = ATH10K_HW_NSS(arvif->rate_code[i]);
7390 ret = ath10k_mac_validate_rate_mask(ar, sta,
7391 arvif->rate_code[i],
7392 nss);
7393 if (ret &&
7394 arvif->rate_ctrl[i] > WMI_TID_CONFIG_RATE_CONTROL_AUTO) {
7395 arg.rate_ctrl = 0;
7396 arg.rcode_flags = 0;
7397 }
7398
7399 if (arsta->rate_ctrl[i] >
7400 WMI_TID_CONFIG_RATE_CONTROL_AUTO ||
7401 arsta->noack[i] == WMI_PEER_TID_CONFIG_NOACK ||
7402 arvif->noack[i] == WMI_PEER_TID_CONFIG_NOACK) {
7403 arg.rate_ctrl = 0;
7404 arg.rcode_flags = 0;
7405 } else {
7406 arg.rate_ctrl = arvif->rate_ctrl[i];
7407 arg.rcode_flags = arvif->rate_code[i];
7408 config_apply = true;
7409 }
7410 }
7411
7412 if (changed & BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL)) {
7413 if (arsta->rtscts[i]) {
7414 arg.rtscts_ctrl = 0;
7415 arg.ext_tid_cfg_bitmap = 0;
7416 } else {
7417 arg.rtscts_ctrl = arvif->rtscts[i] - 1;
7418 arg.ext_tid_cfg_bitmap =
7419 WMI_EXT_TID_RTS_CTS_CONFIG;
7420 config_apply = true;
7421 }
7422 }
7423
7424 arg.tid = i;
7425
7426 if (config_apply) {
7427 ret = ath10k_wmi_set_per_peer_per_tid_cfg(ar, &arg);
7428 if (ret)
7429 ath10k_warn(ar, "failed to set per tid config for sta %pM: %d\n",
7430 sta->addr, ret);
7431 }
7432
7433 arg.ack_policy = 0;
7434 arg.retry_count = 0;
7435 arg.aggr_control = 0;
7436 arg.rate_ctrl = 0;
7437 arg.rcode_flags = 0;
7438 }
7439
7440 exit:
7441 mutex_unlock(&ar->conf_mutex);
7442 }
7443
static void ath10k_mac_vif_stations_tid_conf(void *data,
					     struct ieee80211_sta *sta)
7446 {
7447 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7448 struct ath10k_mac_iter_tid_conf_data *iter_data = data;
7449 struct ieee80211_vif *sta_vif = arsta->arvif->vif;
7450
7451 if (sta_vif != iter_data->curr_vif || !sta->wme)
7452 return;
7453
7454 ieee80211_queue_work(iter_data->ar->hw, &arsta->tid_config_wk);
7455 }
7456
static int ath10k_sta_state(struct ieee80211_hw *hw,
			    struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta,
			    enum ieee80211_sta_state old_state,
			    enum ieee80211_sta_state new_state)
7462 {
7463 struct ath10k *ar = hw->priv;
7464 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7465 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
7466 struct ath10k_peer *peer;
7467 int ret = 0;
7468 int i;
7469
7470 if (old_state == IEEE80211_STA_NOTEXIST &&
7471 new_state == IEEE80211_STA_NONE) {
7472 memset(arsta, 0, sizeof(*arsta));
7473 arsta->arvif = arvif;
7474 arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED;
7475 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk);
7476 INIT_WORK(&arsta->tid_config_wk, ath10k_sta_tid_cfg_wk);
7477
7478 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
7479 ath10k_mac_txq_init(sta->txq[i]);
7480 }
7481
7482 /* cancel must be done outside the mutex to avoid deadlock */
7483 if ((old_state == IEEE80211_STA_NONE &&
7484 new_state == IEEE80211_STA_NOTEXIST)) {
7485 cancel_work_sync(&arsta->update_wk);
7486 cancel_work_sync(&arsta->tid_config_wk);
7487 }
7488
7489 mutex_lock(&ar->conf_mutex);
7490
7491 if (old_state == IEEE80211_STA_NOTEXIST &&
7492 new_state == IEEE80211_STA_NONE) {
7493 /*
7494 * New station addition.
7495 */
7496 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
7497 u32 num_tdls_stations;
7498
7499 ath10k_dbg(ar, ATH10K_DBG_STA,
7500 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
7501 arvif->vdev_id, sta->addr,
7502 ar->num_stations + 1, ar->max_num_stations,
7503 ar->num_peers + 1, ar->max_num_peers);
7504
7505 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
7506
7507 if (sta->tdls) {
7508 if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
7509 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
7510 arvif->vdev_id,
7511 ar->max_num_tdls_vdevs);
7512 ret = -ELNRNG;
7513 goto exit;
7514 }
7515 peer_type = WMI_PEER_TYPE_TDLS;
7516 }
7517
7518 ret = ath10k_mac_inc_num_stations(arvif, sta);
7519 if (ret) {
7520 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
7521 ar->max_num_stations);
7522 goto exit;
7523 }
7524
7525 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
7526 arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
7527 GFP_KERNEL);
7528 if (!arsta->tx_stats) {
7529 ath10k_mac_dec_num_stations(arvif, sta);
7530 ret = -ENOMEM;
7531 goto exit;
7532 }
7533 }
7534
7535 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
7536 sta->addr, peer_type);
7537 if (ret) {
7538 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
7539 sta->addr, arvif->vdev_id, ret);
7540 ath10k_mac_dec_num_stations(arvif, sta);
7541 kfree(arsta->tx_stats);
7542 goto exit;
7543 }
7544
7545 spin_lock_bh(&ar->data_lock);
7546
7547 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
7548 if (!peer) {
7549 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
7550 vif->addr, arvif->vdev_id);
7551 spin_unlock_bh(&ar->data_lock);
7552 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
7553 ath10k_mac_dec_num_stations(arvif, sta);
7554 kfree(arsta->tx_stats);
7555 ret = -ENOENT;
7556 goto exit;
7557 }
7558
7559 arsta->peer_id = find_first_bit(peer->peer_ids,
7560 ATH10K_MAX_NUM_PEER_IDS);
7561
7562 spin_unlock_bh(&ar->data_lock);
7563
7564 if (!sta->tdls)
7565 goto exit;
7566
7567 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
7568 WMI_TDLS_ENABLE_ACTIVE);
7569 if (ret) {
7570 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
7571 arvif->vdev_id, ret);
7572 ath10k_peer_delete(ar, arvif->vdev_id,
7573 sta->addr);
7574 ath10k_mac_dec_num_stations(arvif, sta);
7575 kfree(arsta->tx_stats);
7576 goto exit;
7577 }
7578
7579 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
7580 WMI_TDLS_PEER_STATE_PEERING);
7581 if (ret) {
7582 ath10k_warn(ar,
7583 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
7584 sta->addr, arvif->vdev_id, ret);
7585 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
7586 ath10k_mac_dec_num_stations(arvif, sta);
7587 kfree(arsta->tx_stats);
7588
7589 if (num_tdls_stations != 0)
7590 goto exit;
7591 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
7592 WMI_TDLS_DISABLE);
7593 }
7594 } else if ((old_state == IEEE80211_STA_NONE &&
7595 new_state == IEEE80211_STA_NOTEXIST)) {
7596 /*
7597 * Existing station deletion.
7598 */
7599 ath10k_dbg(ar, ATH10K_DBG_STA,
7600 "mac vdev %d peer delete %pM sta %pK (sta gone)\n",
7601 arvif->vdev_id, sta->addr, sta);
7602
7603 if (sta->tdls) {
7604 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id,
7605 sta,
7606 WMI_TDLS_PEER_STATE_TEARDOWN);
7607 if (ret)
7608 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n",
7609 sta->addr,
7610 WMI_TDLS_PEER_STATE_TEARDOWN, ret);
7611 }
7612
7613 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
7614 if (ret)
7615 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n",
7616 sta->addr, arvif->vdev_id, ret);
7617
7618 ath10k_mac_dec_num_stations(arvif, sta);
7619
7620 spin_lock_bh(&ar->data_lock);
7621 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
7622 peer = ar->peer_map[i];
7623 if (!peer)
7624 continue;
7625
7626 if (peer->sta == sta) {
7627 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n",
7628 sta->addr, peer, i, arvif->vdev_id);
7629 peer->sta = NULL;
7630
7631 /* Clean up the peer object as well since we
7632 * must have failed to do this above.
7633 */
7634 ath10k_peer_map_cleanup(ar, peer);
7635 }
7636 }
7637 spin_unlock_bh(&ar->data_lock);
7638
7639 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
7640 kfree(arsta->tx_stats);
7641 arsta->tx_stats = NULL;
7642 }
7643
7644 for (i = 0; i < ARRAY_SIZE(sta->txq); i++)
7645 ath10k_mac_txq_unref(ar, sta->txq[i]);
7646
7647 if (!sta->tdls)
7648 goto exit;
7649
7650 if (ath10k_mac_tdls_vif_stations_count(hw, vif))
7651 goto exit;
7652
7653 /* This was the last tdls peer in current vif */
7654 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
7655 WMI_TDLS_DISABLE);
7656 if (ret) {
7657 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
7658 arvif->vdev_id, ret);
7659 }
7660 } else if (old_state == IEEE80211_STA_AUTH &&
7661 new_state == IEEE80211_STA_ASSOC &&
7662 (vif->type == NL80211_IFTYPE_AP ||
7663 vif->type == NL80211_IFTYPE_MESH_POINT ||
7664 vif->type == NL80211_IFTYPE_ADHOC)) {
7665 /*
7666 * New association.
7667 */
7668 ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM associated\n",
7669 sta->addr);
7670
7671 ret = ath10k_station_assoc(ar, vif, sta, false);
7672 if (ret)
7673 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n",
7674 sta->addr, arvif->vdev_id, ret);
7675 } else if (old_state == IEEE80211_STA_ASSOC &&
7676 new_state == IEEE80211_STA_AUTHORIZED &&
7677 sta->tdls) {
7678 /*
7679 * Tdls station authorized.
7680 */
7681 ath10k_dbg(ar, ATH10K_DBG_STA, "mac tdls sta %pM authorized\n",
7682 sta->addr);
7683
7684 ret = ath10k_station_assoc(ar, vif, sta, false);
7685 if (ret) {
7686 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n",
7687 sta->addr, arvif->vdev_id, ret);
7688 goto exit;
7689 }
7690
7691 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
7692 WMI_TDLS_PEER_STATE_CONNECTED);
7693 if (ret)
7694 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n",
7695 sta->addr, arvif->vdev_id, ret);
7696 } else if (old_state == IEEE80211_STA_ASSOC &&
7697 new_state == IEEE80211_STA_AUTH &&
7698 (vif->type == NL80211_IFTYPE_AP ||
7699 vif->type == NL80211_IFTYPE_MESH_POINT ||
7700 vif->type == NL80211_IFTYPE_ADHOC)) {
7701 /*
7702 * Disassociation.
7703 */
7704 ath10k_dbg(ar, ATH10K_DBG_STA, "mac sta %pM disassociated\n",
7705 sta->addr);
7706
7707 ret = ath10k_station_disassoc(ar, vif, sta);
7708 if (ret)
7709 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n",
7710 sta->addr, arvif->vdev_id, ret);
7711 }
7712 exit:
7713 mutex_unlock(&ar->conf_mutex);
7714 return ret;
7715 }
7716
static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif,
				u16 ac, bool enable)
7719 {
7720 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7721 struct wmi_sta_uapsd_auto_trig_arg arg = {};
7722 u32 prio = 0, acc = 0;
7723 u32 value = 0;
7724 int ret = 0;
7725
7726 lockdep_assert_held(&ar->conf_mutex);
7727
7728 if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
7729 return 0;
7730
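	/* Map the mac80211 access category to the WMI U-APSD delivery/trigger
	 * enable bits and to the user priority / WMM AC index passed to the
	 * auto-trigger command below.
	 */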
7731 switch (ac) {
7732 case IEEE80211_AC_VO:
7733 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN |
7734 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN;
7735 prio = 7;
7736 acc = 3;
7737 break;
7738 case IEEE80211_AC_VI:
7739 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN |
7740 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN;
7741 prio = 5;
7742 acc = 2;
7743 break;
7744 case IEEE80211_AC_BE:
7745 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN |
7746 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN;
7747 prio = 2;
7748 acc = 1;
7749 break;
7750 case IEEE80211_AC_BK:
7751 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN |
7752 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN;
7753 prio = 0;
7754 acc = 0;
7755 break;
7756 }
7757
7758 if (enable)
7759 arvif->u.sta.uapsd |= value;
7760 else
7761 arvif->u.sta.uapsd &= ~value;
7762
7763 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
7764 WMI_STA_PS_PARAM_UAPSD,
7765 arvif->u.sta.uapsd);
7766 if (ret) {
7767 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret);
7768 goto exit;
7769 }
7770
7771 if (arvif->u.sta.uapsd)
7772 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD;
7773 else
7774 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE;
7775
7776 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
7777 WMI_STA_PS_PARAM_RX_WAKE_POLICY,
7778 value);
7779 if (ret)
7780 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret);
7781
7782 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif);
7783 if (ret) {
7784 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n",
7785 arvif->vdev_id, ret);
7786 return ret;
7787 }
7788
7789 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif);
7790 if (ret) {
7791 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n",
7792 arvif->vdev_id, ret);
7793 return ret;
7794 }
7795
7796 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) ||
7797 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) {
		/* Only userspace can make an educated decision when to send
		 * a trigger frame. The following effectively disables the
		 * U-APSD autotrigger in firmware (which is enabled by default
		 * provided the autotrigger service is available).
		 */
7803
7804 arg.wmm_ac = acc;
7805 arg.user_priority = prio;
7806 arg.service_interval = 0;
7807 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
7808 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC;
7809
7810 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id,
7811 arvif->bssid, &arg, 1);
7812 if (ret) {
7813 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n",
7814 ret);
7815 return ret;
7816 }
7817 }
7818
7819 exit:
7820 return ret;
7821 }
7822
static int ath10k_conf_tx(struct ieee80211_hw *hw,
			  struct ieee80211_vif *vif,
			  unsigned int link_id, u16 ac,
			  const struct ieee80211_tx_queue_params *params)
7827 {
7828 struct ath10k *ar = hw->priv;
7829 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7830 struct wmi_wmm_params_arg *p = NULL;
7831 int ret;
7832
7833 mutex_lock(&ar->conf_mutex);
7834
7835 switch (ac) {
7836 case IEEE80211_AC_VO:
7837 p = &arvif->wmm_params.ac_vo;
7838 break;
7839 case IEEE80211_AC_VI:
7840 p = &arvif->wmm_params.ac_vi;
7841 break;
7842 case IEEE80211_AC_BE:
7843 p = &arvif->wmm_params.ac_be;
7844 break;
7845 case IEEE80211_AC_BK:
7846 p = &arvif->wmm_params.ac_bk;
7847 break;
7848 }
7849
7850 if (WARN_ON(!p)) {
7851 ret = -EINVAL;
7852 goto exit;
7853 }
7854
7855 p->cwmin = params->cw_min;
7856 p->cwmax = params->cw_max;
7857 p->aifs = params->aifs;
7858
7859 /*
7860 * The channel time duration programmed in the HW is in absolute
7861 * microseconds, while mac80211 gives the txop in units of
7862 * 32 microseconds.
7863 */
7864 p->txop = params->txop * 32;
7865
7866 if (ar->wmi.ops->gen_vdev_wmm_conf) {
7867 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id,
7868 &arvif->wmm_params);
7869 if (ret) {
7870 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n",
7871 arvif->vdev_id, ret);
7872 goto exit;
7873 }
7874 } else {
7875 /* This won't work well with multi-interface cases but it's
7876 * better than nothing.
7877 */
7878 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params);
7879 if (ret) {
7880 ath10k_warn(ar, "failed to set wmm params: %d\n", ret);
7881 goto exit;
7882 }
7883 }
7884
7885 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd);
7886 if (ret)
7887 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret);
7888
7889 exit:
7890 mutex_unlock(&ar->conf_mutex);
7891 return ret;
7892 }
7893
static int ath10k_remain_on_channel(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_channel *chan,
				    int duration,
				    enum ieee80211_roc_type type)
7899 {
7900 struct ath10k *ar = hw->priv;
7901 struct ath10k_vif *arvif = (void *)vif->drv_priv;
7902 struct wmi_start_scan_arg arg;
7903 int ret = 0;
7904 u32 scan_time_msec;
7905
7906 mutex_lock(&ar->conf_mutex);
7907
7908 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) {
7909 ret = -EBUSY;
7910 goto exit;
7911 }
7912
7913 spin_lock_bh(&ar->data_lock);
7914 switch (ar->scan.state) {
7915 case ATH10K_SCAN_IDLE:
7916 reinit_completion(&ar->scan.started);
7917 reinit_completion(&ar->scan.completed);
7918 reinit_completion(&ar->scan.on_channel);
7919 ar->scan.state = ATH10K_SCAN_STARTING;
7920 ar->scan.is_roc = true;
7921 ar->scan.vdev_id = arvif->vdev_id;
7922 ar->scan.roc_freq = chan->center_freq;
7923 ar->scan.roc_notify = true;
7924 ret = 0;
7925 break;
7926 case ATH10K_SCAN_STARTING:
7927 case ATH10K_SCAN_RUNNING:
7928 case ATH10K_SCAN_ABORTING:
7929 ret = -EBUSY;
7930 break;
7931 }
7932 spin_unlock_bh(&ar->data_lock);
7933
7934 if (ret)
7935 goto exit;
7936
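	/* Use twice the maximum remain-on-channel duration as the dwell and
	 * scan time so the firmware scan does not end before the requested
	 * duration has elapsed; the delayed work queued below stops the scan
	 * on time instead.
	 */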
7937 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2;
7938
7939 memset(&arg, 0, sizeof(arg));
7940 ath10k_wmi_start_scan_init(ar, &arg);
7941 arg.vdev_id = arvif->vdev_id;
7942 arg.scan_id = ATH10K_SCAN_ID;
7943 arg.n_channels = 1;
7944 arg.channels[0] = chan->center_freq;
7945 arg.dwell_time_active = scan_time_msec;
7946 arg.dwell_time_passive = scan_time_msec;
7947 arg.max_scan_time = scan_time_msec;
7948 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE;
7949 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ;
7950 arg.burst_duration_ms = duration;
7951
7952 ret = ath10k_start_scan(ar, &arg);
7953 if (ret) {
7954 ath10k_warn(ar, "failed to start roc scan: %d\n", ret);
7955 spin_lock_bh(&ar->data_lock);
7956 ar->scan.state = ATH10K_SCAN_IDLE;
7957 spin_unlock_bh(&ar->data_lock);
7958 goto exit;
7959 }
7960
7961 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ);
7962 if (ret == 0) {
7963 ath10k_warn(ar, "failed to switch to channel for roc scan\n");
7964
7965 ret = ath10k_scan_stop(ar);
7966 if (ret)
7967 ath10k_warn(ar, "failed to stop scan: %d\n", ret);
7968
7969 ret = -ETIMEDOUT;
7970 goto exit;
7971 }
7972
7973 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout,
7974 msecs_to_jiffies(duration));
7975
7976 ret = 0;
7977 exit:
7978 mutex_unlock(&ar->conf_mutex);
7979 return ret;
7980 }
7981
static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw,
					   struct ieee80211_vif *vif)
7984 {
7985 struct ath10k *ar = hw->priv;
7986
7987 mutex_lock(&ar->conf_mutex);
7988
7989 spin_lock_bh(&ar->data_lock);
7990 ar->scan.roc_notify = false;
7991 spin_unlock_bh(&ar->data_lock);
7992
7993 ath10k_scan_abort(ar);
7994
7995 mutex_unlock(&ar->conf_mutex);
7996
7997 cancel_delayed_work_sync(&ar->scan.timeout);
7998
7999 return 0;
8000 }
8001
8002 /*
8003 * Both RTS and Fragmentation threshold are interface-specific
8004 * in ath10k, but device-specific in mac80211.
8005 */
8006
static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
8008 {
8009 struct ath10k *ar = hw->priv;
8010 struct ath10k_vif *arvif;
8011 int ret = 0;
8012
8013 mutex_lock(&ar->conf_mutex);
8014 list_for_each_entry(arvif, &ar->arvifs, list) {
8015 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
8016 arvif->vdev_id, value);
8017
8018 ret = ath10k_mac_set_rts(arvif, value);
8019 if (ret) {
8020 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
8021 arvif->vdev_id, ret);
8022 break;
8023 }
8024 }
8025 mutex_unlock(&ar->conf_mutex);
8026
8027 return ret;
8028 }
8029
static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
8031 {
	/* Even though there's a WMI enum for the fragmentation threshold, no
	 * known firmware actually implements it. Moreover it is not possible
	 * to rely on mac80211 for frame fragmentation because the firmware
	 * clears the "more fragments" bit in the frame control field, making
	 * it impossible for remote devices to reassemble the frames.
	 *
	 * Hence implement a dummy callback just to say fragmentation isn't
	 * supported. This effectively prevents mac80211 from doing frame
	 * fragmentation in software.
	 */
8042 return -EOPNOTSUPP;
8043 }
8044
void ath10k_mac_wait_tx_complete(struct ath10k *ar)
8046 {
8047 bool skip;
8048 long time_left;
8049
	/* mac80211 doesn't care whether we really transmit the queued frames
	 * or not; they will be collected either way when the vdevs are
	 * stopped/deleted.
	 */
8053
8054 if (ar->state == ATH10K_STATE_WEDGED)
8055 return;
8056
8057 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
8058 bool empty;
8059
8060 spin_lock_bh(&ar->htt.tx_lock);
8061 empty = (ar->htt.num_pending_tx == 0);
8062 spin_unlock_bh(&ar->htt.tx_lock);
8063
8064 skip = (ar->state == ATH10K_STATE_WEDGED) ||
8065 test_bit(ATH10K_FLAG_CRASH_FLUSH,
8066 &ar->dev_flags);
8067
8068 (empty || skip);
8069 }), ATH10K_FLUSH_TIMEOUT_HZ);
8070
8071 if (time_left == 0 || skip)
8072 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
8073 skip, ar->state, time_left);
8074 }
8075
static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 u32 queues, bool drop)
8078 {
8079 struct ath10k *ar = hw->priv;
8080 struct ath10k_vif *arvif;
8081 u32 bitmap;
8082
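	/* When asked to drop, tell the firmware to flush frames queued for
	 * station vdevs instead of waiting for them to drain.
	 */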
8083 if (drop) {
8084 if (vif && vif->type == NL80211_IFTYPE_STATION) {
8085 bitmap = ~(1 << WMI_MGMT_TID);
8086 list_for_each_entry(arvif, &ar->arvifs, list) {
8087 if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
8088 ath10k_wmi_peer_flush(ar, arvif->vdev_id,
8089 arvif->bssid, bitmap);
8090 }
8091 ath10k_htt_flush_tx(&ar->htt);
8092 }
8093 return;
8094 }
8095
8096 mutex_lock(&ar->conf_mutex);
8097 ath10k_mac_wait_tx_complete(ar);
8098 mutex_unlock(&ar->conf_mutex);
8099 }
8100
/* TODO: Implement this function properly.
 * For now it is needed to reply to Probe Requests in IBSS mode.
 * This information probably needs to come from the firmware.
 */
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
8106 {
8107 return 1;
8108 }
8109
static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
				     enum ieee80211_reconfig_type reconfig_type)
8112 {
8113 struct ath10k *ar = hw->priv;
8114 struct ath10k_vif *arvif;
8115
8116 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
8117 return;
8118
8119 mutex_lock(&ar->conf_mutex);
8120
8121 /* If device failed to restart it will be in a different state, e.g.
8122 * ATH10K_STATE_WEDGED
8123 */
8124 if (ar->state == ATH10K_STATE_RESTARTED) {
8125 ath10k_info(ar, "device successfully recovered\n");
8126 ar->state = ATH10K_STATE_ON;
8127 ieee80211_wake_queues(ar->hw);
8128 clear_bit(ATH10K_FLAG_RESTARTING, &ar->dev_flags);
8129 if (ar->hw_params.hw_restart_disconnect) {
8130 list_for_each_entry(arvif, &ar->arvifs, list) {
8131 if (arvif->is_up && arvif->vdev_type == WMI_VDEV_TYPE_STA)
8132 ieee80211_hw_restart_disconnect(arvif->vif);
8133 }
8134 }
8135 }
8136
8137 mutex_unlock(&ar->conf_mutex);
8138 }
8139
static void
ath10k_mac_update_bss_chan_survey(struct ath10k *ar,
				  struct ieee80211_channel *channel)
8143 {
8144 int ret;
8145 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ;
8146
8147 lockdep_assert_held(&ar->conf_mutex);
8148
8149 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) ||
8150 (ar->rx_channel != channel))
8151 return;
8152
8153 if (ar->scan.state != ATH10K_SCAN_IDLE) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning\n");
8155 return;
8156 }
8157
8158 reinit_completion(&ar->bss_survey_done);
8159
8160 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type);
8161 if (ret) {
8162 ath10k_warn(ar, "failed to send pdev bss chan info request\n");
8163 return;
8164 }
8165
8166 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ);
8167 if (!ret) {
8168 ath10k_warn(ar, "bss channel survey timed out\n");
8169 return;
8170 }
8171 }
8172
static int ath10k_get_survey(struct ieee80211_hw *hw, int idx,
			     struct survey_info *survey)
8175 {
8176 struct ath10k *ar = hw->priv;
8177 struct ieee80211_supported_band *sband;
8178 struct survey_info *ar_survey = &ar->survey[idx];
8179 int ret = 0;
8180
8181 mutex_lock(&ar->conf_mutex);
8182
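	/* The survey index spans the 2 GHz channel list first and then
	 * continues into the 5 GHz list.
	 */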
8183 sband = hw->wiphy->bands[NL80211_BAND_2GHZ];
8184 if (sband && idx >= sband->n_channels) {
8185 idx -= sband->n_channels;
8186 sband = NULL;
8187 }
8188
8189 if (!sband)
8190 sband = hw->wiphy->bands[NL80211_BAND_5GHZ];
8191
8192 if (!sband || idx >= sband->n_channels) {
8193 ret = -ENOENT;
8194 goto exit;
8195 }
8196
8197 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]);
8198
8199 spin_lock_bh(&ar->data_lock);
8200 memcpy(survey, ar_survey, sizeof(*survey));
8201 spin_unlock_bh(&ar->data_lock);
8202
8203 survey->channel = &sband->channels[idx];
8204
8205 if (ar->rx_channel == survey->channel)
8206 survey->filled |= SURVEY_INFO_IN_USE;
8207
8208 exit:
8209 mutex_unlock(&ar->conf_mutex);
8210 return ret;
8211 }
8212
static bool
ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar,
				       enum nl80211_band band,
				       const struct cfg80211_bitrate_mask *mask,
				       int *nss)
8218 {
8219 struct ieee80211_supported_band *sband = &ar->mac.sbands[band];
8220 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map);
8221 u8 ht_nss_mask = 0;
8222 u8 vht_nss_mask = 0;
8223 int i;
8224
8225 if (mask->control[band].legacy)
8226 return false;
8227
8228 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) {
8229 if (mask->control[band].ht_mcs[i] == 0)
8230 continue;
8231 else if (mask->control[band].ht_mcs[i] ==
8232 sband->ht_cap.mcs.rx_mask[i])
8233 ht_nss_mask |= BIT(i);
8234 else
8235 return false;
8236 }
8237
8238 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) {
8239 if (mask->control[band].vht_mcs[i] == 0)
8240 continue;
8241 else if (mask->control[band].vht_mcs[i] ==
8242 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i))
8243 vht_nss_mask |= BIT(i);
8244 else
8245 return false;
8246 }
8247
8248 if (ht_nss_mask != vht_nss_mask)
8249 return false;
8250
8251 if (ht_nss_mask == 0)
8252 return false;
8253
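	/* Streams must be enabled contiguously starting from NSS 1, i.e. the
	 * mask has to look like 0b1, 0b11, 0b111, ...
	 */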
8254 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask)
8255 return false;
8256
8257 *nss = fls(ht_nss_mask);
8258
8259 return true;
8260 }
8261
static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif,
					    u8 rate, u8 nss, u8 sgi, u8 ldpc)
8264 {
8265 struct ath10k *ar = arvif->ar;
8266 u32 vdev_param;
8267 int ret;
8268
8269 lockdep_assert_held(&ar->conf_mutex);
8270
8271 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02x nss %u sgi %u\n",
8272 arvif->vdev_id, rate, nss, sgi);
8273
8274 vdev_param = ar->wmi.vdev_param->fixed_rate;
8275 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate);
8276 if (ret) {
8277 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n",
8278 rate, ret);
8279 return ret;
8280 }
8281
8282 vdev_param = ar->wmi.vdev_param->nss;
8283 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss);
8284 if (ret) {
8285 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret);
8286 return ret;
8287 }
8288
8289 vdev_param = ar->wmi.vdev_param->sgi;
8290 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi);
8291 if (ret) {
8292 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret);
8293 return ret;
8294 }
8295
8296 vdev_param = ar->wmi.vdev_param->ldpc;
8297 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc);
8298 if (ret) {
8299 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret);
8300 return ret;
8301 }
8302
8303 return 0;
8304 }
8305
static bool
ath10k_mac_can_set_bitrate_mask(struct ath10k *ar,
				enum nl80211_band band,
				const struct cfg80211_bitrate_mask *mask,
				bool allow_pfr)
8311 {
8312 int i;
8313 u16 vht_mcs;
8314
8315 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible
8316 * to express all VHT MCS rate masks. Effectively only the following
8317 * ranges can be used: none, 0-7, 0-8 and 0-9.
8318 */
8319 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) {
8320 vht_mcs = mask->control[band].vht_mcs[i];
8321
8322 switch (vht_mcs) {
8323 case 0:
8324 case BIT(8) - 1:
8325 case BIT(9) - 1:
8326 case BIT(10) - 1:
8327 break;
8328 default:
8329 if (!allow_pfr)
8330 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n");
8331 return false;
8332 }
8333 }
8334
8335 return true;
8336 }
8337
static bool ath10k_mac_set_vht_bitrate_mask_fixup(struct ath10k *ar,
						  struct ath10k_vif *arvif,
						  struct ieee80211_sta *sta)
8341 {
8342 int err;
8343 u8 rate = arvif->vht_pfr;
8344
	/* skip non-VHT peers and peers with multiple rates */
8346 if (!sta->deflink.vht_cap.vht_supported || arvif->vht_num_rates != 1)
8347 return false;
8348
8349 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
8350 WMI_PEER_PARAM_FIXED_RATE, rate);
8351 if (err)
8352 ath10k_warn(ar, "failed to enable STA %pM peer fixed rate: %d\n",
8353 sta->addr, err);
8354
8355 return true;
8356 }
8357
static void ath10k_mac_set_bitrate_mask_iter(void *data,
					     struct ieee80211_sta *sta)
8360 {
8361 struct ath10k_vif *arvif = data;
8362 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8363 struct ath10k *ar = arvif->ar;
8364
8365 if (arsta->arvif != arvif)
8366 return;
8367
8368 if (ath10k_mac_set_vht_bitrate_mask_fixup(ar, arvif, sta))
8369 return;
8370
8371 spin_lock_bh(&ar->data_lock);
8372 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED;
8373 spin_unlock_bh(&ar->data_lock);
8374
8375 ieee80211_queue_work(ar->hw, &arsta->update_wk);
8376 }
8377
static void ath10k_mac_clr_bitrate_mask_iter(void *data,
					     struct ieee80211_sta *sta)
8380 {
8381 struct ath10k_vif *arvif = data;
8382 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8383 struct ath10k *ar = arvif->ar;
8384 int err;
8385
8386 /* clear vht peers only */
8387 if (arsta->arvif != arvif || !sta->deflink.vht_cap.vht_supported)
8388 return;
8389
8390 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr,
8391 WMI_PEER_PARAM_FIXED_RATE,
8392 WMI_FIXED_RATE_NONE);
8393 if (err)
8394 ath10k_warn(ar, "failed to clear STA %pM peer fixed rate: %d\n",
8395 sta->addr, err);
8396 }
8397
static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw,
					  struct ieee80211_vif *vif,
					  const struct cfg80211_bitrate_mask *mask)
8401 {
8402 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8403 struct cfg80211_chan_def def;
8404 struct ath10k *ar = arvif->ar;
8405 enum nl80211_band band;
8406 const u8 *ht_mcs_mask;
8407 const u16 *vht_mcs_mask;
8408 u8 rate;
8409 u8 nss;
8410 u8 sgi;
8411 u8 ldpc;
8412 int single_nss;
8413 int ret;
8414 int vht_num_rates, allow_pfr;
8415 u8 vht_pfr;
8416 bool update_bitrate_mask = true;
8417
8418 if (ath10k_mac_vif_chan(vif, &def))
8419 return -EPERM;
8420
8421 band = def.chan->band;
8422 ht_mcs_mask = mask->control[band].ht_mcs;
8423 vht_mcs_mask = mask->control[band].vht_mcs;
8424 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC);
8425
8426 sgi = mask->control[band].gi;
8427 if (sgi == NL80211_TXRATE_FORCE_LGI)
8428 return -EINVAL;
8429
8430 allow_pfr = test_bit(ATH10K_FW_FEATURE_PEER_FIXED_RATE,
8431 ar->normal_mode_fw.fw_file.fw_features);
8432 if (allow_pfr) {
8433 mutex_lock(&ar->conf_mutex);
8434 ieee80211_iterate_stations_atomic(ar->hw,
8435 ath10k_mac_clr_bitrate_mask_iter,
8436 arvif);
8437 mutex_unlock(&ar->conf_mutex);
8438 }
8439
8440 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask,
8441 &vht_num_rates)) {
8442 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
8443 &rate, &nss,
8444 false);
8445 if (ret) {
8446 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n",
8447 arvif->vdev_id, ret);
8448 return ret;
8449 }
8450 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask,
8451 &single_nss)) {
8452 rate = WMI_FIXED_RATE_NONE;
8453 nss = single_nss;
8454 } else {
8455 rate = WMI_FIXED_RATE_NONE;
8456 nss = min(ar->num_rf_chains,
8457 max(ath10k_mac_max_ht_nss(ht_mcs_mask),
8458 ath10k_mac_max_vht_nss(vht_mcs_mask)));
8459
8460 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask,
8461 allow_pfr)) {
8462 u8 vht_nss;
8463
8464 if (!allow_pfr || vht_num_rates != 1)
8465 return -EINVAL;
8466
			/* If we reach here, the firmware supports peer fixed
			 * rate and a single VHT rate is requested. Don't
			 * update the vif bitrate_mask, as the rate applies
			 * only to a specific peer.
			 */
8471 ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask,
8472 &vht_pfr,
8473 &vht_nss,
8474 true);
8475 update_bitrate_mask = false;
8476 } else {
8477 vht_pfr = 0;
8478 }
8479
8480 mutex_lock(&ar->conf_mutex);
8481
8482 if (update_bitrate_mask)
8483 arvif->bitrate_mask = *mask;
8484 arvif->vht_num_rates = vht_num_rates;
8485 arvif->vht_pfr = vht_pfr;
8486 ieee80211_iterate_stations_atomic(ar->hw,
8487 ath10k_mac_set_bitrate_mask_iter,
8488 arvif);
8489
8490 mutex_unlock(&ar->conf_mutex);
8491 }
8492
8493 mutex_lock(&ar->conf_mutex);
8494
8495 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc);
8496 if (ret) {
8497 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n",
8498 arvif->vdev_id, ret);
8499 goto exit;
8500 }
8501
8502 exit:
8503 mutex_unlock(&ar->conf_mutex);
8504
8505 return ret;
8506 }
8507
static void ath10k_sta_rc_update(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 u32 changed)
8512 {
8513 struct ath10k *ar = hw->priv;
8514 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
8515 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8516 struct ath10k_peer *peer;
8517 u32 bw, smps;
8518
8519 spin_lock_bh(&ar->data_lock);
8520
8521 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
8522 if (!peer) {
8523 spin_unlock_bh(&ar->data_lock);
8524 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n",
8525 sta->addr, arvif->vdev_id);
8526 return;
8527 }
8528
8529 ath10k_dbg(ar, ATH10K_DBG_STA,
8530 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n",
8531 sta->addr, changed, sta->deflink.bandwidth,
8532 sta->deflink.rx_nss,
8533 sta->deflink.smps_mode);
8534
8535 if (changed & IEEE80211_RC_BW_CHANGED) {
8536 bw = WMI_PEER_CHWIDTH_20MHZ;
8537
8538 switch (sta->deflink.bandwidth) {
8539 case IEEE80211_STA_RX_BW_20:
8540 bw = WMI_PEER_CHWIDTH_20MHZ;
8541 break;
8542 case IEEE80211_STA_RX_BW_40:
8543 bw = WMI_PEER_CHWIDTH_40MHZ;
8544 break;
8545 case IEEE80211_STA_RX_BW_80:
8546 bw = WMI_PEER_CHWIDTH_80MHZ;
8547 break;
8548 case IEEE80211_STA_RX_BW_160:
8549 bw = WMI_PEER_CHWIDTH_160MHZ;
8550 break;
8551 default:
8552 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n",
8553 sta->deflink.bandwidth, sta->addr);
8554 bw = WMI_PEER_CHWIDTH_20MHZ;
8555 break;
8556 }
8557
8558 arsta->bw = bw;
8559 }
8560
8561 if (changed & IEEE80211_RC_NSS_CHANGED)
8562 arsta->nss = sta->deflink.rx_nss;
8563
8564 if (changed & IEEE80211_RC_SMPS_CHANGED) {
8565 smps = WMI_PEER_SMPS_PS_NONE;
8566
8567 switch (sta->deflink.smps_mode) {
8568 case IEEE80211_SMPS_AUTOMATIC:
8569 case IEEE80211_SMPS_OFF:
8570 smps = WMI_PEER_SMPS_PS_NONE;
8571 break;
8572 case IEEE80211_SMPS_STATIC:
8573 smps = WMI_PEER_SMPS_STATIC;
8574 break;
8575 case IEEE80211_SMPS_DYNAMIC:
8576 smps = WMI_PEER_SMPS_DYNAMIC;
8577 break;
8578 case IEEE80211_SMPS_NUM_MODES:
8579 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n",
8580 sta->deflink.smps_mode, sta->addr);
8581 smps = WMI_PEER_SMPS_PS_NONE;
8582 break;
8583 }
8584
8585 arsta->smps = smps;
8586 }
8587
8588 arsta->changed |= changed;
8589
8590 spin_unlock_bh(&ar->data_lock);
8591
8592 ieee80211_queue_work(hw, &arsta->update_wk);
8593 }
8594
static void ath10k_offset_tsf(struct ieee80211_hw *hw,
			      struct ieee80211_vif *vif, s64 tsf_offset)
8597 {
8598 struct ath10k *ar = hw->priv;
8599 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8600 u32 offset, vdev_param;
8601 int ret;
8602
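	/* Firmware exposes separate vdev params for moving the TSF forwards
	 * and backwards, so pick one based on the sign of the offset.
	 */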
8603 if (tsf_offset < 0) {
8604 vdev_param = ar->wmi.vdev_param->dec_tsf;
8605 offset = -tsf_offset;
8606 } else {
8607 vdev_param = ar->wmi.vdev_param->inc_tsf;
8608 offset = tsf_offset;
8609 }
8610
8611 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id,
8612 vdev_param, offset);
8613
8614 if (ret && ret != -EOPNOTSUPP)
8615 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n",
8616 offset, vdev_param, ret);
8617 }
8618
static int ath10k_ampdu_action(struct ieee80211_hw *hw,
			       struct ieee80211_vif *vif,
			       struct ieee80211_ampdu_params *params)
8622 {
8623 struct ath10k *ar = hw->priv;
8624 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8625 struct ieee80211_sta *sta = params->sta;
8626 enum ieee80211_ampdu_mlme_action action = params->action;
8627 u16 tid = params->tid;
8628
8629 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %u action %d\n",
8630 arvif->vdev_id, sta->addr, tid, action);
8631
8632 switch (action) {
8633 case IEEE80211_AMPDU_RX_START:
8634 case IEEE80211_AMPDU_RX_STOP:
8635 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session
8636 * creation/removal. Do we need to verify this?
8637 */
8638 return 0;
8639 case IEEE80211_AMPDU_TX_START:
8640 case IEEE80211_AMPDU_TX_STOP_CONT:
8641 case IEEE80211_AMPDU_TX_STOP_FLUSH:
8642 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
8643 case IEEE80211_AMPDU_TX_OPERATIONAL:
8644 /* Firmware offloads Tx aggregation entirely so deny mac80211
8645 * Tx aggregation requests.
8646 */
8647 return -EOPNOTSUPP;
8648 }
8649
8650 return -EINVAL;
8651 }
8652
static void
ath10k_mac_update_rx_channel(struct ath10k *ar,
			     struct ieee80211_chanctx_conf *ctx,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs)
8658 {
8659 struct cfg80211_chan_def *def = NULL;
8660
8661 /* Both locks are required because ar->rx_channel is modified. This
8662 * allows readers to hold either lock.
8663 */
8664 lockdep_assert_held(&ar->conf_mutex);
8665 lockdep_assert_held(&ar->data_lock);
8666
8667 WARN_ON(ctx && vifs);
8668 WARN_ON(vifs && !n_vifs);
8669
	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
	 * ppdu on Rx may reduce performance on low-end systems. It should be
	 * possible to use tables/hashmaps to speed up the lookup (though be
	 * wary of CPU data cache line sizes), but to keep the initial
	 * implementation simple and less intrusive, fall back to the slow
	 * lookup only for multi-channel cases. Single-channel cases will keep
	 * using the old channel derivation, so performance should not be
	 * affected much.
	 */
8680 rcu_read_lock();
8681 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
8682 ieee80211_iter_chan_contexts_atomic(ar->hw,
8683 ath10k_mac_get_any_chandef_iter,
8684 &def);
8685
8686 if (vifs)
8687 def = &vifs[0].new_ctx->def;
8688
8689 ar->rx_channel = def->chan;
8690 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
8691 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
		/* During driver restart due to a firmware assert, mac80211
		 * already has a valid channel context for the given radio, so
		 * the channel context iteration returns num_chanctx > 0. Fix
		 * up rx_channel while a restart is in progress.
		 */
8697 ar->rx_channel = ctx->def.chan;
8698 } else {
8699 ar->rx_channel = NULL;
8700 }
8701 rcu_read_unlock();
8702 }
8703
static void
ath10k_mac_update_vif_chan(struct ath10k *ar,
			   struct ieee80211_vif_chanctx_switch *vifs,
			   int n_vifs)
8708 {
8709 struct ath10k_vif *arvif;
8710 int ret;
8711 int i;
8712
8713 lockdep_assert_held(&ar->conf_mutex);
8714
8715 /* First stop monitor interface. Some FW versions crash if there's a
8716 * lone monitor interface.
8717 */
8718 if (ar->monitor_started)
8719 ath10k_monitor_stop(ar);
8720
8721 for (i = 0; i < n_vifs; i++) {
8722 arvif = (void *)vifs[i].vif->drv_priv;
8723
8724 ath10k_dbg(ar, ATH10K_DBG_MAC,
8725 "mac chanctx switch vdev_id %i freq %u->%u width %d->%d\n",
8726 arvif->vdev_id,
8727 vifs[i].old_ctx->def.chan->center_freq,
8728 vifs[i].new_ctx->def.chan->center_freq,
8729 vifs[i].old_ctx->def.width,
8730 vifs[i].new_ctx->def.width);
8731
8732 if (WARN_ON(!arvif->is_started))
8733 continue;
8734
8735 if (WARN_ON(!arvif->is_up))
8736 continue;
8737
8738 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
8739 if (ret) {
8740 ath10k_warn(ar, "failed to down vdev %d: %d\n",
8741 arvif->vdev_id, ret);
8742 continue;
8743 }
8744 }
8745
8746 /* All relevant vdevs are downed and associated channel resources
8747 * should be available for the channel switch now.
8748 */
8749
8750 spin_lock_bh(&ar->data_lock);
8751 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs);
8752 spin_unlock_bh(&ar->data_lock);
8753
8754 for (i = 0; i < n_vifs; i++) {
8755 arvif = (void *)vifs[i].vif->drv_priv;
8756
8757 if (WARN_ON(!arvif->is_started))
8758 continue;
8759
8760 if (WARN_ON(!arvif->is_up))
8761 continue;
8762
8763 ret = ath10k_mac_setup_bcn_tmpl(arvif);
8764 if (ret)
8765 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
8766 ret);
8767
8768 ret = ath10k_mac_setup_prb_tmpl(arvif);
8769 if (ret)
8770 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
8771 ret);
8772
8773 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def);
8774 if (ret) {
8775 ath10k_warn(ar, "failed to restart vdev %d: %d\n",
8776 arvif->vdev_id, ret);
8777 continue;
8778 }
8779
8780 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
8781 arvif->bssid);
8782 if (ret) {
8783 ath10k_warn(ar, "failed to bring vdev up %d: %d\n",
8784 arvif->vdev_id, ret);
8785 continue;
8786 }
8787 }
8788
8789 ath10k_monitor_recalc(ar);
8790 }
8791
static int
ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw,
			  struct ieee80211_chanctx_conf *ctx)
8795 {
8796 struct ath10k *ar = hw->priv;
8797
8798 ath10k_dbg(ar, ATH10K_DBG_MAC,
8799 "mac chanctx add freq %u width %d ptr %pK\n",
8800 ctx->def.chan->center_freq, ctx->def.width, ctx);
8801
8802 mutex_lock(&ar->conf_mutex);
8803
8804 spin_lock_bh(&ar->data_lock);
8805 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0);
8806 spin_unlock_bh(&ar->data_lock);
8807
8808 ath10k_recalc_radar_detection(ar);
8809 ath10k_monitor_recalc(ar);
8810
8811 mutex_unlock(&ar->conf_mutex);
8812
8813 return 0;
8814 }
8815
static void
ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *ctx)
8819 {
8820 struct ath10k *ar = hw->priv;
8821
8822 ath10k_dbg(ar, ATH10K_DBG_MAC,
8823 "mac chanctx remove freq %u width %d ptr %pK\n",
8824 ctx->def.chan->center_freq, ctx->def.width, ctx);
8825
8826 mutex_lock(&ar->conf_mutex);
8827
8828 spin_lock_bh(&ar->data_lock);
8829 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0);
8830 spin_unlock_bh(&ar->data_lock);
8831
8832 ath10k_recalc_radar_detection(ar);
8833 ath10k_monitor_recalc(ar);
8834
8835 mutex_unlock(&ar->conf_mutex);
8836 }
8837
8838 struct ath10k_mac_change_chanctx_arg {
8839 struct ieee80211_chanctx_conf *ctx;
8840 struct ieee80211_vif_chanctx_switch *vifs;
8841 int n_vifs;
8842 int next_vif;
8843 };
8844
static void
ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac,
				   struct ieee80211_vif *vif)
8848 {
8849 struct ath10k_mac_change_chanctx_arg *arg = data;
8850
8851 if (rcu_access_pointer(vif->bss_conf.chanctx_conf) != arg->ctx)
8852 return;
8853
8854 arg->n_vifs++;
8855 }
8856
static void
ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac,
				    struct ieee80211_vif *vif)
8860 {
8861 struct ath10k_mac_change_chanctx_arg *arg = data;
8862 struct ieee80211_chanctx_conf *ctx;
8863
8864 ctx = rcu_access_pointer(vif->bss_conf.chanctx_conf);
8865 if (ctx != arg->ctx)
8866 return;
8867
8868 if (WARN_ON(arg->next_vif == arg->n_vifs))
8869 return;
8870
8871 arg->vifs[arg->next_vif].vif = vif;
8872 arg->vifs[arg->next_vif].old_ctx = ctx;
8873 arg->vifs[arg->next_vif].new_ctx = ctx;
8874 arg->next_vif++;
8875 }
8876
static void
ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw,
			     struct ieee80211_chanctx_conf *ctx,
			     u32 changed)
8881 {
8882 struct ath10k *ar = hw->priv;
8883 struct ath10k_mac_change_chanctx_arg arg = { .ctx = ctx };
8884
8885 mutex_lock(&ar->conf_mutex);
8886
8887 ath10k_dbg(ar, ATH10K_DBG_MAC,
8888 "mac chanctx change freq %u width %d ptr %pK changed %x\n",
8889 ctx->def.chan->center_freq, ctx->def.width, ctx, changed);
8890
8891 /* This shouldn't really happen because channel switching should use
8892 * switch_vif_chanctx().
8893 */
8894 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL))
8895 goto unlock;
8896
8897 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) {
8898 ieee80211_iterate_active_interfaces_atomic(
8899 hw,
8900 ATH10K_ITER_NORMAL_FLAGS,
8901 ath10k_mac_change_chanctx_cnt_iter,
8902 &arg);
8903 if (arg.n_vifs == 0)
8904 goto radar;
8905
8906 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]),
8907 GFP_KERNEL);
8908 if (!arg.vifs)
8909 goto radar;
8910
8911 ieee80211_iterate_active_interfaces_atomic(
8912 hw,
8913 ATH10K_ITER_NORMAL_FLAGS,
8914 ath10k_mac_change_chanctx_fill_iter,
8915 &arg);
8916 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs);
8917 kfree(arg.vifs);
8918 }
8919
8920 radar:
8921 ath10k_recalc_radar_detection(ar);
8922
8923 /* FIXME: How to configure Rx chains properly? */
8924
8925 /* No other actions are actually necessary. Firmware maintains channel
8926 * definitions per vdev internally and there's no host-side channel
8927 * context abstraction to configure, e.g. channel width.
8928 */
8929
8930 unlock:
8931 mutex_unlock(&ar->conf_mutex);
8932 }
8933
static int
ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_vif *vif,
				 struct ieee80211_bss_conf *link_conf,
				 struct ieee80211_chanctx_conf *ctx)
8939 {
8940 struct ath10k *ar = hw->priv;
8941 struct ath10k_vif *arvif = (void *)vif->drv_priv;
8942 int ret;
8943
8944 mutex_lock(&ar->conf_mutex);
8945
8946 ath10k_dbg(ar, ATH10K_DBG_MAC,
8947 "mac chanctx assign ptr %pK vdev_id %i\n",
8948 ctx, arvif->vdev_id);
8949
8950 if (WARN_ON(arvif->is_started)) {
8951 mutex_unlock(&ar->conf_mutex);
8952 return -EBUSY;
8953 }
8954
8955 ret = ath10k_vdev_start(arvif, &ctx->def);
8956 if (ret) {
8957 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n",
8958 arvif->vdev_id, vif->addr,
8959 ctx->def.chan->center_freq, ret);
8960 goto err;
8961 }
8962
8963 arvif->is_started = true;
8964
8965 ret = ath10k_mac_vif_setup_ps(arvif);
8966 if (ret) {
8967 ath10k_warn(ar, "failed to update vdev %i ps: %d\n",
8968 arvif->vdev_id, ret);
8969 goto err_stop;
8970 }
8971
8972 if (vif->type == NL80211_IFTYPE_MONITOR) {
8973 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr);
8974 if (ret) {
8975 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n",
8976 arvif->vdev_id, ret);
8977 goto err_stop;
8978 }
8979
8980 arvif->is_up = true;
8981 }
8982
8983 if (ath10k_mac_can_set_cts_prot(arvif)) {
8984 ret = ath10k_mac_set_cts_prot(arvif);
8985 if (ret)
8986 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
8987 arvif->vdev_id, ret);
8988 }
8989
8990 if (ath10k_peer_stats_enabled(ar) &&
8991 ar->hw_params.tx_stats_over_pktlog) {
8992 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS;
8993 ret = ath10k_wmi_pdev_pktlog_enable(ar,
8994 ar->pktlog_filter);
8995 if (ret) {
8996 ath10k_warn(ar, "failed to enable pktlog %d\n", ret);
8997 goto err_stop;
8998 }
8999 }
9000
9001 mutex_unlock(&ar->conf_mutex);
9002 return 0;
9003
9004 err_stop:
9005 ath10k_vdev_stop(arvif);
9006 arvif->is_started = false;
9007 ath10k_mac_vif_setup_ps(arvif);
9008
9009 err:
9010 mutex_unlock(&ar->conf_mutex);
9011 return ret;
9012 }
9013
static void
ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif,
				   struct ieee80211_bss_conf *link_conf,
				   struct ieee80211_chanctx_conf *ctx)
9019 {
9020 struct ath10k *ar = hw->priv;
9021 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9022 int ret;
9023
9024 mutex_lock(&ar->conf_mutex);
9025
9026 ath10k_dbg(ar, ATH10K_DBG_MAC,
9027 "mac chanctx unassign ptr %pK vdev_id %i\n",
9028 ctx, arvif->vdev_id);
9029
9030 WARN_ON(!arvif->is_started);
9031
9032 if (vif->type == NL80211_IFTYPE_MONITOR) {
9033 WARN_ON(!arvif->is_up);
9034
9035 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
9036 if (ret)
9037 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n",
9038 arvif->vdev_id, ret);
9039
9040 arvif->is_up = false;
9041 }
9042
9043 ret = ath10k_vdev_stop(arvif);
9044 if (ret)
9045 ath10k_warn(ar, "failed to stop vdev %i: %d\n",
9046 arvif->vdev_id, ret);
9047
9048 arvif->is_started = false;
9049
9050 mutex_unlock(&ar->conf_mutex);
9051 }
9052
static int
ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw,
				 struct ieee80211_vif_chanctx_switch *vifs,
				 int n_vifs,
				 enum ieee80211_chanctx_switch_mode mode)
9058 {
9059 struct ath10k *ar = hw->priv;
9060
9061 mutex_lock(&ar->conf_mutex);
9062
9063 ath10k_dbg(ar, ATH10K_DBG_MAC,
9064 "mac chanctx switch n_vifs %d mode %d\n",
9065 n_vifs, mode);
9066 ath10k_mac_update_vif_chan(ar, vifs, n_vifs);
9067
9068 mutex_unlock(&ar->conf_mutex);
9069 return 0;
9070 }
9071
static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw,
					     struct ieee80211_vif *vif,
					     struct ieee80211_sta *sta)
9075 {
9076 struct ath10k *ar;
9077 struct ath10k_peer *peer;
9078
9079 ar = hw->priv;
9080
9081 list_for_each_entry(peer, &ar->peers, list)
9082 if (peer->sta == sta)
9083 peer->removed = true;
9084 }
9085
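/* The rate tables below are expressed in units of 100 kbit/s (e.g. 65 means
 * 6.5 Mbit/s); ath10k_mac_parse_bitrate() passes bitrate_kbps / 100 down for
 * comparison against these entries to recover the bandwidth and guard
 * interval.
 */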
9086 /* HT MCS parameters with Nss = 1 */
9087 static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss1[] = {
9088 /* MCS L20 L40 S20 S40 */
9089 {0, { 65, 135, 72, 150} },
9090 {1, { 130, 270, 144, 300} },
9091 {2, { 195, 405, 217, 450} },
9092 {3, { 260, 540, 289, 600} },
9093 {4, { 390, 810, 433, 900} },
9094 {5, { 520, 1080, 578, 1200} },
9095 {6, { 585, 1215, 650, 1350} },
9096 {7, { 650, 1350, 722, 1500} }
9097 };
9098
9099 /* HT MCS parameters with Nss = 2 */
9100 static const struct ath10k_index_ht_data_rate_type supported_ht_mcs_rate_nss2[] = {
9101 /* MCS L20 L40 S20 S40 */
9102 {0, {130, 270, 144, 300} },
9103 {1, {260, 540, 289, 600} },
9104 {2, {390, 810, 433, 900} },
9105 {3, {520, 1080, 578, 1200} },
9106 {4, {780, 1620, 867, 1800} },
9107 {5, {1040, 2160, 1156, 2400} },
9108 {6, {1170, 2430, 1300, 2700} },
9109 {7, {1300, 2700, 1444, 3000} }
9110 };
9111
/* VHT MCS parameters with Nss = 1 */
9113 static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss1[] = {
9114 /* MCS L80 S80 L40 S40 L20 S20 */
9115 {0, {293, 325}, {135, 150}, {65, 72} },
9116 {1, {585, 650}, {270, 300}, {130, 144} },
9117 {2, {878, 975}, {405, 450}, {195, 217} },
9118 {3, {1170, 1300}, {540, 600}, {260, 289} },
9119 {4, {1755, 1950}, {810, 900}, {390, 433} },
9120 {5, {2340, 2600}, {1080, 1200}, {520, 578} },
9121 {6, {2633, 2925}, {1215, 1350}, {585, 650} },
9122 {7, {2925, 3250}, {1350, 1500}, {650, 722} },
9123 {8, {3510, 3900}, {1620, 1800}, {780, 867} },
9124 {9, {3900, 4333}, {1800, 2000}, {780, 867} }
9125 };
9126
/* VHT MCS parameters with Nss = 2 */
9128 static const struct ath10k_index_vht_data_rate_type supported_vht_mcs_rate_nss2[] = {
9129 /* MCS L80 S80 L40 S40 L20 S20 */
9130 {0, {585, 650}, {270, 300}, {130, 144} },
9131 {1, {1170, 1300}, {540, 600}, {260, 289} },
9132 {2, {1755, 1950}, {810, 900}, {390, 433} },
9133 {3, {2340, 2600}, {1080, 1200}, {520, 578} },
9134 {4, {3510, 3900}, {1620, 1800}, {780, 867} },
9135 {5, {4680, 5200}, {2160, 2400}, {1040, 1156} },
9136 {6, {5265, 5850}, {2430, 2700}, {1170, 1300} },
9137 {7, {5850, 6500}, {2700, 3000}, {1300, 1444} },
9138 {8, {7020, 7800}, {3240, 3600}, {1560, 1733} },
9139 {9, {7800, 8667}, {3600, 4000}, {1560, 1733} }
9140 };
9141
static void ath10k_mac_get_rate_flags_ht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
					 u8 *flags, u8 *bw)
9144 {
9145 struct ath10k_index_ht_data_rate_type *mcs_rate;
9146 u8 index;
9147 size_t len_nss1 = ARRAY_SIZE(supported_ht_mcs_rate_nss1);
9148 size_t len_nss2 = ARRAY_SIZE(supported_ht_mcs_rate_nss2);
9149
9150 if (mcs >= (len_nss1 + len_nss2)) {
		ath10k_warn(ar, "unsupported mcs %d in current rate table", mcs);
9152 return;
9153 }
9154
9155 mcs_rate = (struct ath10k_index_ht_data_rate_type *)
9156 ((nss == 1) ? &supported_ht_mcs_rate_nss1 :
9157 &supported_ht_mcs_rate_nss2);
9158
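	/* HT MCS 8-15 use two spatial streams and map onto index 0-7 of the
	 * nss2 table.
	 */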
9159 if (mcs >= len_nss1)
9160 index = mcs - len_nss1;
9161 else
9162 index = mcs;
9163
9164 if (rate == mcs_rate[index].supported_rate[0]) {
9165 *bw = RATE_INFO_BW_20;
9166 } else if (rate == mcs_rate[index].supported_rate[1]) {
9167 *bw |= RATE_INFO_BW_40;
9168 } else if (rate == mcs_rate[index].supported_rate[2]) {
9169 *bw |= RATE_INFO_BW_20;
9170 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9171 } else if (rate == mcs_rate[index].supported_rate[3]) {
9172 *bw |= RATE_INFO_BW_40;
9173 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9174 } else {
9175 ath10k_warn(ar, "invalid ht params rate %d 100kbps nss %d mcs %d",
9176 rate, nss, mcs);
9177 }
9178 }
9179
static void ath10k_mac_get_rate_flags_vht(struct ath10k *ar, u32 rate, u8 nss, u8 mcs,
					  u8 *flags, u8 *bw)
9182 {
9183 struct ath10k_index_vht_data_rate_type *mcs_rate;
9184
9185 mcs_rate = (struct ath10k_index_vht_data_rate_type *)
9186 ((nss == 1) ? &supported_vht_mcs_rate_nss1 :
9187 &supported_vht_mcs_rate_nss2);
9188
9189 if (rate == mcs_rate[mcs].supported_VHT80_rate[0]) {
9190 *bw = RATE_INFO_BW_80;
9191 } else if (rate == mcs_rate[mcs].supported_VHT80_rate[1]) {
9192 *bw = RATE_INFO_BW_80;
9193 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9194 } else if (rate == mcs_rate[mcs].supported_VHT40_rate[0]) {
9195 *bw = RATE_INFO_BW_40;
9196 } else if (rate == mcs_rate[mcs].supported_VHT40_rate[1]) {
9197 *bw = RATE_INFO_BW_40;
9198 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9199 } else if (rate == mcs_rate[mcs].supported_VHT20_rate[0]) {
9200 *bw = RATE_INFO_BW_20;
9201 } else if (rate == mcs_rate[mcs].supported_VHT20_rate[1]) {
9202 *bw = RATE_INFO_BW_20;
9203 *flags |= RATE_INFO_FLAGS_SHORT_GI;
9204 } else {
9205 ath10k_warn(ar, "invalid vht params rate %d 100kbps nss %d mcs %d",
9206 rate, nss, mcs);
9207 }
9208 }
9209
static void ath10k_mac_get_rate_flags(struct ath10k *ar, u32 rate,
				      enum ath10k_phy_mode mode, u8 nss, u8 mcs,
				      u8 *flags, u8 *bw)
9213 {
9214 if (mode == ATH10K_PHY_MODE_HT) {
9215 *flags = RATE_INFO_FLAGS_MCS;
9216 ath10k_mac_get_rate_flags_ht(ar, rate, nss, mcs, flags, bw);
9217 } else if (mode == ATH10K_PHY_MODE_VHT) {
9218 *flags = RATE_INFO_FLAGS_VHT_MCS;
9219 ath10k_mac_get_rate_flags_vht(ar, rate, nss, mcs, flags, bw);
9220 }
9221 }
9222
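/* Decode a firmware rate code and bitrate into struct rate_info. The
 * WMI_TLV_GET_HW_RC_* accessors extract preamble, NSS (0-based in the
 * rate code, hence the +1) and MCS; the bitrate is converted from kbit/s
 * to the 100 kbit/s units used by the tables above and by rate_info.
 *
 * Illustrative example (not taken from firmware documentation): a VHT
 * rate code that decodes to nss=2/mcs=9 with a reported bitrate of
 * 866700 kbit/s yields legacy=8667, bw=80 MHz and the short-GI flag via
 * ath10k_mac_get_rate_flags_vht().
 */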
9223 static void ath10k_mac_parse_bitrate(struct ath10k *ar, u32 rate_code,
9224 u32 bitrate_kbps, struct rate_info *rate)
9225 {
9226 enum ath10k_phy_mode mode = ATH10K_PHY_MODE_LEGACY;
9227 enum wmi_rate_preamble preamble = WMI_TLV_GET_HW_RC_PREAM_V1(rate_code);
9228 u8 nss = WMI_TLV_GET_HW_RC_NSS_V1(rate_code) + 1;
9229 u8 mcs = WMI_TLV_GET_HW_RC_RATE_V1(rate_code);
9230 u8 flags = 0, bw = 0;
9231
9232 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac parse rate code 0x%x bitrate %d kbps\n",
9233 rate_code, bitrate_kbps);
9234
9235 if (preamble == WMI_RATE_PREAMBLE_HT)
9236 mode = ATH10K_PHY_MODE_HT;
9237 else if (preamble == WMI_RATE_PREAMBLE_VHT)
9238 mode = ATH10K_PHY_MODE_VHT;
9239
9240 ath10k_mac_get_rate_flags(ar, bitrate_kbps / 100, mode, nss, mcs, &flags, &bw);
9241
9242 ath10k_dbg(ar, ATH10K_DBG_MAC,
9243 "mac parse bitrate preamble %d mode %d nss %d mcs %d flags %x bw %d\n",
9244 preamble, mode, nss, mcs, flags, bw);
9245
9246 rate->flags = flags;
9247 rate->bw = bw;
9248 rate->legacy = bitrate_kbps / 100;
9249 rate->nss = nss;
9250 rate->mcs = mcs;
9251 }
9252
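/* Fill sinfo rx/tx bitrates from firmware peer stats. Only STA vdevs on
 * firmware that advertises supports_peer_stats_info are handled: a
 * WMI_REQUEST_ONE_PEER_STATS_INFO request is issued for the BSS peer and
 * the caller blocks for up to 3 s on peer_stats_info_complete, which is
 * presumably completed by the peer stats WMI event handler after it has
 * cached the rx/tx rate code and bitrate in arsta. The cached values are
 * consumed and cleared so stale rates are not reported twice.
 */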
9253 static void ath10k_mac_sta_get_peer_stats_info(struct ath10k *ar,
9254 struct ieee80211_sta *sta,
9255 struct station_info *sinfo)
9256 {
9257 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
9258 struct ath10k_peer *peer;
9259 unsigned long time_left;
9260 int ret;
9261
9262 if (!(ar->hw_params.supports_peer_stats_info &&
9263 arsta->arvif->vdev_type == WMI_VDEV_TYPE_STA))
9264 return;
9265
9266 spin_lock_bh(&ar->data_lock);
9267 peer = ath10k_peer_find(ar, arsta->arvif->vdev_id, sta->addr);
9268 spin_unlock_bh(&ar->data_lock);
9269 if (!peer)
9270 return;
9271
9272 reinit_completion(&ar->peer_stats_info_complete);
9273
9274 ret = ath10k_wmi_request_peer_stats_info(ar,
9275 arsta->arvif->vdev_id,
9276 WMI_REQUEST_ONE_PEER_STATS_INFO,
9277 arsta->arvif->bssid,
9278 0);
9279 if (ret && ret != -EOPNOTSUPP) {
9280 ath10k_warn(ar, "could not request peer stats info: %d\n", ret);
9281 return;
9282 }
9283
9284 time_left = wait_for_completion_timeout(&ar->peer_stats_info_complete, 3 * HZ);
9285 if (time_left == 0) {
9286 		ath10k_warn(ar, "timed out waiting for peer stats info\n");
9287 return;
9288 }
9289
9290 if (arsta->rx_rate_code != 0 && arsta->rx_bitrate_kbps != 0) {
9291 ath10k_mac_parse_bitrate(ar, arsta->rx_rate_code,
9292 arsta->rx_bitrate_kbps,
9293 &sinfo->rxrate);
9294
9295 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_BITRATE);
9296 arsta->rx_rate_code = 0;
9297 arsta->rx_bitrate_kbps = 0;
9298 }
9299
9300 if (arsta->tx_rate_code != 0 && arsta->tx_bitrate_kbps != 0) {
9301 ath10k_mac_parse_bitrate(ar, arsta->tx_rate_code,
9302 arsta->tx_bitrate_kbps,
9303 &sinfo->txrate);
9304
9305 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
9306 arsta->tx_rate_code = 0;
9307 arsta->tx_bitrate_kbps = 0;
9308 }
9309 }
9310
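/* mac80211 .sta_statistics callback. When peer stats are enabled this
 * kicks a firmware stats request under conf_mutex (which should refresh
 * the cached arsta fields) and then copies the rx duration, tx rate,
 * retry/failure counters and, via the helper above, per-peer bitrates
 * into station_info.
 */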
9311 static void ath10k_sta_statistics(struct ieee80211_hw *hw,
9312 struct ieee80211_vif *vif,
9313 struct ieee80211_sta *sta,
9314 struct station_info *sinfo)
9315 {
9316 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv;
9317 struct ath10k *ar = arsta->arvif->ar;
9318
9319 if (!ath10k_peer_stats_enabled(ar))
9320 return;
9321
9322 mutex_lock(&ar->conf_mutex);
9323 ath10k_debug_fw_stats_request(ar);
9324 mutex_unlock(&ar->conf_mutex);
9325
9326 sinfo->rx_duration = arsta->rx_duration;
9327 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION);
9328
9329 if (arsta->txrate.legacy || arsta->txrate.nss) {
9330 if (arsta->txrate.legacy) {
9331 sinfo->txrate.legacy = arsta->txrate.legacy;
9332 } else {
9333 sinfo->txrate.mcs = arsta->txrate.mcs;
9334 sinfo->txrate.nss = arsta->txrate.nss;
9335 sinfo->txrate.bw = arsta->txrate.bw;
9336 }
9337 sinfo->txrate.flags = arsta->txrate.flags;
9338 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE);
9339 }
9340
9341 if (ar->htt.disable_tx_comp) {
9342 sinfo->tx_failed = arsta->tx_failed;
9343 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_FAILED);
9344 }
9345
9346 sinfo->tx_retries = arsta->tx_retries;
9347 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_RETRIES);
9348
9349 ath10k_mac_sta_get_peer_stats_info(ar, sta, sinfo);
9350 }
9351
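/* mac80211 .set_tid_config callback. Each requested TID config is parsed
 * into a wmi_per_peer_per_tid_cfg_arg by ath10k_mac_parse_tid_config()
 * (which, for a specific sta, is expected to push the per-peer config to
 * firmware); with no sta given, the config is then applied to every
 * station on the vif via ieee80211_iterate_stations_atomic().
 */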
9352 static int ath10k_mac_op_set_tid_config(struct ieee80211_hw *hw,
9353 struct ieee80211_vif *vif,
9354 struct ieee80211_sta *sta,
9355 struct cfg80211_tid_config *tid_config)
9356 {
9357 struct ath10k *ar = hw->priv;
9358 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9359 struct ath10k_mac_iter_tid_conf_data data = {};
9360 struct wmi_per_peer_per_tid_cfg_arg arg = {};
9361 int ret, i;
9362
9363 mutex_lock(&ar->conf_mutex);
9364 arg.vdev_id = arvif->vdev_id;
9365
9366 arvif->tids_rst = 0;
9367 memset(arvif->tid_conf_changed, 0, sizeof(arvif->tid_conf_changed));
9368
9369 for (i = 0; i < tid_config->n_tid_conf; i++) {
9370 ret = ath10k_mac_parse_tid_config(ar, sta, vif,
9371 &tid_config->tid_conf[i],
9372 &arg);
9373 if (ret)
9374 goto exit;
9375 }
9376
9377 ret = 0;
9378
9379 if (sta)
9380 goto exit;
9381
9382 arvif->tids_rst = 0;
9383 data.curr_vif = vif;
9384 data.ar = ar;
9385
9386 ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
9387 &data);
9388
9389 exit:
9390 mutex_unlock(&ar->conf_mutex);
9391 return ret;
9392 }
9393
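/* mac80211 .reset_tid_config callback: reset the given TID bitmap either
 * for one station directly or, when no sta is passed, for every station
 * on the vif through the same atomic station iterator as above.
 */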
9394 static int ath10k_mac_op_reset_tid_config(struct ieee80211_hw *hw,
9395 struct ieee80211_vif *vif,
9396 struct ieee80211_sta *sta,
9397 u8 tids)
9398 {
9399 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9400 struct ath10k_mac_iter_tid_conf_data data = {};
9401 struct ath10k *ar = hw->priv;
9402 int ret = 0;
9403
9404 mutex_lock(&ar->conf_mutex);
9405
9406 if (sta) {
9407 arvif->tids_rst = 0;
9408 ret = ath10k_mac_reset_tid_config(ar, sta, arvif, tids);
9409 goto exit;
9410 }
9411
9412 arvif->tids_rst = tids;
9413 data.curr_vif = vif;
9414 data.ar = ar;
9415 ieee80211_iterate_stations_atomic(hw, ath10k_mac_vif_stations_tid_conf,
9416 &data);
9417
9418 exit:
9419 mutex_unlock(&ar->conf_mutex);
9420 return ret;
9421 }
9422
9423 static const struct ieee80211_ops ath10k_ops = {
9424 .tx = ath10k_mac_op_tx,
9425 .wake_tx_queue = ath10k_mac_op_wake_tx_queue,
9426 .start = ath10k_start,
9427 .stop = ath10k_stop,
9428 .config = ath10k_config,
9429 .add_interface = ath10k_add_interface,
9430 .update_vif_offload = ath10k_update_vif_offload,
9431 .remove_interface = ath10k_remove_interface,
9432 .configure_filter = ath10k_configure_filter,
9433 .bss_info_changed = ath10k_bss_info_changed,
9434 .set_coverage_class = ath10k_mac_op_set_coverage_class,
9435 .hw_scan = ath10k_hw_scan,
9436 .cancel_hw_scan = ath10k_cancel_hw_scan,
9437 .set_key = ath10k_set_key,
9438 .set_default_unicast_key = ath10k_set_default_unicast_key,
9439 .sta_state = ath10k_sta_state,
9440 .sta_set_txpwr = ath10k_sta_set_txpwr,
9441 .conf_tx = ath10k_conf_tx,
9442 .remain_on_channel = ath10k_remain_on_channel,
9443 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel,
9444 .set_rts_threshold = ath10k_set_rts_threshold,
9445 .set_frag_threshold = ath10k_mac_op_set_frag_threshold,
9446 .flush = ath10k_flush,
9447 .tx_last_beacon = ath10k_tx_last_beacon,
9448 .set_antenna = ath10k_set_antenna,
9449 .get_antenna = ath10k_get_antenna,
9450 .reconfig_complete = ath10k_reconfig_complete,
9451 .get_survey = ath10k_get_survey,
9452 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask,
9453 .sta_rc_update = ath10k_sta_rc_update,
9454 .offset_tsf = ath10k_offset_tsf,
9455 .ampdu_action = ath10k_ampdu_action,
9456 .get_et_sset_count = ath10k_debug_get_et_sset_count,
9457 .get_et_stats = ath10k_debug_get_et_stats,
9458 .get_et_strings = ath10k_debug_get_et_strings,
9459 .add_chanctx = ath10k_mac_op_add_chanctx,
9460 .remove_chanctx = ath10k_mac_op_remove_chanctx,
9461 .change_chanctx = ath10k_mac_op_change_chanctx,
9462 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx,
9463 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx,
9464 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx,
9465 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove,
9466 .sta_statistics = ath10k_sta_statistics,
9467 .set_tid_config = ath10k_mac_op_set_tid_config,
9468 .reset_tid_config = ath10k_mac_op_reset_tid_config,
9469
9470 CFG80211_TESTMODE_CMD(ath10k_tm_cmd)
9471
9472 #ifdef CONFIG_PM
9473 .suspend = ath10k_wow_op_suspend,
9474 .resume = ath10k_wow_op_resume,
9475 .set_wakeup = ath10k_wow_op_set_wakeup,
9476 #endif
9477 #ifdef CONFIG_MAC80211_DEBUGFS
9478 .sta_add_debugfs = ath10k_sta_add_debugfs,
9479 #endif
9480 .set_sar_specs = ath10k_mac_set_sar_specs,
9481 };
9482
9483 #define CHAN2G(_channel, _freq, _flags) { \
9484 .band = NL80211_BAND_2GHZ, \
9485 .hw_value = (_channel), \
9486 .center_freq = (_freq), \
9487 .flags = (_flags), \
9488 .max_antenna_gain = 0, \
9489 .max_power = 30, \
9490 }
9491
9492 #define CHAN5G(_channel, _freq, _flags) { \
9493 .band = NL80211_BAND_5GHZ, \
9494 .hw_value = (_channel), \
9495 .center_freq = (_freq), \
9496 .flags = (_flags), \
9497 .max_antenna_gain = 0, \
9498 .max_power = 30, \
9499 }
9500
9501 static const struct ieee80211_channel ath10k_2ghz_channels[] = {
9502 CHAN2G(1, 2412, 0),
9503 CHAN2G(2, 2417, 0),
9504 CHAN2G(3, 2422, 0),
9505 CHAN2G(4, 2427, 0),
9506 CHAN2G(5, 2432, 0),
9507 CHAN2G(6, 2437, 0),
9508 CHAN2G(7, 2442, 0),
9509 CHAN2G(8, 2447, 0),
9510 CHAN2G(9, 2452, 0),
9511 CHAN2G(10, 2457, 0),
9512 CHAN2G(11, 2462, 0),
9513 CHAN2G(12, 2467, 0),
9514 CHAN2G(13, 2472, 0),
9515 CHAN2G(14, 2484, 0),
9516 };
9517
9518 static const struct ieee80211_channel ath10k_5ghz_channels[] = {
9519 CHAN5G(36, 5180, 0),
9520 CHAN5G(40, 5200, 0),
9521 CHAN5G(44, 5220, 0),
9522 CHAN5G(48, 5240, 0),
9523 CHAN5G(52, 5260, 0),
9524 CHAN5G(56, 5280, 0),
9525 CHAN5G(60, 5300, 0),
9526 CHAN5G(64, 5320, 0),
9527 CHAN5G(100, 5500, 0),
9528 CHAN5G(104, 5520, 0),
9529 CHAN5G(108, 5540, 0),
9530 CHAN5G(112, 5560, 0),
9531 CHAN5G(116, 5580, 0),
9532 CHAN5G(120, 5600, 0),
9533 CHAN5G(124, 5620, 0),
9534 CHAN5G(128, 5640, 0),
9535 CHAN5G(132, 5660, 0),
9536 CHAN5G(136, 5680, 0),
9537 CHAN5G(140, 5700, 0),
9538 CHAN5G(144, 5720, 0),
9539 CHAN5G(149, 5745, 0),
9540 CHAN5G(153, 5765, 0),
9541 CHAN5G(157, 5785, 0),
9542 CHAN5G(161, 5805, 0),
9543 CHAN5G(165, 5825, 0),
9544 CHAN5G(169, 5845, 0),
9545 CHAN5G(173, 5865, 0),
9546 /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */
9547 /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */
9548 };
9549
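/* Allocate the ieee80211_hw/ath10k pair. The ops table is kmemdup()'d so
 * that each device gets a writable copy: ath10k_mac_register() later
 * NULLs callbacks such as .set_tid_config or .set_coverage_class when
 * the firmware or hardware does not support them.
 */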
9550 struct ath10k *ath10k_mac_create(size_t priv_size)
9551 {
9552 struct ieee80211_hw *hw;
9553 struct ieee80211_ops *ops;
9554 struct ath10k *ar;
9555
9556 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL);
9557 if (!ops)
9558 return NULL;
9559
9560 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops);
9561 if (!hw) {
9562 kfree(ops);
9563 return NULL;
9564 }
9565
9566 ar = hw->priv;
9567 ar->hw = hw;
9568 ar->ops = ops;
9569
9570 return ar;
9571 }
9572
9573 void ath10k_mac_destroy(struct ath10k *ar)
9574 {
9575 struct ieee80211_ops *ops = ar->ops;
9576
9577 ieee80211_free_hw(ar->hw);
9578 kfree(ops);
9579 }
9580
9581 static const struct ieee80211_iface_limit ath10k_if_limits[] = {
9582 {
9583 .max = 8,
9584 .types = BIT(NL80211_IFTYPE_STATION)
9585 | BIT(NL80211_IFTYPE_P2P_CLIENT)
9586 },
9587 {
9588 .max = 3,
9589 .types = BIT(NL80211_IFTYPE_P2P_GO)
9590 },
9591 {
9592 .max = 1,
9593 .types = BIT(NL80211_IFTYPE_P2P_DEVICE)
9594 },
9595 {
9596 .max = 7,
9597 .types = BIT(NL80211_IFTYPE_AP)
9598 #ifdef CONFIG_MAC80211_MESH
9599 | BIT(NL80211_IFTYPE_MESH_POINT)
9600 #endif
9601 },
9602 };
9603
9604 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
9605 {
9606 .max = 8,
9607 .types = BIT(NL80211_IFTYPE_AP)
9608 #ifdef CONFIG_MAC80211_MESH
9609 | BIT(NL80211_IFTYPE_MESH_POINT)
9610 #endif
9611 },
9612 {
9613 .max = 1,
9614 .types = BIT(NL80211_IFTYPE_STATION)
9615 },
9616 };
9617
9618 static const struct ieee80211_iface_combination ath10k_if_comb[] = {
9619 {
9620 .limits = ath10k_if_limits,
9621 .n_limits = ARRAY_SIZE(ath10k_if_limits),
9622 .max_interfaces = 8,
9623 .num_different_channels = 1,
9624 .beacon_int_infra_match = true,
9625 },
9626 };
9627
9628 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
9629 {
9630 .limits = ath10k_10x_if_limits,
9631 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
9632 .max_interfaces = 8,
9633 .num_different_channels = 1,
9634 .beacon_int_infra_match = true,
9635 .beacon_int_min_gcd = 1,
9636 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
9637 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
9638 BIT(NL80211_CHAN_WIDTH_20) |
9639 BIT(NL80211_CHAN_WIDTH_40) |
9640 BIT(NL80211_CHAN_WIDTH_80),
9641 #endif
9642 },
9643 };
9644
9645 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
9646 {
9647 .max = 2,
9648 .types = BIT(NL80211_IFTYPE_STATION),
9649 },
9650 {
9651 .max = 2,
9652 .types = BIT(NL80211_IFTYPE_AP) |
9653 #ifdef CONFIG_MAC80211_MESH
9654 BIT(NL80211_IFTYPE_MESH_POINT) |
9655 #endif
9656 BIT(NL80211_IFTYPE_P2P_CLIENT) |
9657 BIT(NL80211_IFTYPE_P2P_GO),
9658 },
9659 {
9660 .max = 1,
9661 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
9662 },
9663 };
9664
9665 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
9666 {
9667 .max = 2,
9668 .types = BIT(NL80211_IFTYPE_STATION),
9669 },
9670 {
9671 .max = 2,
9672 .types = BIT(NL80211_IFTYPE_P2P_CLIENT),
9673 },
9674 {
9675 .max = 1,
9676 .types = BIT(NL80211_IFTYPE_AP) |
9677 #ifdef CONFIG_MAC80211_MESH
9678 BIT(NL80211_IFTYPE_MESH_POINT) |
9679 #endif
9680 BIT(NL80211_IFTYPE_P2P_GO),
9681 },
9682 {
9683 .max = 1,
9684 .types = BIT(NL80211_IFTYPE_P2P_DEVICE),
9685 },
9686 };
9687
9688 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
9689 {
9690 .max = 1,
9691 .types = BIT(NL80211_IFTYPE_STATION),
9692 },
9693 {
9694 .max = 1,
9695 .types = BIT(NL80211_IFTYPE_ADHOC),
9696 },
9697 };
9698
9699 /* FIXME: This is not thoroughly tested. These combinations may over- or
9700 * underestimate hw/fw capabilities.
9701 */
9702 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
9703 {
9704 .limits = ath10k_tlv_if_limit,
9705 .num_different_channels = 1,
9706 .max_interfaces = 4,
9707 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
9708 },
9709 {
9710 .limits = ath10k_tlv_if_limit_ibss,
9711 .num_different_channels = 1,
9712 .max_interfaces = 2,
9713 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
9714 },
9715 };
9716
9717 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
9718 {
9719 .limits = ath10k_tlv_if_limit,
9720 .num_different_channels = 1,
9721 .max_interfaces = 4,
9722 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
9723 },
9724 {
9725 .limits = ath10k_tlv_qcs_if_limit,
9726 .num_different_channels = 2,
9727 .max_interfaces = 4,
9728 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
9729 },
9730 {
9731 .limits = ath10k_tlv_if_limit_ibss,
9732 .num_different_channels = 1,
9733 .max_interfaces = 2,
9734 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
9735 },
9736 };
9737
9738 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
9739 {
9740 .max = 1,
9741 .types = BIT(NL80211_IFTYPE_STATION),
9742 },
9743 {
9744 .max = 16,
9745 .types = BIT(NL80211_IFTYPE_AP)
9746 #ifdef CONFIG_MAC80211_MESH
9747 | BIT(NL80211_IFTYPE_MESH_POINT)
9748 #endif
9749 },
9750 };
9751
9752 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
9753 {
9754 .limits = ath10k_10_4_if_limits,
9755 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
9756 .max_interfaces = 16,
9757 .num_different_channels = 1,
9758 .beacon_int_infra_match = true,
9759 .beacon_int_min_gcd = 1,
9760 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
9761 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
9762 BIT(NL80211_CHAN_WIDTH_20) |
9763 BIT(NL80211_CHAN_WIDTH_40) |
9764 BIT(NL80211_CHAN_WIDTH_80) |
9765 BIT(NL80211_CHAN_WIDTH_80P80) |
9766 BIT(NL80211_CHAN_WIDTH_160),
9767 #endif
9768 },
9769 };
9770
9771 static const struct
9772 ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
9773 {
9774 .limits = ath10k_10_4_if_limits,
9775 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
9776 .max_interfaces = 16,
9777 .num_different_channels = 1,
9778 .beacon_int_infra_match = true,
9779 .beacon_int_min_gcd = 100,
9780 #ifdef CONFIG_ATH10K_DFS_CERTIFIED
9781 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
9782 BIT(NL80211_CHAN_WIDTH_20) |
9783 BIT(NL80211_CHAN_WIDTH_40) |
9784 BIT(NL80211_CHAN_WIDTH_80) |
9785 BIT(NL80211_CHAN_WIDTH_80P80) |
9786 BIT(NL80211_CHAN_WIDTH_160),
9787 #endif
9788 },
9789 };
9790
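/* Resolve a firmware vdev_id to its ath10k_vif by walking the active
 * interfaces; typically used when a WMI/HTT event only carries the vdev
 * id. Returns NULL (with a warning) when no interface matches.
 */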
9791 static void ath10k_get_arvif_iter(void *data, u8 *mac,
9792 struct ieee80211_vif *vif)
9793 {
9794 struct ath10k_vif_iter *arvif_iter = data;
9795 struct ath10k_vif *arvif = (void *)vif->drv_priv;
9796
9797 if (arvif->vdev_id == arvif_iter->vdev_id)
9798 arvif_iter->arvif = arvif;
9799 }
9800
9801 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
9802 {
9803 struct ath10k_vif_iter arvif_iter;
9804
9805 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
9806 arvif_iter.vdev_id = vdev_id;
9807
9808 ieee80211_iterate_active_interfaces_atomic(ar->hw,
9809 ATH10K_ITER_RESUME_FLAGS,
9810 ath10k_get_arvif_iter,
9811 &arvif_iter);
9812 if (!arvif_iter.arvif) {
9813 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
9814 return NULL;
9815 }
9816
9817 return arvif_iter.arvif;
9818 }
9819
9820 #define WRD_METHOD "WRDD"
9821 #define WRDD_WIFI (0x07)
9822
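/* Extract the Wi-Fi country code from an ACPI WRDD object. The layout
 * implied by the checks below is a package whose first element is an
 * integer revision (expected to be 0), followed by sub-packages of
 * (domain type, country code) integers; only the WRDD_WIFI (0x07) domain
 * is consumed.
 */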
9823 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
9824 {
9825 union acpi_object *mcc_pkg;
9826 union acpi_object *domain_type;
9827 union acpi_object *mcc_value;
9828 u32 i;
9829
9830 if (wrdd->type != ACPI_TYPE_PACKAGE ||
9831 wrdd->package.count < 2 ||
9832 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
9833 wrdd->package.elements[0].integer.value != 0) {
9834 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
9835 return 0;
9836 }
9837
9838 for (i = 1; i < wrdd->package.count; ++i) {
9839 mcc_pkg = &wrdd->package.elements[i];
9840
9841 if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
9842 continue;
9843 if (mcc_pkg->package.count < 2)
9844 continue;
9845 if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
9846 mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
9847 continue;
9848
9849 domain_type = &mcc_pkg->package.elements[0];
9850 if (domain_type->integer.value != WRDD_WIFI)
9851 continue;
9852
9853 mcc_value = &mcc_pkg->package.elements[1];
9854 return mcc_value->integer.value;
9855 }
9856 return 0;
9857 }
9858
9859 static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
9860 {
9861 acpi_handle root_handle;
9862 acpi_handle handle;
9863 struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
9864 acpi_status status;
9865 u32 alpha2_code;
9866 char alpha2[3];
9867
9868 root_handle = ACPI_HANDLE(ar->dev);
9869 if (!root_handle)
9870 return -EOPNOTSUPP;
9871
9872 status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
9873 if (ACPI_FAILURE(status)) {
9874 ath10k_dbg(ar, ATH10K_DBG_BOOT,
9875 "failed to get wrd method %d\n", status);
9876 return -EIO;
9877 }
9878
9879 status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
9880 if (ACPI_FAILURE(status)) {
9881 ath10k_dbg(ar, ATH10K_DBG_BOOT,
9882 			   "failed to call wrdd %d\n", status);
9883 return -EIO;
9884 }
9885
9886 alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
9887 kfree(wrdd.pointer);
9888 if (!alpha2_code)
9889 return -EIO;
9890
9891 alpha2[0] = (alpha2_code >> 8) & 0xff;
9892 alpha2[1] = (alpha2_code >> 0) & 0xff;
9893 alpha2[2] = '\0';
9894
9895 ath10k_dbg(ar, ATH10K_DBG_BOOT,
9896 "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);
9897
9898 *rd = ath_regd_find_country_by_name(alpha2);
9899 if (*rd == 0xffff)
9900 return -EIO;
9901
9902 *rd |= COUNTRY_ERD_FLAG;
9903 return 0;
9904 }
9905
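/* Pick the initial regulatory domain: prefer a country hint from the
 * ACPI WRDD method and fall back to the EEPROM-programmed regdomain when
 * WRDD is absent or malformed.
 */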
9906 static int ath10k_mac_init_rd(struct ath10k *ar)
9907 {
9908 int ret;
9909 u16 rd;
9910
9911 ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
9912 if (ret) {
9913 ath10k_dbg(ar, ATH10K_DBG_BOOT,
9914 "fallback to eeprom programmed regulatory settings\n");
9915 rd = ar->hw_eeprom_rd;
9916 }
9917
9918 ar->ath_common.regulatory.current_rd = rd;
9919 return 0;
9920 }
9921
9922 int ath10k_mac_register(struct ath10k *ar)
9923 {
9924 static const u32 cipher_suites[] = {
9925 WLAN_CIPHER_SUITE_WEP40,
9926 WLAN_CIPHER_SUITE_WEP104,
9927 WLAN_CIPHER_SUITE_TKIP,
9928 WLAN_CIPHER_SUITE_CCMP,
9929
9930 /* Do not add hardware supported ciphers before this line.
9931 * Allow software encryption for all chips. Don't forget to
9932 * update n_cipher_suites below.
9933 */
9934 WLAN_CIPHER_SUITE_AES_CMAC,
9935 WLAN_CIPHER_SUITE_BIP_CMAC_256,
9936 WLAN_CIPHER_SUITE_BIP_GMAC_128,
9937 WLAN_CIPHER_SUITE_BIP_GMAC_256,
9938
9939 /* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
9940 * and CCMP-256 in hardware.
9941 */
9942 WLAN_CIPHER_SUITE_GCMP,
9943 WLAN_CIPHER_SUITE_GCMP_256,
9944 WLAN_CIPHER_SUITE_CCMP_256,
9945 };
9946 struct ieee80211_supported_band *band;
9947 void *channels;
9948 int ret;
9949
9950 if (!is_valid_ether_addr(ar->mac_addr)) {
9951 ath10k_warn(ar, "invalid MAC address; choosing random\n");
9952 eth_random_addr(ar->mac_addr);
9953 }
9954 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);
9955
9956 SET_IEEE80211_DEV(ar->hw, ar->dev);
9957
9958 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
9959 ARRAY_SIZE(ath10k_5ghz_channels)) !=
9960 ATH10K_NUM_CHANS);
9961
9962 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
9963 channels = kmemdup(ath10k_2ghz_channels,
9964 sizeof(ath10k_2ghz_channels),
9965 GFP_KERNEL);
9966 if (!channels) {
9967 ret = -ENOMEM;
9968 goto err_free;
9969 }
9970
9971 band = &ar->mac.sbands[NL80211_BAND_2GHZ];
9972 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
9973 band->channels = channels;
9974
9975 if (ar->hw_params.cck_rate_map_rev2) {
9976 band->n_bitrates = ath10k_g_rates_rev2_size;
9977 band->bitrates = ath10k_g_rates_rev2;
9978 } else {
9979 band->n_bitrates = ath10k_g_rates_size;
9980 band->bitrates = ath10k_g_rates;
9981 }
9982
9983 ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
9984 }
9985
9986 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
9987 channels = kmemdup(ath10k_5ghz_channels,
9988 sizeof(ath10k_5ghz_channels),
9989 GFP_KERNEL);
9990 if (!channels) {
9991 ret = -ENOMEM;
9992 goto err_free;
9993 }
9994
9995 band = &ar->mac.sbands[NL80211_BAND_5GHZ];
9996 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
9997 band->channels = channels;
9998 band->n_bitrates = ath10k_a_rates_size;
9999 band->bitrates = ath10k_a_rates;
10000 ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
10001 }
10002
10003 wiphy_read_of_freq_limits(ar->hw->wiphy);
10004 ath10k_mac_setup_ht_vht_cap(ar);
10005
10006 ar->hw->wiphy->interface_modes =
10007 BIT(NL80211_IFTYPE_STATION) |
10008 BIT(NL80211_IFTYPE_AP) |
10009 BIT(NL80211_IFTYPE_MESH_POINT);
10010
10011 ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
10012 ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;
10013
10014 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
10015 ar->hw->wiphy->interface_modes |=
10016 BIT(NL80211_IFTYPE_P2P_DEVICE) |
10017 BIT(NL80211_IFTYPE_P2P_CLIENT) |
10018 BIT(NL80211_IFTYPE_P2P_GO);
10019
10020 ieee80211_hw_set(ar->hw, SIGNAL_DBM);
10021
10022 if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
10023 ar->running_fw->fw_file.fw_features)) {
10024 ieee80211_hw_set(ar->hw, SUPPORTS_PS);
10025 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
10026 }
10027
10028 ieee80211_hw_set(ar->hw, MFP_CAPABLE);
10029 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
10030 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
10031 ieee80211_hw_set(ar->hw, AP_LINK_PS);
10032 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
10033 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
10034 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
10035 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
10036 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
10037 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
10038 ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
10039 ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
10040 ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);
10041
10042 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
10043 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);
10044
10045 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
10046 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
10047
10048 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
10049 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;
10050
10051 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
10052 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
10053 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
10054 }
10055
10056 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
10057 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;
10058
10059 if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
10060 ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
10061 ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
10062 ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
10063 ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
10064 ar->hw->wiphy->max_sched_scan_plan_interval =
10065 WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
10066 ar->hw->wiphy->max_sched_scan_plan_iterations =
10067 WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
10068 ar->hw->wiphy->features |= NL80211_FEATURE_ND_RANDOM_MAC_ADDR;
10069 }
10070
10071 ar->hw->vif_data_size = sizeof(struct ath10k_vif);
10072 ar->hw->sta_data_size = sizeof(struct ath10k_sta);
10073 ar->hw->txq_data_size = sizeof(struct ath10k_txq);
10074
10075 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;
10076
10077 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
10078 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;
10079
10080 		/* Firmware delivers WPS/P2P Probe Request frames to the driver so
10081 		 * that userspace (e.g. wpa_supplicant/hostapd) can generate correct
10082 		 * Probe Responses; advertising the offload flag here is more of a hack.
10083 		 */
10084 ar->hw->wiphy->probe_resp_offload |=
10085 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
10086 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
10087 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
10088 }
10089
10090 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
10091 test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
10092 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
10093 if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
10094 ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
10095 }
10096
10097 if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
10098 ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);
10099
10100 if (ath10k_frame_mode == ATH10K_HW_TXRX_ETHERNET) {
10101 if (ar->wmi.vdev_param->tx_encap_type !=
10102 WMI_VDEV_PARAM_UNSUPPORTED)
10103 ieee80211_hw_set(ar->hw, SUPPORTS_TX_ENCAP_OFFLOAD);
10104 }
10105
10106 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
10107 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
10108 ar->hw->wiphy->max_remain_on_channel_duration = 5000;
10109
10110 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
10111 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
10112 NL80211_FEATURE_AP_SCAN;
10113
10114 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;
10115
10116 ret = ath10k_wow_init(ar);
10117 if (ret) {
10118 ath10k_warn(ar, "failed to init wow: %d\n", ret);
10119 goto err_free;
10120 }
10121
10122 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
10123 wiphy_ext_feature_set(ar->hw->wiphy,
10124 NL80211_EXT_FEATURE_SET_SCAN_DWELL);
10125 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_AQL);
10126
10127 if (ar->hw_params.mcast_frame_registration)
10128 wiphy_ext_feature_set(ar->hw->wiphy,
10129 NL80211_EXT_FEATURE_MULTICAST_REGISTRATIONS);
10130
10131 if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
10132 test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
10133 wiphy_ext_feature_set(ar->hw->wiphy,
10134 NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);
10135
10136 if (ath10k_peer_stats_enabled(ar) ||
10137 test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
10138 wiphy_ext_feature_set(ar->hw->wiphy,
10139 NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);
10140
10141 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
10142 wiphy_ext_feature_set(ar->hw->wiphy,
10143 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);
10144
10145 if (test_bit(WMI_SERVICE_TX_PWR_PER_PEER, ar->wmi.svc_map))
10146 wiphy_ext_feature_set(ar->hw->wiphy,
10147 NL80211_EXT_FEATURE_STA_TX_PWR);
10148
10149 if (test_bit(WMI_SERVICE_PEER_TID_CONFIGS_SUPPORT, ar->wmi.svc_map)) {
10150 ar->hw->wiphy->tid_config_support.vif |=
10151 BIT(NL80211_TID_CONFIG_ATTR_NOACK) |
10152 BIT(NL80211_TID_CONFIG_ATTR_RETRY_SHORT) |
10153 BIT(NL80211_TID_CONFIG_ATTR_RETRY_LONG) |
10154 BIT(NL80211_TID_CONFIG_ATTR_AMPDU_CTRL) |
10155 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE) |
10156 BIT(NL80211_TID_CONFIG_ATTR_TX_RATE_TYPE);
10157
10158 if (test_bit(WMI_SERVICE_EXT_PEER_TID_CONFIGS_SUPPORT,
10159 ar->wmi.svc_map)) {
10160 ar->hw->wiphy->tid_config_support.vif |=
10161 BIT(NL80211_TID_CONFIG_ATTR_RTSCTS_CTRL);
10162 }
10163
10164 ar->hw->wiphy->tid_config_support.peer =
10165 ar->hw->wiphy->tid_config_support.vif;
10166 ar->hw->wiphy->max_data_retry_count = ATH10K_MAX_RETRY_COUNT;
10167 } else {
10168 ar->ops->set_tid_config = NULL;
10169 }
10170 	/*
10171 	 * On LL (low-latency) hardware the queues are managed entirely by the
10172 	 * firmware, so we only advertise to mac80211 that we can handle the
10173 	 * maximum number of queues.
10174 	 */
10174 ar->hw->queues = IEEE80211_MAX_QUEUES;
10175
10176 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
10177 * something that vdev_ids can't reach so that we don't stop the queue
10178 * accidentally.
10179 */
10180 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;
10181
10182 switch (ar->running_fw->fw_file.wmi_op_version) {
10183 case ATH10K_FW_WMI_OP_VERSION_MAIN:
10184 ar->hw->wiphy->iface_combinations = ath10k_if_comb;
10185 ar->hw->wiphy->n_iface_combinations =
10186 ARRAY_SIZE(ath10k_if_comb);
10187 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
10188 break;
10189 case ATH10K_FW_WMI_OP_VERSION_TLV:
10190 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
10191 ar->hw->wiphy->iface_combinations =
10192 ath10k_tlv_qcs_if_comb;
10193 ar->hw->wiphy->n_iface_combinations =
10194 ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
10195 } else {
10196 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
10197 ar->hw->wiphy->n_iface_combinations =
10198 ARRAY_SIZE(ath10k_tlv_if_comb);
10199 }
10200 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
10201 break;
10202 case ATH10K_FW_WMI_OP_VERSION_10_1:
10203 case ATH10K_FW_WMI_OP_VERSION_10_2:
10204 case ATH10K_FW_WMI_OP_VERSION_10_2_4:
10205 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
10206 ar->hw->wiphy->n_iface_combinations =
10207 ARRAY_SIZE(ath10k_10x_if_comb);
10208 break;
10209 case ATH10K_FW_WMI_OP_VERSION_10_4:
10210 ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
10211 ar->hw->wiphy->n_iface_combinations =
10212 ARRAY_SIZE(ath10k_10_4_if_comb);
10213 if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
10214 ar->wmi.svc_map)) {
10215 ar->hw->wiphy->iface_combinations =
10216 ath10k_10_4_bcn_int_if_comb;
10217 ar->hw->wiphy->n_iface_combinations =
10218 ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
10219 }
10220 break;
10221 case ATH10K_FW_WMI_OP_VERSION_UNSET:
10222 case ATH10K_FW_WMI_OP_VERSION_MAX:
10223 WARN_ON(1);
10224 ret = -EINVAL;
10225 goto err_free;
10226 }
10227
10228 if (ar->hw_params.dynamic_sar_support)
10229 ar->hw->wiphy->sar_capa = &ath10k_sar_capa;
10230
10231 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
10232 ar->hw->netdev_features = NETIF_F_HW_CSUM;
10233
10234 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
10235 /* Init ath dfs pattern detector */
10236 ar->ath_common.debug_mask = ATH_DBG_DFS;
10237 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
10238 NL80211_DFS_UNSET);
10239
10240 if (!ar->dfs_detector)
10241 ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
10242 }
10243
10244 ret = ath10k_mac_init_rd(ar);
10245 if (ret) {
10246 ath10k_err(ar, "failed to derive regdom: %d\n", ret);
10247 goto err_dfs_detector_exit;
10248 }
10249
10250 /* Disable set_coverage_class for chipsets that do not support it. */
10251 if (!ar->hw_params.hw_ops->set_coverage_class)
10252 ar->ops->set_coverage_class = NULL;
10253
10254 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
10255 ath10k_reg_notifier);
10256 if (ret) {
10257 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
10258 goto err_dfs_detector_exit;
10259 }
10260
10261 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
10262 ar->hw->wiphy->features |=
10263 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
10264 }
10265
10266 ar->hw->wiphy->cipher_suites = cipher_suites;
10267
10268 /* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
10269 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
10270 * from chip specific hw_param table.
10271 */
10272 if (!ar->hw_params.n_cipher_suites ||
10273 ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
10274 ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
10275 ar->hw_params.n_cipher_suites);
10276 ar->hw_params.n_cipher_suites = 8;
10277 }
10278 ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;
10279
10280 wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);
10281
10282 ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;
10283
10284 ret = ieee80211_register_hw(ar->hw);
10285 if (ret) {
10286 ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
10287 goto err_dfs_detector_exit;
10288 }
10289
10290 if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
10291 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
10292 ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
10293 }
10294
10295 if (!ath_is_world_regd(&ar->ath_common.reg_world_copy) &&
10296 !ath_is_world_regd(&ar->ath_common.regulatory)) {
10297 ret = regulatory_hint(ar->hw->wiphy,
10298 ar->ath_common.regulatory.alpha2);
10299 if (ret)
10300 goto err_unregister;
10301 }
10302
10303 return 0;
10304
10305 err_unregister:
10306 ieee80211_unregister_hw(ar->hw);
10307
10308 err_dfs_detector_exit:
10309 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
10310 ar->dfs_detector->exit(ar->dfs_detector);
10311
10312 err_free:
10313 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
10314 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
10315
10316 SET_IEEE80211_DEV(ar->hw, NULL);
10317 return ret;
10318 }
10319
10320 void ath10k_mac_unregister(struct ath10k *ar)
10321 {
10322 ieee80211_unregister_hw(ar->hw);
10323
10324 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
10325 ar->dfs_detector->exit(ar->dfs_detector);
10326
10327 kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
10328 kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);
10329
10330 SET_IEEE80211_DEV(ar->hw, NULL);
10331 }
10332