/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018, The Linux Foundation. All rights reserved.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

static struct ieee80211_rate ath10k_rates_rev2[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};
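/* The first four entries in each table above are the CCK rates (2.4 GHz
 * only); OFDM rates start at index 4. The macros below carve these legacy
 * rate tables into the 5 GHz ("a", OFDM only) and 2.4 GHz ("g", CCK + OFDM)
 * rate sets handed to mac80211. Note that .bitrate values follow the
 * mac80211 convention of 100 kbps units (10 == 1 Mbps).
 */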
#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))

#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))

static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		return true;
	}

	return false;
}

static u8 ath10k_mac_bitrate_to_rate(int bitrate)
{
	return DIV_ROUND_UP(bitrate, 5) |
	       (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0);
}

u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband,
			     u8 hw_rate, bool cck)
{
	const struct ieee80211_rate *rate;
	int i;

	for (i = 0; i < sband->n_bitrates; i++) {
		rate = &sband->bitrates[i];

		if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck)
			continue;

		if (rate->hw_value == hw_rate)
			return i;
		else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE &&
			 rate->hw_value_short == hw_rate)
			return i;
	}

	return 0;
}

u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband,
			     u32 bitrate)
{
	int i;

	for (i = 0; i < sband->n_bitrates; i++)
		if (sband->bitrates[i].bitrate == bitrate)
			return i;

	return 0;
}

static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss)
{
	switch ((mcs_map >> (2 * nss)) & 0x3) {
	case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1;
	case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1;
	}
	return 0;
}

static u32
ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN])
{
	int nss;

	for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--)
		if (ht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

static u32
ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX])
{
	int nss;

	for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--)
		if (vht_mcs_mask[nss])
			return nss + 1;

	return 1;
}

int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val)
{
	enum wmi_host_platform_type platform_type;
	int ret;

	if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map))
		platform_type = WMI_HOST_PLATFORM_LOW_PERF;
	else
		platform_type = WMI_HOST_PLATFORM_HIGH_PERF;

	ret = ath10k_wmi_ext_resource_config(ar, platform_type, val);

	if (ret && ret != -EOPNOTSUPP) {
		ath10k_warn(ar, "failed to configure ext resource: %d\n", ret);
		return ret;
	}

	return 0;
}

/**********/
/* Crypto */
/**********/
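/* Translate a mac80211 key into a WMI vdev install_key command. The
 * management frame protection ciphers (CMAC/GMAC) are rejected here;
 * presumably those are left to mac80211's software crypto rather than
 * installed in the firmware.
 */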
static int ath10k_send_key(struct ath10k_vif *arvif,
			   struct ieee80211_key_conf *key,
			   enum set_key_cmd cmd,
			   const u8 *macaddr, u32 flags)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_install_key_arg arg = {
		.vdev_id = arvif->vdev_id,
		.key_idx = key->keyidx,
		.key_len = key->keylen,
		.key_data = key->key,
		.key_flags = flags,
		.macaddr = macaddr,
	};

	lockdep_assert_held(&arvif->ar->conf_mutex);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_CCMP:
		arg.key_cipher = WMI_CIPHER_AES_CCM;
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		arg.key_cipher = WMI_CIPHER_TKIP;
		arg.key_txmic_len = 8;
		arg.key_rxmic_len = 8;
		break;
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		arg.key_cipher = WMI_CIPHER_WEP;
		break;
	case WLAN_CIPHER_SUITE_CCMP_256:
		arg.key_cipher = WMI_CIPHER_AES_CCM;
		break;
	case WLAN_CIPHER_SUITE_GCMP:
	case WLAN_CIPHER_SUITE_GCMP_256:
		arg.key_cipher = WMI_CIPHER_AES_GCM;
		break;
	case WLAN_CIPHER_SUITE_BIP_GMAC_128:
	case WLAN_CIPHER_SUITE_BIP_GMAC_256:
	case WLAN_CIPHER_SUITE_BIP_CMAC_256:
	case WLAN_CIPHER_SUITE_AES_CMAC:
		WARN_ON(1);
		return -EINVAL;
	default:
		ath10k_warn(ar, "cipher %d is not supported\n", key->cipher);
		return -EOPNOTSUPP;
	}

	if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;

	if (cmd == DISABLE_KEY) {
		arg.key_cipher = WMI_CIPHER_NONE;
		arg.key_data = NULL;
	}

	return ath10k_wmi_vdev_install_key(arvif->ar, &arg);
}

static int ath10k_install_key(struct ath10k_vif *arvif,
			      struct ieee80211_key_conf *key,
			      enum set_key_cmd cmd,
			      const u8 *macaddr, u32 flags)
{
	struct ath10k *ar = arvif->ar;
	int ret;
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->install_key_done);

	if (arvif->nohwcrypt)
		return 1;

	ret = ath10k_send_key(arvif, key, cmd, macaddr, flags);
	if (ret)
		return ret;

	time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif,
					const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;
	int i;
	u32 flags;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP &&
		    arvif->vif->type != NL80211_IFTYPE_ADHOC &&
		    arvif->vif->type != NL80211_IFTYPE_MESH_POINT))
		return -EINVAL;

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
		if (arvif->wep_keys[i] == NULL)
			continue;

		switch (arvif->vif->type) {
		case NL80211_IFTYPE_AP:
			flags = WMI_KEY_PAIRWISE;

			if (arvif->def_wep_key_idx == i)
				flags |= WMI_KEY_TX_USAGE;

			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr, flags);
			if (ret < 0)
				return ret;
			break;
		case NL80211_IFTYPE_ADHOC:
			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr,
						 WMI_KEY_PAIRWISE);
			if (ret < 0)
				return ret;

			ret = ath10k_install_key(arvif, arvif->wep_keys[i],
						 SET_KEY, addr, WMI_KEY_GROUP);
			if (ret < 0)
				return ret;
			break;
		default:
			WARN_ON(1);
			return -EINVAL;
		}

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = arvif->wep_keys[i];
		spin_unlock_bh(&ar->data_lock);
	}

	/* In some cases (notably with static WEP IBSS with multiple keys)
	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
	 * effective so use that.
	 *
	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
	 */
	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
		return 0;

	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret < 0)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
	 */
	peer = ath10k_peer_find(ar, 0, addr);
	if (!peer)
		return false;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
			return true;
	}

	return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key() can sleep we can't hold
		 * data_lock all the time, so we try to remove the keys
		 * incrementally
		 */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;
		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
					 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(peer, &ar->peers, list) {
		if (ether_addr_equal(peer->addr, arvif->vif->addr))
			continue;

		if (ether_addr_equal(peer->addr, arvif->bssid))
			continue;

		if (peer->keys[key->keyidx] == key)
			continue;

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n",
			   arvif->vdev_id, key->keyidx);

		ret = ath10k_install_peer_wep_keys(arvif, peer->addr);
		if (ret) {
			ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n",
				    arvif->vdev_id, peer->addr, ret);
			return ret;
		}
	}

	return 0;
}

/*********************/
/* General utilities */
/*********************/
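/* Map a cfg80211 channel definition onto the corresponding WMI phy mode.
 * Widths that have no matching mode (5/10 MHz on either band, VHT widths
 * on 2.4 GHz) resolve to MODE_UNKNOWN, which trips the WARN_ON below.
 */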
static inline enum wmi_phy_mode
chan_to_phymode(const struct cfg80211_chan_def *chandef)
{
	enum wmi_phy_mode phymode = MODE_UNKNOWN;

	switch (chandef->chan->band) {
	case NL80211_BAND_2GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM)
				phymode = MODE_11B;
			else
				phymode = MODE_11G;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NG_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NG_HT40;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
		case NL80211_CHAN_WIDTH_80:
		case NL80211_CHAN_WIDTH_80P80:
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	case NL80211_BAND_5GHZ:
		switch (chandef->width) {
		case NL80211_CHAN_WIDTH_20_NOHT:
			phymode = MODE_11A;
			break;
		case NL80211_CHAN_WIDTH_20:
			phymode = MODE_11NA_HT20;
			break;
		case NL80211_CHAN_WIDTH_40:
			phymode = MODE_11NA_HT40;
			break;
		case NL80211_CHAN_WIDTH_80:
			phymode = MODE_11AC_VHT80;
			break;
		case NL80211_CHAN_WIDTH_160:
			phymode = MODE_11AC_VHT160;
			break;
		case NL80211_CHAN_WIDTH_80P80:
			phymode = MODE_11AC_VHT80_80;
			break;
		case NL80211_CHAN_WIDTH_5:
		case NL80211_CHAN_WIDTH_10:
			phymode = MODE_UNKNOWN;
			break;
		}
		break;
	default:
		break;
	}

	WARN_ON(phymode == MODE_UNKNOWN);
	return phymode;
}

static u8 ath10k_parse_mpdudensity(u8 mpdudensity)
{
	/*
	 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing":
	 *   0 for no restriction
	 *   1 for 1/4 us
	 *   2 for 1/2 us
	 *   3 for 1 us
	 *   4 for 2 us
	 *   5 for 4 us
	 *   6 for 8 us
	 *   7 for 16 us
	 */
	switch (mpdudensity) {
	case 0:
		return 0;
	case 1:
	case 2:
	case 3:
		/* Our lower layer calculations limit our precision to
		 * 1 microsecond
		 */
		return 1;
	case 4:
		return 2;
	case 5:
		return 4;
	case 6:
		return 8;
	case 7:
		return 16;
	default:
		return 0;
	}
}

int ath10k_mac_vif_chan(struct ieee80211_vif *vif,
			struct cfg80211_chan_def *def)
{
	struct ieee80211_chanctx_conf *conf;

	rcu_read_lock();
	conf = rcu_dereference(vif->chanctx_conf);
	if (!conf) {
		rcu_read_unlock();
		return -ENOENT;
	}

	*def = conf->def;
	rcu_read_unlock();

	return 0;
}

static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw,
					 struct ieee80211_chanctx_conf *conf,
					 void *data)
{
	int *num = data;

	(*num)++;
}

static int ath10k_mac_num_chanctxs(struct ath10k *ar)
{
	int num = 0;

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_num_chanctxs_iter,
					    &num);

	return num;
}

static void
ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw,
				struct ieee80211_chanctx_conf *conf,
				void *data)
{
	struct cfg80211_chan_def **def = data;

	*def = &conf->def;
}

static int ath10k_peer_create(struct ath10k *ar,
			      struct ieee80211_vif *vif,
			      struct ieee80211_sta *sta,
			      u32 vdev_id,
			      const u8 *addr,
			      enum wmi_peer_type peer_type)
{
	struct ath10k_vif *arvif;
	struct ath10k_peer *peer;
	int num_peers = 0;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	num_peers = ar->num_peers;

	/* Each vdev consumes a peer entry as well */
	list_for_each_entry(arvif, &ar->arvifs, list)
		num_peers++;

	if (num_peers >= ar->max_num_peers)
		return -ENOBUFS;

	ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type);
	if (ret) {
		ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}

	ret = ath10k_wait_for_peer_created(ar, vdev_id, addr);
	if (ret) {
		ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n",
			    addr, vdev_id, ret);
		return ret;
	}

	spin_lock_bh(&ar->data_lock);

	peer = ath10k_peer_find(ar, vdev_id, addr);
	if (!peer) {
		spin_unlock_bh(&ar->data_lock);
		ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n",
			    addr, vdev_id);
		ath10k_wmi_peer_delete(ar, vdev_id, addr);
		return -ENOENT;
	}

	peer->vif = vif;
	peer->sta = sta;

	spin_unlock_bh(&ar->data_lock);

	ar->num_peers++;

	return 0;
}

static int ath10k_mac_set_kickout(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	int ret;

	param = ar->wmi.pdev_param->sta_kickout_th;
	ret = ath10k_wmi_pdev_set_param(ar, param,
					ATH10K_KICKOUT_THRESHOLD);
	if (ret) {
		ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MIN_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_IDLE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param,
					ATH10K_KEEPALIVE_MAX_UNRESPONSIVE);
	if (ret) {
		ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	vdev_param = ar->wmi.vdev_param->rts_threshold;
	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value);
}

static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_peer_delete(ar, vdev_id, addr);
	if (ret)
		return ret;

	ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr);
	if (ret)
		return ret;

	ar->num_peers--;

	return 0;
}

static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_peer *peer, *tmp;
	int peer_id;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		if (peer->vdev_id != vdev_id)
			continue;

		ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n",
			    peer->addr, vdev_id);

		for_each_set_bit(peer_id, peer->peer_ids,
				 ATH10K_MAX_NUM_PEER_IDS) {
			ar->peer_map[peer_id] = NULL;
		}

		/* Double check that peer is properly un-referenced from
		 * the peer_map
		 */
		for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) {
			if (ar->peer_map[i] == peer) {
				ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n",
					    peer->addr, peer, i);
				ar->peer_map[i] = NULL;
			}
		}

		list_del(&peer->list);
		kfree(peer);
		ar->num_peers--;
	}
	spin_unlock_bh(&ar->data_lock);
}

static void ath10k_peer_cleanup_all(struct ath10k *ar)
{
	struct ath10k_peer *peer, *tmp;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	list_for_each_entry_safe(peer, tmp, &ar->peers, list) {
		list_del(&peer->list);
		kfree(peer);
	}

	for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++)
		ar->peer_map[i] = NULL;

	spin_unlock_bh(&ar->data_lock);

	ar->num_peers = 0;
	ar->num_stations = 0;
}
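/* Push a TDLS peer state transition (and the peer's U-APSD capabilities) to
 * the firmware. The responder flag is derived from sta->tdls_initiator once
 * the link reaches the connected state.
 */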
static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id,
				       struct ieee80211_sta *sta,
				       enum wmi_tdls_peer_state state)
{
	int ret;
	struct wmi_tdls_peer_update_cmd_arg arg = {};
	struct wmi_tdls_peer_capab_arg cap = {};
	struct wmi_channel_arg chan_arg = {};

	lockdep_assert_held(&ar->conf_mutex);

	arg.vdev_id = vdev_id;
	arg.peer_state = state;
	ether_addr_copy(arg.addr, sta->addr);

	cap.peer_max_sp = sta->max_sp;
	cap.peer_uapsd_queues = sta->uapsd_queues;

	if (state == WMI_TDLS_PEER_STATE_CONNECTED &&
	    !sta->tdls_initiator)
		cap.is_peer_responder = 1;

	ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg);
	if (ret) {
		ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n",
			    arg.addr, vdev_id, ret);
		return ret;
	}

	return 0;
}

/************************/
/* Interface management */
/************************/

void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	if (!arvif->beacon)
		return;

	if (!arvif->beacon_buf)
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
				 arvif->beacon->len, DMA_TO_DEVICE);

	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
		    arvif->beacon_state != ATH10K_BEACON_SENT))
		return;

	dev_kfree_skb_any(arvif->beacon);

	arvif->beacon = NULL;
	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
}

static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	ath10k_mac_vif_beacon_free(arvif);

	if (arvif->beacon_buf) {
		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
				  arvif->beacon_buf, arvif->beacon_paddr);
		arvif->beacon_buf = NULL;
	}
}

static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}

static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;

	/* TODO setup this dynamically, what if we don't have any vifs? */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

	bit = __ffs64(ar->free_vdev_map);

	ar->monitor_vdev_id = bit;

	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
				     WMI_VDEV_TYPE_MONITOR,
				     0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
		   ar->monitor_vdev_id);
	return ret;
}
static int ath10k_monitor_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_create(ar);
	if (ret) {
		ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret);
		ath10k_monitor_vdev_delete(ar);
		return ret;
	}

	ar->monitor_started = true;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n");

	return 0;
}

static int ath10k_monitor_stop(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_monitor_vdev_stop(ar);
	if (ret) {
		ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret);
		return ret;
	}

	ret = ath10k_monitor_vdev_delete(ar);
	if (ret) {
		ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret);
		return ret;
	}

	ar->monitor_started = false;
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n");

	return 0;
}

static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar)
{
	int num_ctx;

	/* At least one chanctx is required to derive a channel to start
	 * monitor vdev on.
	 */
	num_ctx = ath10k_mac_num_chanctxs(ar);
	if (num_ctx == 0)
		return false;

	/* If there's already an existing special monitor interface then don't
	 * bother creating another monitor vdev.
	 */
	if (ar->monitor_arvif)
		return false;

	return ar->monitor ||
	       (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST,
			  ar->running_fw->fw_file.fw_features) &&
		(ar->filter_flags & FIF_OTHER_BSS)) ||
	       test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
}

static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar)
{
	int num_ctx;

	num_ctx = ath10k_mac_num_chanctxs(ar);

	/* FIXME: Current interface combinations and cfg80211/mac80211 code
	 * shouldn't allow this but make sure to prevent handling the following
	 * case anyway since multi-channel DFS hasn't been tested at all.
	 */
	if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1)
		return false;

	return true;
}
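/* Reconcile the actual monitor vdev state with what is currently needed: a
 * monitor vdev is wanted when userspace runs a monitor interface, when
 * FIF_OTHER_BSS filtering is requested on firmware lacking
 * ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, or while CAC is running, and it is
 * started or stopped here accordingly.
 */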
static int ath10k_monitor_recalc(struct ath10k *ar)
{
	bool needed;
	bool allowed;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	needed = ath10k_mac_monitor_vdev_is_needed(ar);
	allowed = ath10k_mac_monitor_vdev_is_allowed(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac monitor recalc started? %d needed? %d allowed? %d\n",
		   ar->monitor_started, needed, allowed);

	if (WARN_ON(needed && !allowed)) {
		if (ar->monitor_started) {
			ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n");

			ret = ath10k_monitor_stop(ar);
			if (ret)
				ath10k_warn(ar, "failed to stop disallowed monitor: %d\n",
					    ret);
				/* not serious */
		}

		return -EPERM;
	}

	if (needed == ar->monitor_started)
		return 0;

	if (needed)
		return ath10k_monitor_start(ar);
	else
		return ath10k_monitor_stop(ar);
}

static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->conf_mutex);

	if (!arvif->is_started) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n");
		return false;
	}

	return true;
}

static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->protection_mode;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n",
		   arvif->vdev_id, arvif->use_cts_prot);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 arvif->use_cts_prot ? 1 : 0);
}

static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param, rts_cts = 0;

	lockdep_assert_held(&ar->conf_mutex);

	vdev_param = ar->wmi.vdev_param->enable_rtscts;

	rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET);

	if (arvif->num_legacy_stations > 0)
		rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES,
			      WMI_RTSCTS_PROFILE);
	else
		rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES,
			      WMI_RTSCTS_PROFILE);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n",
		   arvif->vdev_id, rts_cts);

	return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					 rts_cts);
}

static int ath10k_start_cac(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);

	ret = ath10k_monitor_recalc(ar);
	if (ret) {
		ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret);
		clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
		return ret;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_stop_cac(struct ath10k *ar)
{
	lockdep_assert_held(&ar->conf_mutex);

	/* CAC is not running - do nothing */
	if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags))
		return 0;

	clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags);
	ath10k_monitor_stop(ar);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n");

	return 0;
}

static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw,
				      struct ieee80211_chanctx_conf *conf,
				      void *data)
{
	bool *ret = data;

	if (!*ret && conf->radar_enabled)
		*ret = true;
}

static bool ath10k_mac_has_radar_enabled(struct ath10k *ar)
{
	bool has_radar = false;

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_has_radar_iter,
					    &has_radar);

	return has_radar;
}
static void ath10k_recalc_radar_detection(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ath10k_stop_cac(ar);

	if (!ath10k_mac_has_radar_enabled(ar))
		return;

	if (ar->num_started_vdevs > 0)
		return;

	ret = ath10k_start_cac(ar);
	if (ret) {
		/*
		 * Not possible to start CAC on current channel so starting
		 * radiation is not allowed, make this channel DFS_UNAVAILABLE
		 * by indicating that radar was detected.
		 */
		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
		ieee80211_radar_detected(ar->hw);
	}
}

static int ath10k_vdev_stop(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	WARN_ON(ar->num_started_vdevs == 0);

	if (ar->num_started_vdevs != 0) {
		ar->num_started_vdevs--;
		ath10k_recalc_radar_detection(ar);
	}

	return ret;
}

static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
				     const struct cfg80211_chan_def *chandef,
				     bool restart)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = arvif->dtim_period;
	arg.bcn_intval = arvif->beacon_interval;

	arg.channel.freq = chandef->chan->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;
	arg.channel.mode = chan_to_phymode(chandef);

	arg.channel.min_power = 0;
	arg.channel.max_power = chandef->chan->max_power * 2;
	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;

	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
		arg.ssid = arvif->vif->bss_conf.ssid;
		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac vdev %d start center_freq %d phymode %s\n",
		   arg.vdev_id, arg.channel.freq,
		   ath10k_wmi_phymode_str(arg.channel.mode));

	if (restart)
		ret = ath10k_wmi_vdev_restart(ar, &arg);
	else
		ret = ath10k_wmi_vdev_start(ar, &arg);

	if (ret) {
		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
			    arg.vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar,
			    "failed to synchronize setup for vdev %i restart %d: %d\n",
			    arg.vdev_id, restart, ret);
		return ret;
	}

	ar->num_started_vdevs++;
	ath10k_recalc_radar_detection(ar);

	return ret;
}
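/* Thin wrappers around ath10k_vdev_start_restart(). The restart variant is
 * used e.g. by the hidden SSID fixup below. Note that the channel power
 * values passed above are scaled by two; the WMI channel arguments appear
 * to expect 0.5 dBm units.
 */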
static int ath10k_vdev_start(struct ath10k_vif *arvif,
			     const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif,
			       const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, true);
}

static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
				       struct sk_buff *bcn)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_mgmt *mgmt;
	const u8 *p2p_ie;
	int ret;

	if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p)
		return 0;

	mgmt = (void *)bcn->data;
	p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
					 mgmt->u.beacon.variable,
					 bcn->len - (mgmt->u.beacon.variable -
						     bcn->data));
	if (!p2p_ie)
		return -ENOENT;

	ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie);
	if (ret) {
		ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui,
					u8 oui_type, size_t ie_offset)
{
	size_t len;
	const u8 *next;
	const u8 *end;
	u8 *ie;

	if (WARN_ON(skb->len < ie_offset))
		return -EINVAL;

	ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type,
					   skb->data + ie_offset,
					   skb->len - ie_offset);
	if (!ie)
		return -ENOENT;

	len = ie[1] + 2;
	end = skb->data + skb->len;
	next = ie + len;

	if (WARN_ON(next > end))
		return -EINVAL;

	memmove(ie, next, end - next);
	skb_trim(skb, skb->len - len);

	return 0;
}

static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_mutable_offsets offs = {};
	struct sk_buff *bcn;
	int ret;

	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP &&
	    arvif->vdev_type != WMI_VDEV_TYPE_IBSS)
		return 0;

	bcn = ieee80211_beacon_get_template(hw, vif, &offs);
	if (!bcn) {
		ath10k_warn(ar, "failed to get beacon template from mac80211\n");
		return -EPERM;
	}

	ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn);
	if (ret) {
		ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret);
		kfree_skb(bcn);
		return ret;
	}

	/* P2P IE is inserted by firmware automatically (as configured above)
	 * so remove it from the base beacon template to avoid duplicate P2P
	 * IEs in beacon frames.
	 */
	ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P,
				    offsetof(struct ieee80211_mgmt,
					     u.beacon.variable));

	ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0,
				  0, NULL, 0);
	kfree_skb(bcn);

	if (ret) {
		ath10k_warn(ar, "failed to submit beacon template command: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;
	struct ieee80211_vif *vif = arvif->vif;
	struct sk_buff *prb;
	int ret;

	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
		return 0;

	prb = ieee80211_proberesp_get(hw, vif);
	if (!prb) {
		ath10k_warn(ar, "failed to get probe resp template from mac80211\n");
		return -EPERM;
	}

	ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb);
	kfree_skb(prb);

	if (ret) {
		ath10k_warn(ar, "failed to submit probe resp template command: %d\n",
			    ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct cfg80211_chan_def def;
	int ret;

	/* When the vdev is originally started during assign_vif_chanctx()
	 * some information is missing, notably the SSID. Firmware revisions
	 * with beacon offloading require the SSID to be provided during vdev
	 * (re)start to handle hidden SSID properly.
	 *
	 * Vdev restart must be done after vdev has been both started and
	 * upped. Otherwise some firmware revisions (at least 10.2) fail to
	 * deliver vdev restart response event causing timeouts during vdev
	 * syncing in ath10k.
	 *
	 * Note: The vdev down/up and template reinstallation could be skipped
	 * since only wmi-tlv firmware are known to have beacon offload and
	 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart
	 * response delivery. It's probably more robust to keep it as is.
	 */
	if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))
		return 0;

	if (WARN_ON(!arvif->is_started))
		return -EINVAL;

	if (WARN_ON(!arvif->is_up))
		return -EINVAL;

	if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def)))
		return -EINVAL;

	ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	/* Vdev down resets beacon & presp templates. Reinstall them. Otherwise
	 * firmware will crash upon vdev up.
	 */

	ret = ath10k_mac_setup_bcn_tmpl(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to update beacon template: %d\n", ret);
		return ret;
	}

	ret = ath10k_mac_setup_prb_tmpl(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to update presp template: %d\n", ret);
		return ret;
	}

	ret = ath10k_vdev_restart(arvif, &def);
	if (ret) {
		ath10k_warn(ar, "failed to restart ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static void ath10k_control_beaconing(struct ath10k_vif *arvif,
				     struct ieee80211_bss_conf *info)
{
	struct ath10k *ar = arvif->ar;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->enable_beacon) {
		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret)
			ath10k_warn(ar, "failed to down vdev_id %i: %d\n",
				    arvif->vdev_id, ret);

		arvif->is_up = false;

		spin_lock_bh(&arvif->ar->data_lock);
		ath10k_mac_vif_beacon_free(arvif);
		spin_unlock_bh(&arvif->ar->data_lock);

		return;
	}

	arvif->tx_seq_no = 0x1000;

	arvif->aid = 0;
	ether_addr_copy(arvif->bssid, info->bssid);

	ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid,
				 arvif->bssid);
	if (ret) {
		ath10k_warn(ar, "failed to bring up vdev %d: %i\n",
			    arvif->vdev_id, ret);
		return;
	}

	arvif->is_up = true;

	ret = ath10k_mac_vif_fix_hidden_ssid(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n",
			    arvif->vdev_id, ret);
		return;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id);
}

static void ath10k_control_ibss(struct ath10k_vif *arvif,
				struct ieee80211_bss_conf *info,
				const u8 self_peer[ETH_ALEN])
{
	struct ath10k *ar = arvif->ar;
	u32 vdev_param;
	int ret = 0;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (!info->ibss_joined) {
		if (is_zero_ether_addr(arvif->bssid))
			return;

		eth_zero_addr(arvif->bssid);

		return;
	}

	vdev_param = arvif->ar->wmi.vdev_param->atim_window;
	ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param,
					ATH10K_DEFAULT_ATIM);
	if (ret)
		ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n",
			    arvif->vdev_id, ret);
}

static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	u32 value;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->u.sta.uapsd)
		value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER;
	else
		value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS;

	param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD;
	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value);
	if (ret) {
		ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n",
			    value, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	u32 param;
	u32 value;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->u.sta.uapsd)
		value = WMI_STA_PS_PSPOLL_COUNT_UAPSD;
	else
		value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX;

	param = WMI_STA_PS_PARAM_PSPOLL_COUNT;
	ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id,
					  param, value);
	if (ret) {
		ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n",
			    value, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_num_vifs_started(struct ath10k *ar)
{
	struct ath10k_vif *arvif;
	int num = 0;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(arvif, &ar->arvifs, list)
		if (arvif->is_started)
			num++;

	return num;
}

static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_vif *vif = arvif->vif;
	struct ieee80211_conf *conf = &ar->hw->conf;
	enum wmi_sta_powersave_param param;
	enum wmi_sta_ps_mode psmode;
	int ret;
	int ps_timeout;
	bool enable_ps;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->vif->type != NL80211_IFTYPE_STATION)
		return 0;

	enable_ps = arvif->ps;

	if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 &&
	    !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT,
		      ar->running_fw->fw_file.fw_features)) {
		ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n",
			    arvif->vdev_id);
		enable_ps = false;
	}

	if (!arvif->is_started) {
		/* mac80211 can update vif powersave state while disconnected.
		 * Firmware doesn't behave nicely and consumes more power than
		 * necessary if PS is disabled on a non-started vdev. Hence
		 * force-enable PS for non-running vdevs.
		 */
		psmode = WMI_STA_PS_MODE_ENABLED;
	} else if (enable_ps) {
		psmode = WMI_STA_PS_MODE_ENABLED;
		param = WMI_STA_PS_PARAM_INACTIVITY_TIME;

		ps_timeout = conf->dynamic_ps_timeout;
		if (ps_timeout == 0) {
			/* Firmware doesn't like 0 */
			ps_timeout = ieee80211_tu_to_usec(
				vif->bss_conf.beacon_int) / 1000;
		}

		ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param,
						  ps_timeout);
		if (ret) {
			ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n",
				    arvif->vdev_id, ret);
			return ret;
		}
	} else {
		psmode = WMI_STA_PS_MODE_DISABLED;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n",
		   arvif->vdev_id, psmode ? "enable" : "disable");

	ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode);
	if (ret) {
		ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n",
			    psmode, arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_sta_keepalive_arg arg = {};
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (arvif->vdev_type != WMI_VDEV_TYPE_STA)
		return 0;

	if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map))
		return 0;

	/* Some firmware revisions have a bug and ignore the `enabled` field.
	 * Instead use the interval to disable the keepalive.
	 */
	arg.vdev_id = arvif->vdev_id;
	arg.enabled = 1;
	arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME;
	arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE;

	ret = ath10k_wmi_sta_keepalive(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_vif *vif = arvif->vif;
	int ret;

	lockdep_assert_held(&arvif->ar->conf_mutex);

	if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)))
		return;

	if (arvif->vdev_type != WMI_VDEV_TYPE_AP)
		return;

	if (!vif->csa_active)
		return;

	if (!arvif->is_up)
		return;

	if (!ieee80211_csa_is_complete(vif)) {
		ieee80211_csa_update_counter(vif);

		ret = ath10k_mac_setup_bcn_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n",
				    ret);

		ret = ath10k_mac_setup_prb_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n",
				    ret);
	} else {
		ieee80211_csa_finish(vif);
	}
}

static void ath10k_mac_vif_ap_csa_work(struct work_struct *work)
{
	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
						ap_csa_work);
	struct ath10k *ar = arvif->ar;

	mutex_lock(&ar->conf_mutex);
	ath10k_mac_vif_ap_csa_count_down(arvif);
	mutex_unlock(&ar->conf_mutex);
}

static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac,
					  struct ieee80211_vif *vif)
{
	struct sk_buff *skb = data;
	struct ieee80211_mgmt *mgmt = (void *)skb->data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (vif->type != NL80211_IFTYPE_STATION)
		return;

	if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid))
		return;

	cancel_delayed_work(&arvif->connection_loss_work);
}

void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb)
{
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_mac_handle_beacon_iter,
						   skb);
}
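/* Beacon miss handling: ath10k_mac_handle_beacon() above cancels any pending
 * connection loss work when a beacon from the associated BSS is received;
 * the iterator below schedules that work on a firmware beacon miss event so
 * the connection is eventually torn down if beacons never resume.
 */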
static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac,
					       struct ieee80211_vif *vif)
{
	u32 *vdev_id = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	struct ath10k *ar = arvif->ar;
	struct ieee80211_hw *hw = ar->hw;

	if (arvif->vdev_id != *vdev_id)
		return;

	if (!arvif->is_up)
		return;

	ieee80211_beacon_loss(vif);

	/* Firmware doesn't report beacon loss events repeatedly. If AP probe
	 * (done by mac80211) succeeds but beacons do not resume then it
	 * doesn't make sense to continue operation. Queue connection loss work
	 * which can be cancelled when beacon is received.
	 */
	ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work,
				     ATH10K_CONNECTION_LOSS_HZ);
}

void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id)
{
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   IEEE80211_IFACE_ITER_NORMAL,
						   ath10k_mac_handle_beacon_miss_iter,
						   &vdev_id);
}

static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work)
{
	struct ath10k_vif *arvif = container_of(work, struct ath10k_vif,
						connection_loss_work.work);
	struct ieee80211_vif *vif = arvif->vif;

	if (!arvif->is_up)
		return;

	ieee80211_connection_loss(vif);
}

/**********************/
/* Station management */
/**********************/

static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar,
					     struct ieee80211_vif *vif)
{
	/* Some firmware revisions have unstable STA powersave when listen
	 * interval is set too high (e.g. 5). The symptoms are firmware doesn't
	 * generate NullFunc frames properly even if buffered frames have been
	 * indicated in Beacon TIM. Firmware would seldom wake up to pull
	 * buffered frames. Often pinging the device from AP would simply fail.
	 *
	 * As a workaround set it to 1.
	 */
	if (vif->type == NL80211_IFTYPE_STATION)
		return 1;

	return ar->hw->conf.listen_interval;
}

static void ath10k_peer_assoc_h_basic(struct ath10k *ar,
				      struct ieee80211_vif *vif,
				      struct ieee80211_sta *sta,
				      struct wmi_peer_assoc_complete_arg *arg)
{
	struct ath10k_vif *arvif = (void *)vif->drv_priv;
	u32 aid;

	lockdep_assert_held(&ar->conf_mutex);

	if (vif->type == NL80211_IFTYPE_STATION)
		aid = vif->bss_conf.aid;
	else
		aid = sta->aid;

	ether_addr_copy(arg->addr, sta->addr);
	arg->vdev_id = arvif->vdev_id;
	arg->peer_aid = aid;
	arg->peer_flags |= arvif->ar->wmi.peer_flags->auth;
	arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif);
	arg->peer_num_spatial_streams = 1;
	arg->peer_caps = vif->bss_conf.assoc_capability;
}

static void ath10k_peer_assoc_h_crypto(struct ath10k *ar,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta,
				       struct wmi_peer_assoc_complete_arg *arg)
{
	struct ieee80211_bss_conf *info = &vif->bss_conf;
	struct cfg80211_chan_def def;
	struct cfg80211_bss *bss;
	const u8 *rsnie = NULL;
	const u8 *wpaie = NULL;

	lockdep_assert_held(&ar->conf_mutex);

	if (WARN_ON(ath10k_mac_vif_chan(vif, &def)))
		return;

	bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0,
			       IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY);
	if (bss) {
		const struct cfg80211_bss_ies *ies;

		rcu_read_lock();
		rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN);

		ies = rcu_dereference(bss->ies);

		wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT,
						WLAN_OUI_TYPE_MICROSOFT_WPA,
						ies->data,
						ies->len);
		rcu_read_unlock();
		cfg80211_put_bss(ar->hw->wiphy, bss);
	}

	/* FIXME: is basing this on the RSN IE/WPA IE a correct idea? */
*/ 2154 if (rsnie || wpaie) { 2155 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2156 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2157 } 2158 2159 if (wpaie) { 2160 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2161 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2162 } 2163 2164 if (sta->mfp && 2165 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2166 ar->running_fw->fw_file.fw_features)) { 2167 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2168 } 2169 } 2170 2171 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2172 struct ieee80211_vif *vif, 2173 struct ieee80211_sta *sta, 2174 struct wmi_peer_assoc_complete_arg *arg) 2175 { 2176 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2177 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2178 struct cfg80211_chan_def def; 2179 const struct ieee80211_supported_band *sband; 2180 const struct ieee80211_rate *rates; 2181 enum nl80211_band band; 2182 u32 ratemask; 2183 u8 rate; 2184 int i; 2185 2186 lockdep_assert_held(&ar->conf_mutex); 2187 2188 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2189 return; 2190 2191 band = def.chan->band; 2192 sband = ar->hw->wiphy->bands[band]; 2193 ratemask = sta->supp_rates[band]; 2194 ratemask &= arvif->bitrate_mask.control[band].legacy; 2195 rates = sband->bitrates; 2196 2197 rateset->num_rates = 0; 2198 2199 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2200 if (!(ratemask & 1)) 2201 continue; 2202 2203 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2204 rateset->rates[rateset->num_rates] = rate; 2205 rateset->num_rates++; 2206 } 2207 } 2208 2209 static bool 2210 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2211 { 2212 int nss; 2213 2214 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2215 if (ht_mcs_mask[nss]) 2216 return false; 2217 2218 return true; 2219 } 2220 2221 static bool 2222 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2223 { 2224 int nss; 2225 2226 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2227 if (vht_mcs_mask[nss]) 2228 return false; 2229 2230 return true; 2231 } 2232 2233 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2234 struct ieee80211_vif *vif, 2235 struct ieee80211_sta *sta, 2236 struct wmi_peer_assoc_complete_arg *arg) 2237 { 2238 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2239 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2240 struct cfg80211_chan_def def; 2241 enum nl80211_band band; 2242 const u8 *ht_mcs_mask; 2243 const u16 *vht_mcs_mask; 2244 int i, n; 2245 u8 max_nss; 2246 u32 stbc; 2247 2248 lockdep_assert_held(&ar->conf_mutex); 2249 2250 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2251 return; 2252 2253 if (!ht_cap->ht_supported) 2254 return; 2255 2256 band = def.chan->band; 2257 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2258 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2259 2260 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2261 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2262 return; 2263 2264 arg->peer_flags |= ar->wmi.peer_flags->ht; 2265 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2266 ht_cap->ampdu_factor)) - 1; 2267 2268 arg->peer_mpdu_density = 2269 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2270 2271 arg->peer_ht_caps = ht_cap->cap; 2272 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2273 2274 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2275 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2276 2277 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2278 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2279 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2280 } 2281 2282 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2283 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2284 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2285 2286 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2287 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2288 } 2289 2290 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2291 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2292 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2293 } 2294 2295 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2296 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2297 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2298 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2299 arg->peer_rate_caps |= stbc; 2300 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2301 } 2302 2303 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2304 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2305 else if (ht_cap->mcs.rx_mask[1]) 2306 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2307 2308 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2309 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2310 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2311 max_nss = (i / 8) + 1; 2312 arg->peer_ht_rates.rates[n++] = i; 2313 } 2314 2315 /* 2316 * This is a workaround for HT-enabled STAs which break the spec 2317 * and have no HT capabilities RX mask (no HT RX MCS map). 2318 * 2319 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2320 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2321 * 2322 * Firmware asserts if such situation occurs. 2323 */ 2324 if (n == 0) { 2325 arg->peer_ht_rates.num_rates = 8; 2326 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2327 arg->peer_ht_rates.rates[i] = i; 2328 } else { 2329 arg->peer_ht_rates.num_rates = n; 2330 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2331 } 2332 2333 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2334 arg->addr, 2335 arg->peer_ht_rates.num_rates, 2336 arg->peer_num_spatial_streams); 2337 } 2338 2339 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2340 struct ath10k_vif *arvif, 2341 struct ieee80211_sta *sta) 2342 { 2343 u32 uapsd = 0; 2344 u32 max_sp = 0; 2345 int ret = 0; 2346 2347 lockdep_assert_held(&ar->conf_mutex); 2348 2349 if (sta->wme && sta->uapsd_queues) { 2350 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2351 sta->uapsd_queues, sta->max_sp); 2352 2353 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2354 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2355 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2356 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2357 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2358 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2359 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2360 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2361 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2362 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2363 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2364 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2365 2366 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2367 max_sp = sta->max_sp; 2368 2369 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2370 sta->addr, 2371 WMI_AP_PS_PEER_PARAM_UAPSD, 2372 uapsd); 2373 if (ret) { 2374 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2375 arvif->vdev_id, ret); 2376 return ret; 2377 } 2378 2379 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2380 sta->addr, 2381 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2382 max_sp); 2383 if (ret) { 2384 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2385 arvif->vdev_id, ret); 2386 return ret; 2387 } 2388 2389 /* TODO setup this based on STA listen interval and 2390 * beacon interval. Currently we don't know 2391 * sta->listen_interval - mac80211 patch required. 2392 * Currently use 10 seconds 2393 */ 2394 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2395 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2396 10); 2397 if (ret) { 2398 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2399 arvif->vdev_id, ret); 2400 return ret; 2401 } 2402 } 2403 2404 return 0; 2405 } 2406 2407 static u16 2408 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2409 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2410 { 2411 int idx_limit; 2412 int nss; 2413 u16 mcs_map; 2414 u16 mcs; 2415 2416 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2417 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2418 vht_mcs_limit[nss]; 2419 2420 if (mcs_map) 2421 idx_limit = fls(mcs_map) - 1; 2422 else 2423 idx_limit = -1; 2424 2425 switch (idx_limit) { 2426 case 0: /* fall through */ 2427 case 1: /* fall through */ 2428 case 2: /* fall through */ 2429 case 3: /* fall through */ 2430 case 4: /* fall through */ 2431 case 5: /* fall through */ 2432 case 6: /* fall through */ 2433 default: 2434 /* see ath10k_mac_can_set_bitrate_mask() */ 2435 WARN_ON(1); 2436 /* fall through */ 2437 case -1: 2438 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2439 break; 2440 case 7: 2441 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2442 break; 2443 case 8: 2444 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2445 break; 2446 case 9: 2447 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2448 break; 2449 } 2450 2451 tx_mcs_set &= ~(0x3 << (nss * 2)); 2452 tx_mcs_set |= mcs << (nss * 2); 2453 } 2454 2455 return tx_mcs_set; 2456 } 2457 2458 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2459 struct ieee80211_vif *vif, 2460 struct ieee80211_sta *sta, 2461 struct wmi_peer_assoc_complete_arg *arg) 2462 { 2463 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2464 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2465 struct cfg80211_chan_def def; 2466 enum nl80211_band band; 2467 const u16 *vht_mcs_mask; 2468 u8 ampdu_factor; 2469 u8 max_nss, vht_mcs; 2470 int i; 2471 2472 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2473 return; 2474 2475 if (!vht_cap->vht_supported) 2476 return; 2477 2478 band = def.chan->band; 2479 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2480 2481 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2482 return; 2483 2484 arg->peer_flags |= ar->wmi.peer_flags->vht; 2485 2486 if (def.chan->band == NL80211_BAND_2GHZ) 2487 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2488 2489 arg->peer_vht_caps = vht_cap->cap; 2490 2491 ampdu_factor = (vht_cap->cap & 2492 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2493 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2494 2495 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2496 * zero in VHT IE. Using it would result in degraded throughput. 2497 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2498 * it if VHT max_mpdu is smaller. 
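 *
 * Worked example (assuming IEEE80211_HT_MAX_AMPDU_FACTOR == 13): a peer
 * advertising the maximum VHT exponent of 7 yields
 * (1 << (13 + 7)) - 1 = 1048575 octets, while a bogus exponent of 0
 * would allow only (1 << 13) - 1 = 8191 octets, hence the max() below.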
2499 */ 2500 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2501 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2502 ampdu_factor)) - 1); 2503 2504 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2505 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2506 2507 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2508 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2509 2510 /* Calculate peer NSS capability from VHT capabilities if STA 2511 * supports VHT. 2512 */ 2513 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2514 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2515 (2 * i) & 3; 2516 2517 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2518 vht_mcs_mask[i]) 2519 max_nss = i + 1; 2520 } 2521 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2522 arg->peer_vht_rates.rx_max_rate = 2523 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2524 arg->peer_vht_rates.rx_mcs_set = 2525 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2526 arg->peer_vht_rates.tx_max_rate = 2527 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2528 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2529 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2530 2531 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2532 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2533 2534 if (arg->peer_vht_rates.rx_max_rate && 2535 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) { 2536 switch (arg->peer_vht_rates.rx_max_rate) { 2537 case 1560: 2538 /* Must be 2x2 at 160Mhz is all it can do. */ 2539 arg->peer_bw_rxnss_override = 2; 2540 break; 2541 case 780: 2542 /* Can only do 1x1 at 160Mhz (Long Guard Interval) */ 2543 arg->peer_bw_rxnss_override = 1; 2544 break; 2545 } 2546 } 2547 } 2548 2549 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2550 struct ieee80211_vif *vif, 2551 struct ieee80211_sta *sta, 2552 struct wmi_peer_assoc_complete_arg *arg) 2553 { 2554 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2555 2556 switch (arvif->vdev_type) { 2557 case WMI_VDEV_TYPE_AP: 2558 if (sta->wme) 2559 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2560 2561 if (sta->wme && sta->uapsd_queues) { 2562 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2563 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2564 } 2565 break; 2566 case WMI_VDEV_TYPE_STA: 2567 if (sta->wme) 2568 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2569 break; 2570 case WMI_VDEV_TYPE_IBSS: 2571 if (sta->wme) 2572 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2573 break; 2574 default: 2575 break; 2576 } 2577 2578 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2579 sta->addr, !!(arg->peer_flags & 2580 arvif->ar->wmi.peer_flags->qos)); 2581 } 2582 2583 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2584 { 2585 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2586 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2587 } 2588 2589 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2590 struct ieee80211_sta *sta) 2591 { 2592 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2593 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2594 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2595 return MODE_11AC_VHT160; 2596 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2597 return MODE_11AC_VHT80_80; 2598 default: 2599 /* not sure if this is a valid case? 
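 * A peer reporting IEEE80211_STA_RX_BW_160 without a matching
 * SUPP_CHAN_WIDTH capability bit lands here; falling back to plain
 * VHT160 looks like the least harmful choice.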
*/ 2600 return MODE_11AC_VHT160; 2601 } 2602 } 2603 2604 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2605 return MODE_11AC_VHT80; 2606 2607 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2608 return MODE_11AC_VHT40; 2609 2610 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2611 return MODE_11AC_VHT20; 2612 2613 return MODE_UNKNOWN; 2614 } 2615 2616 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2617 struct ieee80211_vif *vif, 2618 struct ieee80211_sta *sta, 2619 struct wmi_peer_assoc_complete_arg *arg) 2620 { 2621 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2622 struct cfg80211_chan_def def; 2623 enum nl80211_band band; 2624 const u8 *ht_mcs_mask; 2625 const u16 *vht_mcs_mask; 2626 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2627 2628 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2629 return; 2630 2631 band = def.chan->band; 2632 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2633 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2634 2635 switch (band) { 2636 case NL80211_BAND_2GHZ: 2637 if (sta->vht_cap.vht_supported && 2638 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2639 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2640 phymode = MODE_11AC_VHT40; 2641 else 2642 phymode = MODE_11AC_VHT20; 2643 } else if (sta->ht_cap.ht_supported && 2644 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2645 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2646 phymode = MODE_11NG_HT40; 2647 else 2648 phymode = MODE_11NG_HT20; 2649 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2650 phymode = MODE_11G; 2651 } else { 2652 phymode = MODE_11B; 2653 } 2654 2655 break; 2656 case NL80211_BAND_5GHZ: 2657 /* 2658 * Check VHT first. 2659 */ 2660 if (sta->vht_cap.vht_supported && 2661 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2662 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2663 } else if (sta->ht_cap.ht_supported && 2664 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2665 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2666 phymode = MODE_11NA_HT40; 2667 else 2668 phymode = MODE_11NA_HT20; 2669 } else { 2670 phymode = MODE_11A; 2671 } 2672 2673 break; 2674 default: 2675 break; 2676 } 2677 2678 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2679 sta->addr, ath10k_wmi_phymode_str(phymode)); 2680 2681 arg->peer_phymode = phymode; 2682 WARN_ON(phymode == MODE_UNKNOWN); 2683 } 2684 2685 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2686 struct ieee80211_vif *vif, 2687 struct ieee80211_sta *sta, 2688 struct wmi_peer_assoc_complete_arg *arg) 2689 { 2690 lockdep_assert_held(&ar->conf_mutex); 2691 2692 memset(arg, 0, sizeof(*arg)); 2693 2694 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2695 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2696 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2697 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2698 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2699 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2700 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2701 2702 return 0; 2703 } 2704 2705 static const u32 ath10k_smps_map[] = { 2706 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2707 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2708 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2709 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2710 }; 2711 2712 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2713 const u8 *addr, 2714 const struct ieee80211_sta_ht_cap *ht_cap) 2715 { 2716 int smps; 2717 2718 if (!ht_cap->ht_supported) 2719 return 0; 2720 2721 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2722 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2723 2724 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2725 return -EINVAL; 2726 2727 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2728 WMI_PEER_SMPS_STATE, 2729 ath10k_smps_map[smps]); 2730 } 2731 2732 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2733 struct ieee80211_vif *vif, 2734 struct ieee80211_sta_vht_cap vht_cap) 2735 { 2736 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2737 int ret; 2738 u32 param; 2739 u32 value; 2740 2741 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2742 return 0; 2743 2744 if (!(ar->vht_cap_info & 2745 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2746 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2747 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2748 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2749 return 0; 2750 2751 param = ar->wmi.vdev_param->txbf; 2752 value = 0; 2753 2754 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2755 return 0; 2756 2757 /* The following logic is correct. If a remote STA advertises support 2758 * for being a beamformer then we should enable us being a beamformee. 2759 */ 2760 2761 if (ar->vht_cap_info & 2762 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2763 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2764 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2765 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2766 2767 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2768 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2769 } 2770 2771 if (ar->vht_cap_info & 2772 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2773 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2774 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2775 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2776 2777 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2778 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2779 } 2780 2781 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2782 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2783 2784 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2785 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2786 2787 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2788 if (ret) { 2789 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2790 value, ret); 2791 return ret; 2792 } 2793 2794 return 0; 2795 } 2796 2797 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2798 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2799 struct ieee80211_vif *vif, 2800 struct ieee80211_bss_conf *bss_conf) 2801 { 2802 struct ath10k *ar = hw->priv; 2803 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2804 struct ieee80211_sta_ht_cap ht_cap; 2805 struct ieee80211_sta_vht_cap vht_cap; 2806 struct wmi_peer_assoc_complete_arg peer_arg; 2807 struct ieee80211_sta *ap_sta; 2808 int ret; 2809 2810 lockdep_assert_held(&ar->conf_mutex); 2811 2812 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2813 arvif->vdev_id, arvif->bssid, arvif->aid); 2814 2815 rcu_read_lock(); 2816 2817 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2818 if (!ap_sta) { 2819 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2820 bss_conf->bssid, arvif->vdev_id); 2821 rcu_read_unlock(); 2822 return; 2823 } 2824 2825 /* ap_sta must be accessed only within rcu section which must be left 2826 * before calling ath10k_setup_peer_smps() which might sleep. 
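 * ht_cap and vht_cap are therefore copied by value here so they stay
 * usable after rcu_read_unlock().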
2827 */ 2828 ht_cap = ap_sta->ht_cap; 2829 vht_cap = ap_sta->vht_cap; 2830 2831 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2832 if (ret) { 2833 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2834 bss_conf->bssid, arvif->vdev_id, ret); 2835 rcu_read_unlock(); 2836 return; 2837 } 2838 2839 rcu_read_unlock(); 2840 2841 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2842 if (ret) { 2843 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2844 bss_conf->bssid, arvif->vdev_id, ret); 2845 return; 2846 } 2847 2848 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2849 if (ret) { 2850 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2851 arvif->vdev_id, ret); 2852 return; 2853 } 2854 2855 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2856 if (ret) { 2857 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2858 arvif->vdev_id, bss_conf->bssid, ret); 2859 return; 2860 } 2861 2862 ath10k_dbg(ar, ATH10K_DBG_MAC, 2863 "mac vdev %d up (associated) bssid %pM aid %d\n", 2864 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2865 2866 WARN_ON(arvif->is_up); 2867 2868 arvif->aid = bss_conf->aid; 2869 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2870 2871 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2872 if (ret) { 2873 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2874 arvif->vdev_id, ret); 2875 return; 2876 } 2877 2878 arvif->is_up = true; 2879 2880 /* Workaround: Some firmware revisions (tested with qca6174 2881 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2882 * poked with peer param command. 2883 */ 2884 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2885 WMI_PEER_DUMMY_VAR, 1); 2886 if (ret) { 2887 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2888 arvif->bssid, arvif->vdev_id, ret); 2889 return; 2890 } 2891 } 2892 2893 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2894 struct ieee80211_vif *vif) 2895 { 2896 struct ath10k *ar = hw->priv; 2897 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2898 struct ieee80211_sta_vht_cap vht_cap = {}; 2899 int ret; 2900 2901 lockdep_assert_held(&ar->conf_mutex); 2902 2903 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2904 arvif->vdev_id, arvif->bssid); 2905 2906 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2907 if (ret) 2908 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2909 arvif->vdev_id, ret); 2910 2911 arvif->def_wep_key_idx = -1; 2912 2913 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2914 if (ret) { 2915 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2916 arvif->vdev_id, ret); 2917 return; 2918 } 2919 2920 arvif->is_up = false; 2921 2922 cancel_delayed_work_sync(&arvif->connection_loss_work); 2923 } 2924 2925 static int ath10k_station_assoc(struct ath10k *ar, 2926 struct ieee80211_vif *vif, 2927 struct ieee80211_sta *sta, 2928 bool reassoc) 2929 { 2930 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2931 struct wmi_peer_assoc_complete_arg peer_arg; 2932 int ret = 0; 2933 2934 lockdep_assert_held(&ar->conf_mutex); 2935 2936 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2937 if (ret) { 2938 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2939 sta->addr, arvif->vdev_id, ret); 2940 return ret; 2941 } 2942 2943 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2944 if (ret) { 2945 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2946 
sta->addr, arvif->vdev_id, ret); 2947 return ret; 2948 } 2949 2950 /* Re-assoc is run only to update supported rates for given station. It 2951 * doesn't make much sense to reconfigure the peer completely. 2952 */ 2953 if (!reassoc) { 2954 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2955 &sta->ht_cap); 2956 if (ret) { 2957 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2958 arvif->vdev_id, ret); 2959 return ret; 2960 } 2961 2962 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2963 if (ret) { 2964 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2965 sta->addr, arvif->vdev_id, ret); 2966 return ret; 2967 } 2968 2969 if (!sta->wme) { 2970 arvif->num_legacy_stations++; 2971 ret = ath10k_recalc_rtscts_prot(arvif); 2972 if (ret) { 2973 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2974 arvif->vdev_id, ret); 2975 return ret; 2976 } 2977 } 2978 2979 /* Plumb cached keys only for static WEP */ 2980 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { 2981 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2982 if (ret) { 2983 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2984 arvif->vdev_id, ret); 2985 return ret; 2986 } 2987 } 2988 } 2989 2990 return ret; 2991 } 2992 2993 static int ath10k_station_disassoc(struct ath10k *ar, 2994 struct ieee80211_vif *vif, 2995 struct ieee80211_sta *sta) 2996 { 2997 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2998 int ret = 0; 2999 3000 lockdep_assert_held(&ar->conf_mutex); 3001 3002 if (!sta->wme) { 3003 arvif->num_legacy_stations--; 3004 ret = ath10k_recalc_rtscts_prot(arvif); 3005 if (ret) { 3006 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3007 arvif->vdev_id, ret); 3008 return ret; 3009 } 3010 } 3011 3012 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3013 if (ret) { 3014 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3015 arvif->vdev_id, ret); 3016 return ret; 3017 } 3018 3019 return ret; 3020 } 3021 3022 /**************/ 3023 /* Regulatory */ 3024 /**************/ 3025 3026 static int ath10k_update_channel_list(struct ath10k *ar) 3027 { 3028 struct ieee80211_hw *hw = ar->hw; 3029 struct ieee80211_supported_band **bands; 3030 enum nl80211_band band; 3031 struct ieee80211_channel *channel; 3032 struct wmi_scan_chan_list_arg arg = {0}; 3033 struct wmi_channel_arg *ch; 3034 bool passive; 3035 int len; 3036 int ret; 3037 int i; 3038 3039 lockdep_assert_held(&ar->conf_mutex); 3040 3041 bands = hw->wiphy->bands; 3042 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3043 if (!bands[band]) 3044 continue; 3045 3046 for (i = 0; i < bands[band]->n_channels; i++) { 3047 if (bands[band]->channels[i].flags & 3048 IEEE80211_CHAN_DISABLED) 3049 continue; 3050 3051 arg.n_channels++; 3052 } 3053 } 3054 3055 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3056 arg.channels = kzalloc(len, GFP_KERNEL); 3057 if (!arg.channels) 3058 return -ENOMEM; 3059 3060 ch = arg.channels; 3061 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3062 if (!bands[band]) 3063 continue; 3064 3065 for (i = 0; i < bands[band]->n_channels; i++) { 3066 channel = &bands[band]->channels[i]; 3067 3068 if (channel->flags & IEEE80211_CHAN_DISABLED) 3069 continue; 3070 3071 ch->allow_ht = true; 3072 3073 /* FIXME: when should we really allow VHT? 
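 * For now VHT is advertised on every enabled channel regardless of band
 * or regulatory constraints, mirroring allow_ht above.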
*/ 3074 ch->allow_vht = true; 3075 3076 ch->allow_ibss = 3077 !(channel->flags & IEEE80211_CHAN_NO_IR); 3078 3079 ch->ht40plus = 3080 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3081 3082 ch->chan_radar = 3083 !!(channel->flags & IEEE80211_CHAN_RADAR); 3084 3085 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3086 ch->passive = passive; 3087 3088 ch->freq = channel->center_freq; 3089 ch->band_center_freq1 = channel->center_freq; 3090 ch->min_power = 0; 3091 ch->max_power = channel->max_power * 2; 3092 ch->max_reg_power = channel->max_reg_power * 2; 3093 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3094 ch->reg_class_id = 0; /* FIXME */ 3095 3096 /* FIXME: why use only legacy modes, why not any 3097 * HT/VHT modes? Would that even make any 3098 * difference? 3099 */ 3100 if (channel->band == NL80211_BAND_2GHZ) 3101 ch->mode = MODE_11G; 3102 else 3103 ch->mode = MODE_11A; 3104 3105 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3106 continue; 3107 3108 ath10k_dbg(ar, ATH10K_DBG_WMI, 3109 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3110 ch - arg.channels, arg.n_channels, 3111 ch->freq, ch->max_power, ch->max_reg_power, 3112 ch->max_antenna_gain, ch->mode); 3113 3114 ch++; 3115 } 3116 } 3117 3118 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3119 kfree(arg.channels); 3120 3121 return ret; 3122 } 3123 3124 static enum wmi_dfs_region 3125 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3126 { 3127 switch (dfs_region) { 3128 case NL80211_DFS_UNSET: 3129 return WMI_UNINIT_DFS_DOMAIN; 3130 case NL80211_DFS_FCC: 3131 return WMI_FCC_DFS_DOMAIN; 3132 case NL80211_DFS_ETSI: 3133 return WMI_ETSI_DFS_DOMAIN; 3134 case NL80211_DFS_JP: 3135 return WMI_MKK4_DFS_DOMAIN; 3136 } 3137 return WMI_UNINIT_DFS_DOMAIN; 3138 } 3139 3140 static void ath10k_regd_update(struct ath10k *ar) 3141 { 3142 struct reg_dmn_pair_mapping *regpair; 3143 int ret; 3144 enum wmi_dfs_region wmi_dfs_reg; 3145 enum nl80211_dfs_regions nl_dfs_reg; 3146 3147 lockdep_assert_held(&ar->conf_mutex); 3148 3149 ret = ath10k_update_channel_list(ar); 3150 if (ret) 3151 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3152 3153 regpair = ar->ath_common.regulatory.regpair; 3154 3155 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3156 nl_dfs_reg = ar->dfs_detector->region; 3157 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3158 } else { 3159 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3160 } 3161 3162 /* Target allows setting up per-band regdomain but ath_common provides 3163 * a combined one only 3164 */ 3165 ret = ath10k_wmi_pdev_set_regdomain(ar, 3166 regpair->reg_domain, 3167 regpair->reg_domain, /* 2ghz */ 3168 regpair->reg_domain, /* 5ghz */ 3169 regpair->reg_2ghz_ctl, 3170 regpair->reg_5ghz_ctl, 3171 wmi_dfs_reg); 3172 if (ret) 3173 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3174 } 3175 3176 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3177 struct ieee80211_supported_band *band) 3178 { 3179 int i; 3180 3181 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3182 for (i = 0; i < band->n_channels; i++) { 3183 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3184 band->channels[i].center_freq > ar->high_5ghz_chan) 3185 band->channels[i].flags |= 3186 IEEE80211_CHAN_DISABLED; 3187 } 3188 } 3189 } 3190 3191 static void ath10k_reg_notifier(struct wiphy *wiphy, 3192 struct regulatory_request *request) 3193 { 3194 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3195 struct ath10k *ar = hw->priv; 3196 bool result; 3197 
3198 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3199 3200 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3201 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3202 request->dfs_region); 3203 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3204 request->dfs_region); 3205 if (!result) 3206 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3207 request->dfs_region); 3208 } 3209 3210 mutex_lock(&ar->conf_mutex); 3211 if (ar->state == ATH10K_STATE_ON) 3212 ath10k_regd_update(ar); 3213 mutex_unlock(&ar->conf_mutex); 3214 3215 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3216 ath10k_mac_update_channel_list(ar, 3217 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3218 } 3219 3220 /***************/ 3221 /* TX handlers */ 3222 /***************/ 3223 3224 enum ath10k_mac_tx_path { 3225 ATH10K_MAC_TX_HTT, 3226 ATH10K_MAC_TX_HTT_MGMT, 3227 ATH10K_MAC_TX_WMI_MGMT, 3228 ATH10K_MAC_TX_UNKNOWN, 3229 }; 3230 3231 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3232 { 3233 lockdep_assert_held(&ar->htt.tx_lock); 3234 3235 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3236 ar->tx_paused |= BIT(reason); 3237 ieee80211_stop_queues(ar->hw); 3238 } 3239 3240 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3241 struct ieee80211_vif *vif) 3242 { 3243 struct ath10k *ar = data; 3244 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3245 3246 if (arvif->tx_paused) 3247 return; 3248 3249 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3250 } 3251 3252 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3253 { 3254 lockdep_assert_held(&ar->htt.tx_lock); 3255 3256 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3257 ar->tx_paused &= ~BIT(reason); 3258 3259 if (ar->tx_paused) 3260 return; 3261 3262 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3263 IEEE80211_IFACE_ITER_RESUME_ALL, 3264 ath10k_mac_tx_unlock_iter, 3265 ar); 3266 3267 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3268 } 3269 3270 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3271 { 3272 struct ath10k *ar = arvif->ar; 3273 3274 lockdep_assert_held(&ar->htt.tx_lock); 3275 3276 WARN_ON(reason >= BITS_PER_LONG); 3277 arvif->tx_paused |= BIT(reason); 3278 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3279 } 3280 3281 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3282 { 3283 struct ath10k *ar = arvif->ar; 3284 3285 lockdep_assert_held(&ar->htt.tx_lock); 3286 3287 WARN_ON(reason >= BITS_PER_LONG); 3288 arvif->tx_paused &= ~BIT(reason); 3289 3290 if (ar->tx_paused) 3291 return; 3292 3293 if (arvif->tx_paused) 3294 return; 3295 3296 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3297 } 3298 3299 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3300 enum wmi_tlv_tx_pause_id pause_id, 3301 enum wmi_tlv_tx_pause_action action) 3302 { 3303 struct ath10k *ar = arvif->ar; 3304 3305 lockdep_assert_held(&ar->htt.tx_lock); 3306 3307 switch (action) { 3308 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3309 ath10k_mac_vif_tx_lock(arvif, pause_id); 3310 break; 3311 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3312 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3313 break; 3314 default: 3315 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3316 "received unknown tx pause action %d on vdev %i, ignoring\n", 3317 action, arvif->vdev_id); 3318 break; 3319 } 3320 } 3321 3322 struct ath10k_mac_tx_pause { 3323 u32 vdev_id; 3324 enum wmi_tlv_tx_pause_id pause_id; 3325 enum wmi_tlv_tx_pause_action action; 3326 }; 3327 3328 static void 
ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3329 struct ieee80211_vif *vif) 3330 { 3331 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3332 struct ath10k_mac_tx_pause *arg = data; 3333 3334 if (arvif->vdev_id != arg->vdev_id) 3335 return; 3336 3337 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3338 } 3339 3340 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3341 enum wmi_tlv_tx_pause_id pause_id, 3342 enum wmi_tlv_tx_pause_action action) 3343 { 3344 struct ath10k_mac_tx_pause arg = { 3345 .vdev_id = vdev_id, 3346 .pause_id = pause_id, 3347 .action = action, 3348 }; 3349 3350 spin_lock_bh(&ar->htt.tx_lock); 3351 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3352 IEEE80211_IFACE_ITER_RESUME_ALL, 3353 ath10k_mac_handle_tx_pause_iter, 3354 &arg); 3355 spin_unlock_bh(&ar->htt.tx_lock); 3356 } 3357 3358 static enum ath10k_hw_txrx_mode 3359 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3360 struct ieee80211_vif *vif, 3361 struct ieee80211_sta *sta, 3362 struct sk_buff *skb) 3363 { 3364 const struct ieee80211_hdr *hdr = (void *)skb->data; 3365 __le16 fc = hdr->frame_control; 3366 3367 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3368 return ATH10K_HW_TXRX_RAW; 3369 3370 if (ieee80211_is_mgmt(fc)) 3371 return ATH10K_HW_TXRX_MGMT; 3372 3373 /* Workaround: 3374 * 3375 * NullFunc frames are mostly used to ping if a client or AP are still 3376 * reachable and responsive. This implies tx status reports must be 3377 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3378 * come to a conclusion that the other end disappeared and tear down 3379 * BSS connection or it can never disconnect from BSS/client (which is 3380 * the case). 3381 * 3382 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3383 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3384 * which seems to deliver correct tx reports for NullFunc frames. The 3385 * downside of using it is it ignores client powersave state so it can 3386 * end up disconnecting sleeping clients in AP mode. It should fix STA 3387 * mode though because AP don't sleep. 3388 */ 3389 if (ar->htt.target_version_major < 3 && 3390 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3391 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3392 ar->running_fw->fw_file.fw_features)) 3393 return ATH10K_HW_TXRX_MGMT; 3394 3395 /* Workaround: 3396 * 3397 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3398 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3399 * to work with Ethernet txmode so use it. 3400 * 3401 * FIXME: Check if raw mode works with TDLS. 
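 *
 * Note that only TDLS data frames are switched to Ethernet txmode below;
 * everything else stays on Native Wifi unless raw mode is forced via
 * ATH10K_FLAG_RAW_MODE.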
3402 */ 3403 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3404 return ATH10K_HW_TXRX_ETHERNET; 3405 3406 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 3407 return ATH10K_HW_TXRX_RAW; 3408 3409 return ATH10K_HW_TXRX_NATIVE_WIFI; 3410 } 3411 3412 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3413 struct sk_buff *skb) 3414 { 3415 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3416 const struct ieee80211_hdr *hdr = (void *)skb->data; 3417 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3418 IEEE80211_TX_CTL_INJECTED; 3419 3420 if (!ieee80211_has_protected(hdr->frame_control)) 3421 return false; 3422 3423 if ((info->flags & mask) == mask) 3424 return false; 3425 3426 if (vif) 3427 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3428 3429 return true; 3430 } 3431 3432 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3433 * Control in the header. 3434 */ 3435 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3436 { 3437 struct ieee80211_hdr *hdr = (void *)skb->data; 3438 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3439 u8 *qos_ctl; 3440 3441 if (!ieee80211_is_data_qos(hdr->frame_control)) 3442 return; 3443 3444 qos_ctl = ieee80211_get_qos_ctl(hdr); 3445 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3446 skb->data, (void *)qos_ctl - (void *)skb->data); 3447 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3448 3449 /* Some firmware revisions don't handle sending QoS NullFunc well. 3450 * These frames are mainly used for CQM purposes so it doesn't really 3451 * matter whether QoS NullFunc or NullFunc are sent. 3452 */ 3453 hdr = (void *)skb->data; 3454 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3455 cb->flags &= ~ATH10K_SKB_F_QOS; 3456 3457 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3458 } 3459 3460 static void ath10k_tx_h_8023(struct sk_buff *skb) 3461 { 3462 struct ieee80211_hdr *hdr; 3463 struct rfc1042_hdr *rfc1042; 3464 struct ethhdr *eth; 3465 size_t hdrlen; 3466 u8 da[ETH_ALEN]; 3467 u8 sa[ETH_ALEN]; 3468 __be16 type; 3469 3470 hdr = (void *)skb->data; 3471 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3472 rfc1042 = (void *)skb->data + hdrlen; 3473 3474 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3475 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3476 type = rfc1042->snap_type; 3477 3478 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3479 skb_push(skb, sizeof(*eth)); 3480 3481 eth = (void *)skb->data; 3482 ether_addr_copy(eth->h_dest, da); 3483 ether_addr_copy(eth->h_source, sa); 3484 eth->h_proto = type; 3485 } 3486 3487 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3488 struct ieee80211_vif *vif, 3489 struct sk_buff *skb) 3490 { 3491 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3492 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3493 3494 /* This is case only for P2P_GO */ 3495 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3496 return; 3497 3498 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3499 spin_lock_bh(&ar->data_lock); 3500 if (arvif->u.ap.noa_data) 3501 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3502 GFP_ATOMIC)) 3503 skb_put_data(skb, arvif->u.ap.noa_data, 3504 arvif->u.ap.noa_len); 3505 spin_unlock_bh(&ar->data_lock); 3506 } 3507 } 3508 3509 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3510 struct ieee80211_vif *vif, 3511 struct ieee80211_txq *txq, 3512 struct sk_buff *skb) 3513 { 3514 struct ieee80211_hdr *hdr = (void *)skb->data; 3515 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3516 
3517 cb->flags = 0; 3518 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3519 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3520 3521 if (ieee80211_is_mgmt(hdr->frame_control)) 3522 cb->flags |= ATH10K_SKB_F_MGMT; 3523 3524 if (ieee80211_is_data_qos(hdr->frame_control)) 3525 cb->flags |= ATH10K_SKB_F_QOS; 3526 3527 cb->vif = vif; 3528 cb->txq = txq; 3529 } 3530 3531 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3532 { 3533 /* FIXME: Not really sure since when the behaviour changed. At some 3534 * point new firmware stopped requiring creation of peer entries for 3535 * offchannel tx (and actually creating them causes issues with wmi-htc 3536 * tx credit replenishment and reliability). Assuming it's at least 3.4 3537 * because that's when the `freq` was introduced to TX_FRM HTT command. 3538 */ 3539 return (ar->htt.target_version_major >= 3 && 3540 ar->htt.target_version_minor >= 4 && 3541 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3542 } 3543 3544 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3545 { 3546 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3547 int ret = 0; 3548 3549 spin_lock_bh(&ar->data_lock); 3550 3551 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3552 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3553 ret = -ENOSPC; 3554 goto unlock; 3555 } 3556 3557 __skb_queue_tail(q, skb); 3558 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3559 3560 unlock: 3561 spin_unlock_bh(&ar->data_lock); 3562 3563 return ret; 3564 } 3565 3566 static enum ath10k_mac_tx_path 3567 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3568 struct sk_buff *skb, 3569 enum ath10k_hw_txrx_mode txmode) 3570 { 3571 switch (txmode) { 3572 case ATH10K_HW_TXRX_RAW: 3573 case ATH10K_HW_TXRX_NATIVE_WIFI: 3574 case ATH10K_HW_TXRX_ETHERNET: 3575 return ATH10K_MAC_TX_HTT; 3576 case ATH10K_HW_TXRX_MGMT: 3577 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3578 ar->running_fw->fw_file.fw_features) || 3579 test_bit(WMI_SERVICE_MGMT_TX_WMI, 3580 ar->wmi.svc_map)) 3581 return ATH10K_MAC_TX_WMI_MGMT; 3582 else if (ar->htt.target_version_major >= 3) 3583 return ATH10K_MAC_TX_HTT; 3584 else 3585 return ATH10K_MAC_TX_HTT_MGMT; 3586 } 3587 3588 return ATH10K_MAC_TX_UNKNOWN; 3589 } 3590 3591 static int ath10k_mac_tx_submit(struct ath10k *ar, 3592 enum ath10k_hw_txrx_mode txmode, 3593 enum ath10k_mac_tx_path txpath, 3594 struct sk_buff *skb) 3595 { 3596 struct ath10k_htt *htt = &ar->htt; 3597 int ret = -EINVAL; 3598 3599 switch (txpath) { 3600 case ATH10K_MAC_TX_HTT: 3601 ret = ath10k_htt_tx(htt, txmode, skb); 3602 break; 3603 case ATH10K_MAC_TX_HTT_MGMT: 3604 ret = ath10k_htt_mgmt_tx(htt, skb); 3605 break; 3606 case ATH10K_MAC_TX_WMI_MGMT: 3607 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3608 break; 3609 case ATH10K_MAC_TX_UNKNOWN: 3610 WARN_ON_ONCE(1); 3611 ret = -EINVAL; 3612 break; 3613 } 3614 3615 if (ret) { 3616 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3617 ret); 3618 ieee80211_free_txskb(ar->hw, skb); 3619 } 3620 3621 return ret; 3622 } 3623 3624 /* This function consumes the sk_buff regardless of return value as far as 3625 * caller is concerned so no freeing is necessary afterwards. 
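 * On failure the skb has already been released with
 * ieee80211_free_txskb(), either directly below for unsupported raw tx
 * or in ath10k_mac_tx_submit(), so callers must not touch it again.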
3626 */ 3627 static int ath10k_mac_tx(struct ath10k *ar, 3628 struct ieee80211_vif *vif, 3629 enum ath10k_hw_txrx_mode txmode, 3630 enum ath10k_mac_tx_path txpath, 3631 struct sk_buff *skb) 3632 { 3633 struct ieee80211_hw *hw = ar->hw; 3634 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3635 int ret; 3636 3637 /* We should disable CCK RATE due to P2P */ 3638 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3639 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3640 3641 switch (txmode) { 3642 case ATH10K_HW_TXRX_MGMT: 3643 case ATH10K_HW_TXRX_NATIVE_WIFI: 3644 ath10k_tx_h_nwifi(hw, skb); 3645 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3646 ath10k_tx_h_seq_no(vif, skb); 3647 break; 3648 case ATH10K_HW_TXRX_ETHERNET: 3649 ath10k_tx_h_8023(skb); 3650 break; 3651 case ATH10K_HW_TXRX_RAW: 3652 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 3653 WARN_ON_ONCE(1); 3654 ieee80211_free_txskb(hw, skb); 3655 return -ENOTSUPP; 3656 } 3657 } 3658 3659 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3660 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3661 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3662 skb); 3663 3664 skb_queue_tail(&ar->offchan_tx_queue, skb); 3665 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3666 return 0; 3667 } 3668 } 3669 3670 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3671 if (ret) { 3672 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3673 return ret; 3674 } 3675 3676 return 0; 3677 } 3678 3679 void ath10k_offchan_tx_purge(struct ath10k *ar) 3680 { 3681 struct sk_buff *skb; 3682 3683 for (;;) { 3684 skb = skb_dequeue(&ar->offchan_tx_queue); 3685 if (!skb) 3686 break; 3687 3688 ieee80211_free_txskb(ar->hw, skb); 3689 } 3690 } 3691 3692 void ath10k_offchan_tx_work(struct work_struct *work) 3693 { 3694 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3695 struct ath10k_peer *peer; 3696 struct ath10k_vif *arvif; 3697 enum ath10k_hw_txrx_mode txmode; 3698 enum ath10k_mac_tx_path txpath; 3699 struct ieee80211_hdr *hdr; 3700 struct ieee80211_vif *vif; 3701 struct ieee80211_sta *sta; 3702 struct sk_buff *skb; 3703 const u8 *peer_addr; 3704 int vdev_id; 3705 int ret; 3706 unsigned long time_left; 3707 bool tmp_peer_created = false; 3708 3709 /* FW requirement: We must create a peer before FW will send out 3710 * an offchannel frame. Otherwise the frame will be stuck and 3711 * never transmitted. We delete the peer upon tx completion. 3712 * It is unlikely that a peer for offchannel tx will already be 3713 * present. However it may be in some rare cases so account for that. 3714 * Otherwise we might remove a legitimate peer and break stuff. 3715 */ 3716 3717 for (;;) { 3718 skb = skb_dequeue(&ar->offchan_tx_queue); 3719 if (!skb) 3720 break; 3721 3722 mutex_lock(&ar->conf_mutex); 3723 3724 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3725 skb); 3726 3727 hdr = (struct ieee80211_hdr *)skb->data; 3728 peer_addr = ieee80211_get_DA(hdr); 3729 3730 spin_lock_bh(&ar->data_lock); 3731 vdev_id = ar->scan.vdev_id; 3732 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3733 spin_unlock_bh(&ar->data_lock); 3734 3735 if (peer) 3736 /* FIXME: should this use ath10k_warn()? 
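 * (a pre-existing peer is simply tolerated: tmp_peer_created stays
 * false, so the peer is not deleted once the offchannel tx completes)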
*/ 3737 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3738 peer_addr, vdev_id); 3739 3740 if (!peer) { 3741 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3742 peer_addr, 3743 WMI_PEER_TYPE_DEFAULT); 3744 if (ret) 3745 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3746 peer_addr, vdev_id, ret); 3747 tmp_peer_created = (ret == 0); 3748 } 3749 3750 spin_lock_bh(&ar->data_lock); 3751 reinit_completion(&ar->offchan_tx_completed); 3752 ar->offchan_tx_skb = skb; 3753 spin_unlock_bh(&ar->data_lock); 3754 3755 /* It's safe to access vif and sta - conf_mutex guarantees that 3756 * sta_state() and remove_interface() are locked exclusively 3757 * out wrt to this offchannel worker. 3758 */ 3759 arvif = ath10k_get_arvif(ar, vdev_id); 3760 if (arvif) { 3761 vif = arvif->vif; 3762 sta = ieee80211_find_sta(vif, peer_addr); 3763 } else { 3764 vif = NULL; 3765 sta = NULL; 3766 } 3767 3768 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3769 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3770 3771 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3772 if (ret) { 3773 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3774 ret); 3775 /* not serious */ 3776 } 3777 3778 time_left = 3779 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3780 if (time_left == 0) 3781 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3782 skb); 3783 3784 if (!peer && tmp_peer_created) { 3785 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3786 if (ret) 3787 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3788 peer_addr, vdev_id, ret); 3789 } 3790 3791 mutex_unlock(&ar->conf_mutex); 3792 } 3793 } 3794 3795 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3796 { 3797 struct sk_buff *skb; 3798 3799 for (;;) { 3800 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3801 if (!skb) 3802 break; 3803 3804 ieee80211_free_txskb(ar->hw, skb); 3805 } 3806 } 3807 3808 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3809 { 3810 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3811 struct sk_buff *skb; 3812 dma_addr_t paddr; 3813 int ret; 3814 3815 for (;;) { 3816 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3817 if (!skb) 3818 break; 3819 3820 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 3821 ar->running_fw->fw_file.fw_features)) { 3822 paddr = dma_map_single(ar->dev, skb->data, 3823 skb->len, DMA_TO_DEVICE); 3824 if (!paddr) 3825 continue; 3826 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); 3827 if (ret) { 3828 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", 3829 ret); 3830 dma_unmap_single(ar->dev, paddr, skb->len, 3831 DMA_FROM_DEVICE); 3832 ieee80211_free_txskb(ar->hw, skb); 3833 } 3834 } else { 3835 ret = ath10k_wmi_mgmt_tx(ar, skb); 3836 if (ret) { 3837 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3838 ret); 3839 ieee80211_free_txskb(ar->hw, skb); 3840 } 3841 } 3842 } 3843 } 3844 3845 static void ath10k_mac_txq_init(struct ieee80211_txq *txq) 3846 { 3847 struct ath10k_txq *artxq; 3848 3849 if (!txq) 3850 return; 3851 3852 artxq = (void *)txq->drv_priv; 3853 INIT_LIST_HEAD(&artxq->list); 3854 } 3855 3856 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3857 { 3858 struct ath10k_txq *artxq; 3859 struct ath10k_skb_cb *cb; 3860 struct sk_buff *msdu; 3861 int msdu_id; 3862 3863 if (!txq) 3864 return; 3865 3866 artxq = (void *)txq->drv_priv; 3867 spin_lock_bh(&ar->txqs_lock); 3868 if (!list_empty(&artxq->list)) 3869 
list_del_init(&artxq->list); 3870 spin_unlock_bh(&ar->txqs_lock); 3871 3872 spin_lock_bh(&ar->htt.tx_lock); 3873 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3874 cb = ATH10K_SKB_CB(msdu); 3875 if (cb->txq == txq) 3876 cb->txq = NULL; 3877 } 3878 spin_unlock_bh(&ar->htt.tx_lock); 3879 } 3880 3881 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3882 u16 peer_id, 3883 u8 tid) 3884 { 3885 struct ath10k_peer *peer; 3886 3887 lockdep_assert_held(&ar->data_lock); 3888 3889 peer = ar->peer_map[peer_id]; 3890 if (!peer) 3891 return NULL; 3892 3893 if (peer->removed) 3894 return NULL; 3895 3896 if (peer->sta) 3897 return peer->sta->txq[tid]; 3898 else if (peer->vif) 3899 return peer->vif->txq; 3900 else 3901 return NULL; 3902 } 3903 3904 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3905 struct ieee80211_txq *txq) 3906 { 3907 struct ath10k *ar = hw->priv; 3908 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3909 3910 /* No need to get locks */ 3911 3912 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3913 return true; 3914 3915 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3916 return true; 3917 3918 if (artxq->num_fw_queued < artxq->num_push_allowed) 3919 return true; 3920 3921 return false; 3922 } 3923 3924 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 3925 struct ieee80211_txq *txq) 3926 { 3927 struct ath10k *ar = hw->priv; 3928 struct ath10k_htt *htt = &ar->htt; 3929 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3930 struct ieee80211_vif *vif = txq->vif; 3931 struct ieee80211_sta *sta = txq->sta; 3932 enum ath10k_hw_txrx_mode txmode; 3933 enum ath10k_mac_tx_path txpath; 3934 struct sk_buff *skb; 3935 struct ieee80211_hdr *hdr; 3936 size_t skb_len; 3937 bool is_mgmt, is_presp; 3938 int ret; 3939 3940 spin_lock_bh(&ar->htt.tx_lock); 3941 ret = ath10k_htt_tx_inc_pending(htt); 3942 spin_unlock_bh(&ar->htt.tx_lock); 3943 3944 if (ret) 3945 return ret; 3946 3947 skb = ieee80211_tx_dequeue(hw, txq); 3948 if (!skb) { 3949 spin_lock_bh(&ar->htt.tx_lock); 3950 ath10k_htt_tx_dec_pending(htt); 3951 spin_unlock_bh(&ar->htt.tx_lock); 3952 3953 return -ENOENT; 3954 } 3955 3956 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 3957 3958 skb_len = skb->len; 3959 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3960 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3961 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 3962 3963 if (is_mgmt) { 3964 hdr = (struct ieee80211_hdr *)skb->data; 3965 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 3966 3967 spin_lock_bh(&ar->htt.tx_lock); 3968 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 3969 3970 if (ret) { 3971 ath10k_htt_tx_dec_pending(htt); 3972 spin_unlock_bh(&ar->htt.tx_lock); 3973 return ret; 3974 } 3975 spin_unlock_bh(&ar->htt.tx_lock); 3976 } 3977 3978 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3979 if (unlikely(ret)) { 3980 ath10k_warn(ar, "failed to push frame: %d\n", ret); 3981 3982 spin_lock_bh(&ar->htt.tx_lock); 3983 ath10k_htt_tx_dec_pending(htt); 3984 if (is_mgmt) 3985 ath10k_htt_tx_mgmt_dec_pending(htt); 3986 spin_unlock_bh(&ar->htt.tx_lock); 3987 3988 return ret; 3989 } 3990 3991 spin_lock_bh(&ar->htt.tx_lock); 3992 artxq->num_fw_queued++; 3993 spin_unlock_bh(&ar->htt.tx_lock); 3994 3995 return skb_len; 3996 } 3997 3998 void ath10k_mac_tx_push_pending(struct ath10k *ar) 3999 { 4000 struct ieee80211_hw *hw = ar->hw; 4001 struct ieee80211_txq *txq; 4002 struct ath10k_txq *artxq; 4003 struct ath10k_txq *last; 4004 int ret; 4005 int max; 4006 
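	/* A rough outline of the scheduling below: queues are served
	 * round-robin from ar->txqs, at most 16 frames per queue per pass,
	 * and are requeued at the tail unless they ran dry (-ENOENT). The
	 * pass ends after the queue that was last at the start has been
	 * served, or earlier on a hard push error.
	 */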
4007 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 4008 return; 4009 4010 spin_lock_bh(&ar->txqs_lock); 4011 rcu_read_lock(); 4012 4013 last = list_last_entry(&ar->txqs, struct ath10k_txq, list); 4014 while (!list_empty(&ar->txqs)) { 4015 artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4016 txq = container_of((void *)artxq, struct ieee80211_txq, 4017 drv_priv); 4018 4019 /* Prevent aggressive sta/tid taking over tx queue */ 4020 max = 16; 4021 ret = 0; 4022 while (ath10k_mac_tx_can_push(hw, txq) && max--) { 4023 ret = ath10k_mac_tx_push_txq(hw, txq); 4024 if (ret < 0) 4025 break; 4026 } 4027 4028 list_del_init(&artxq->list); 4029 if (ret != -ENOENT) 4030 list_add_tail(&artxq->list, &ar->txqs); 4031 4032 ath10k_htt_tx_txq_update(hw, txq); 4033 4034 if (artxq == last || (ret < 0 && ret != -ENOENT)) 4035 break; 4036 } 4037 4038 rcu_read_unlock(); 4039 spin_unlock_bh(&ar->txqs_lock); 4040 } 4041 4042 /************/ 4043 /* Scanning */ 4044 /************/ 4045 4046 void __ath10k_scan_finish(struct ath10k *ar) 4047 { 4048 lockdep_assert_held(&ar->data_lock); 4049 4050 switch (ar->scan.state) { 4051 case ATH10K_SCAN_IDLE: 4052 break; 4053 case ATH10K_SCAN_RUNNING: 4054 case ATH10K_SCAN_ABORTING: 4055 if (!ar->scan.is_roc) { 4056 struct cfg80211_scan_info info = { 4057 .aborted = (ar->scan.state == 4058 ATH10K_SCAN_ABORTING), 4059 }; 4060 4061 ieee80211_scan_completed(ar->hw, &info); 4062 } else if (ar->scan.roc_notify) { 4063 ieee80211_remain_on_channel_expired(ar->hw); 4064 } 4065 /* fall through */ 4066 case ATH10K_SCAN_STARTING: 4067 ar->scan.state = ATH10K_SCAN_IDLE; 4068 ar->scan_channel = NULL; 4069 ar->scan.roc_freq = 0; 4070 ath10k_offchan_tx_purge(ar); 4071 cancel_delayed_work(&ar->scan.timeout); 4072 complete(&ar->scan.completed); 4073 break; 4074 } 4075 } 4076 4077 void ath10k_scan_finish(struct ath10k *ar) 4078 { 4079 spin_lock_bh(&ar->data_lock); 4080 __ath10k_scan_finish(ar); 4081 spin_unlock_bh(&ar->data_lock); 4082 } 4083 4084 static int ath10k_scan_stop(struct ath10k *ar) 4085 { 4086 struct wmi_stop_scan_arg arg = { 4087 .req_id = 1, /* FIXME */ 4088 .req_type = WMI_SCAN_STOP_ONE, 4089 .u.scan_id = ATH10K_SCAN_ID, 4090 }; 4091 int ret; 4092 4093 lockdep_assert_held(&ar->conf_mutex); 4094 4095 ret = ath10k_wmi_stop_scan(ar, &arg); 4096 if (ret) { 4097 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4098 goto out; 4099 } 4100 4101 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4102 if (ret == 0) { 4103 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4104 ret = -ETIMEDOUT; 4105 } else if (ret > 0) { 4106 ret = 0; 4107 } 4108 4109 out: 4110 /* Scan state should be updated upon scan completion but in case 4111 * firmware fails to deliver the event (for whatever reason) it is 4112 * desired to clean up scan state anyway. Firmware may have just 4113 * dropped the scan completion event delivery due to transport pipe 4114 * being overflown with data and/or it can recover on its own before 4115 * next scan request is submitted. 
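 *
 * The state is re-checked under data_lock below and
 * __ath10k_scan_finish() is invoked only when it is not already
 * ATH10K_SCAN_IDLE.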
4116 */ 4117 spin_lock_bh(&ar->data_lock); 4118 if (ar->scan.state != ATH10K_SCAN_IDLE) 4119 __ath10k_scan_finish(ar); 4120 spin_unlock_bh(&ar->data_lock); 4121 4122 return ret; 4123 } 4124 4125 static void ath10k_scan_abort(struct ath10k *ar) 4126 { 4127 int ret; 4128 4129 lockdep_assert_held(&ar->conf_mutex); 4130 4131 spin_lock_bh(&ar->data_lock); 4132 4133 switch (ar->scan.state) { 4134 case ATH10K_SCAN_IDLE: 4135 /* This can happen if timeout worker kicked in and called 4136 * abortion while scan completion was being processed. 4137 */ 4138 break; 4139 case ATH10K_SCAN_STARTING: 4140 case ATH10K_SCAN_ABORTING: 4141 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4142 ath10k_scan_state_str(ar->scan.state), 4143 ar->scan.state); 4144 break; 4145 case ATH10K_SCAN_RUNNING: 4146 ar->scan.state = ATH10K_SCAN_ABORTING; 4147 spin_unlock_bh(&ar->data_lock); 4148 4149 ret = ath10k_scan_stop(ar); 4150 if (ret) 4151 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4152 4153 spin_lock_bh(&ar->data_lock); 4154 break; 4155 } 4156 4157 spin_unlock_bh(&ar->data_lock); 4158 } 4159 4160 void ath10k_scan_timeout_work(struct work_struct *work) 4161 { 4162 struct ath10k *ar = container_of(work, struct ath10k, 4163 scan.timeout.work); 4164 4165 mutex_lock(&ar->conf_mutex); 4166 ath10k_scan_abort(ar); 4167 mutex_unlock(&ar->conf_mutex); 4168 } 4169 4170 static int ath10k_start_scan(struct ath10k *ar, 4171 const struct wmi_start_scan_arg *arg) 4172 { 4173 int ret; 4174 4175 lockdep_assert_held(&ar->conf_mutex); 4176 4177 ret = ath10k_wmi_start_scan(ar, arg); 4178 if (ret) 4179 return ret; 4180 4181 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4182 if (ret == 0) { 4183 ret = ath10k_scan_stop(ar); 4184 if (ret) 4185 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4186 4187 return -ETIMEDOUT; 4188 } 4189 4190 /* If we failed to start the scan, return error code at 4191 * this point. This is probably due to some issue in the 4192 * firmware, but no need to wedge the driver due to that... 
4193 */ 4194 spin_lock_bh(&ar->data_lock); 4195 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4196 spin_unlock_bh(&ar->data_lock); 4197 return -EINVAL; 4198 } 4199 spin_unlock_bh(&ar->data_lock); 4200 4201 return 0; 4202 } 4203 4204 /**********************/ 4205 /* mac80211 callbacks */ 4206 /**********************/ 4207 4208 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4209 struct ieee80211_tx_control *control, 4210 struct sk_buff *skb) 4211 { 4212 struct ath10k *ar = hw->priv; 4213 struct ath10k_htt *htt = &ar->htt; 4214 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4215 struct ieee80211_vif *vif = info->control.vif; 4216 struct ieee80211_sta *sta = control->sta; 4217 struct ieee80211_txq *txq = NULL; 4218 struct ieee80211_hdr *hdr = (void *)skb->data; 4219 enum ath10k_hw_txrx_mode txmode; 4220 enum ath10k_mac_tx_path txpath; 4221 bool is_htt; 4222 bool is_mgmt; 4223 bool is_presp; 4224 int ret; 4225 4226 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb); 4227 4228 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4229 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4230 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4231 txpath == ATH10K_MAC_TX_HTT_MGMT); 4232 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4233 4234 if (is_htt) { 4235 spin_lock_bh(&ar->htt.tx_lock); 4236 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4237 4238 ret = ath10k_htt_tx_inc_pending(htt); 4239 if (ret) { 4240 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4241 ret); 4242 spin_unlock_bh(&ar->htt.tx_lock); 4243 ieee80211_free_txskb(ar->hw, skb); 4244 return; 4245 } 4246 4247 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4248 if (ret) { 4249 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4250 ret); 4251 ath10k_htt_tx_dec_pending(htt); 4252 spin_unlock_bh(&ar->htt.tx_lock); 4253 ieee80211_free_txskb(ar->hw, skb); 4254 return; 4255 } 4256 spin_unlock_bh(&ar->htt.tx_lock); 4257 } 4258 4259 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4260 if (ret) { 4261 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4262 if (is_htt) { 4263 spin_lock_bh(&ar->htt.tx_lock); 4264 ath10k_htt_tx_dec_pending(htt); 4265 if (is_mgmt) 4266 ath10k_htt_tx_mgmt_dec_pending(htt); 4267 spin_unlock_bh(&ar->htt.tx_lock); 4268 } 4269 return; 4270 } 4271 } 4272 4273 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4274 struct ieee80211_txq *txq) 4275 { 4276 struct ath10k *ar = hw->priv; 4277 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4278 struct ieee80211_txq *f_txq; 4279 struct ath10k_txq *f_artxq; 4280 int ret = 0; 4281 int max = 16; 4282 4283 spin_lock_bh(&ar->txqs_lock); 4284 if (list_empty(&artxq->list)) 4285 list_add_tail(&artxq->list, &ar->txqs); 4286 4287 f_artxq = list_first_entry(&ar->txqs, struct ath10k_txq, list); 4288 f_txq = container_of((void *)f_artxq, struct ieee80211_txq, drv_priv); 4289 list_del_init(&f_artxq->list); 4290 4291 while (ath10k_mac_tx_can_push(hw, f_txq) && max--) { 4292 ret = ath10k_mac_tx_push_txq(hw, f_txq); 4293 if (ret) 4294 break; 4295 } 4296 if (ret != -ENOENT) 4297 list_add_tail(&f_artxq->list, &ar->txqs); 4298 spin_unlock_bh(&ar->txqs_lock); 4299 4300 ath10k_htt_tx_txq_update(hw, f_txq); 4301 ath10k_htt_tx_txq_update(hw, txq); 4302 } 4303 4304 /* Must not be called with conf_mutex held as workers can use that also. 
*/ 4305 void ath10k_drain_tx(struct ath10k *ar) 4306 { 4307 /* make sure rcu-protected mac80211 tx path itself is drained */ 4308 synchronize_net(); 4309 4310 ath10k_offchan_tx_purge(ar); 4311 ath10k_mgmt_over_wmi_tx_purge(ar); 4312 4313 cancel_work_sync(&ar->offchan_tx_work); 4314 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4315 } 4316 4317 void ath10k_halt(struct ath10k *ar) 4318 { 4319 struct ath10k_vif *arvif; 4320 4321 lockdep_assert_held(&ar->conf_mutex); 4322 4323 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4324 ar->filter_flags = 0; 4325 ar->monitor = false; 4326 ar->monitor_arvif = NULL; 4327 4328 if (ar->monitor_started) 4329 ath10k_monitor_stop(ar); 4330 4331 ar->monitor_started = false; 4332 ar->tx_paused = 0; 4333 4334 ath10k_scan_finish(ar); 4335 ath10k_peer_cleanup_all(ar); 4336 ath10k_core_stop(ar); 4337 ath10k_hif_power_down(ar); 4338 4339 spin_lock_bh(&ar->data_lock); 4340 list_for_each_entry(arvif, &ar->arvifs, list) 4341 ath10k_mac_vif_beacon_cleanup(arvif); 4342 spin_unlock_bh(&ar->data_lock); 4343 } 4344 4345 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4346 { 4347 struct ath10k *ar = hw->priv; 4348 4349 mutex_lock(&ar->conf_mutex); 4350 4351 *tx_ant = ar->cfg_tx_chainmask; 4352 *rx_ant = ar->cfg_rx_chainmask; 4353 4354 mutex_unlock(&ar->conf_mutex); 4355 4356 return 0; 4357 } 4358 4359 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4360 { 4361 /* It is not clear that allowing gaps in chainmask 4362 * is helpful. Probably it will not do what user 4363 * is hoping for, so warn in that case. 4364 */ 4365 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4366 return; 4367 4368 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n", 4369 dbg, cm); 4370 } 4371 4372 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4373 { 4374 int nsts = ar->vht_cap_info; 4375 4376 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4377 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4378 4379 /* If firmware does not deliver to host number of space-time 4380 * streams supported, assume it support up to 4 BF STS and return 4381 * the value for VHT CAP: nsts-1) 4382 */ 4383 if (nsts == 0) 4384 return 3; 4385 4386 return nsts; 4387 } 4388 4389 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4390 { 4391 int sound_dim = ar->vht_cap_info; 4392 4393 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4394 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4395 4396 /* If the sounding dimension is not advertised by the firmware, 4397 * let's use a default value of 1 4398 */ 4399 if (sound_dim == 0) 4400 return 1; 4401 4402 return sound_dim; 4403 } 4404 4405 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4406 { 4407 struct ieee80211_sta_vht_cap vht_cap = {0}; 4408 struct ath10k_hw_params *hw = &ar->hw_params; 4409 u16 mcs_map; 4410 u32 val; 4411 int i; 4412 4413 vht_cap.vht_supported = 1; 4414 vht_cap.cap = ar->vht_cap_info; 4415 4416 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4417 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4418 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4419 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4420 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4421 4422 vht_cap.cap |= val; 4423 } 4424 4425 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4426 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4427 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4428 
val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4429 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4430 4431 vht_cap.cap |= val; 4432 } 4433 4434 /* Currently the firmware seems to be buggy, don't enable 80+80 4435 * mode until that's resolved. 4436 */ 4437 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4438 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4439 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4440 4441 mcs_map = 0; 4442 for (i = 0; i < 8; i++) { 4443 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4444 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4445 else 4446 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4447 } 4448 4449 if (ar->cfg_tx_chainmask <= 1) 4450 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4451 4452 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4453 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4454 4455 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do 4456 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give 4457 * user-space a clue if that is the case. 4458 */ 4459 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4460 (hw->vht160_mcs_rx_highest != 0 || 4461 hw->vht160_mcs_tx_highest != 0)) { 4462 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4463 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4464 } 4465 4466 return vht_cap; 4467 } 4468 4469 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4470 { 4471 int i; 4472 struct ieee80211_sta_ht_cap ht_cap = {0}; 4473 4474 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4475 return ht_cap; 4476 4477 ht_cap.ht_supported = 1; 4478 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4479 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4480 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4481 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4482 ht_cap.cap |= 4483 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4484 4485 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4486 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4487 4488 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4489 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4490 4491 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4492 u32 smps; 4493 4494 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4495 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4496 4497 ht_cap.cap |= smps; 4498 } 4499 4500 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4501 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4502 4503 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4504 u32 stbc; 4505 4506 stbc = ar->ht_cap_info; 4507 stbc &= WMI_HT_CAP_RX_STBC; 4508 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4509 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4510 stbc &= IEEE80211_HT_CAP_RX_STBC; 4511 4512 ht_cap.cap |= stbc; 4513 } 4514 4515 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 4516 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4517 4518 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4519 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4520 4521 /* max AMSDU is implicitly taken from vht_cap_info */ 4522 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4523 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4524 4525 for (i = 0; i < ar->num_rf_chains; i++) { 4526 if (ar->cfg_rx_chainmask & BIT(i)) 4527 ht_cap.mcs.rx_mask[i] = 0xFF; 4528 } 4529 4530 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4531 4532 return ht_cap; 4533 } 4534 4535 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4536 { 4537 struct 
ieee80211_supported_band *band; 4538 struct ieee80211_sta_vht_cap vht_cap; 4539 struct ieee80211_sta_ht_cap ht_cap; 4540 4541 ht_cap = ath10k_get_ht_cap(ar); 4542 vht_cap = ath10k_create_vht_cap(ar); 4543 4544 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4545 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4546 band->ht_cap = ht_cap; 4547 } 4548 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4549 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4550 band->ht_cap = ht_cap; 4551 band->vht_cap = vht_cap; 4552 } 4553 } 4554 4555 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4556 { 4557 int ret; 4558 4559 lockdep_assert_held(&ar->conf_mutex); 4560 4561 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4562 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4563 4564 ar->cfg_tx_chainmask = tx_ant; 4565 ar->cfg_rx_chainmask = rx_ant; 4566 4567 if ((ar->state != ATH10K_STATE_ON) && 4568 (ar->state != ATH10K_STATE_RESTARTED)) 4569 return 0; 4570 4571 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4572 tx_ant); 4573 if (ret) { 4574 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 4575 ret, tx_ant); 4576 return ret; 4577 } 4578 4579 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 4580 rx_ant); 4581 if (ret) { 4582 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 4583 ret, rx_ant); 4584 return ret; 4585 } 4586 4587 /* Reload HT/VHT capability */ 4588 ath10k_mac_setup_ht_vht_cap(ar); 4589 4590 return 0; 4591 } 4592 4593 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 4594 { 4595 struct ath10k *ar = hw->priv; 4596 int ret; 4597 4598 mutex_lock(&ar->conf_mutex); 4599 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 4600 mutex_unlock(&ar->conf_mutex); 4601 return ret; 4602 } 4603 4604 static int ath10k_start(struct ieee80211_hw *hw) 4605 { 4606 struct ath10k *ar = hw->priv; 4607 u32 param; 4608 int ret = 0; 4609 4610 /* 4611 * This makes sense only when restarting hw. It is harmless to call 4612 * unconditionally. This is necessary to make sure no HTT/WMI tx 4613 * commands will be submitted while restarting. 
 */
	ath10k_drain_tx(ar);

	mutex_lock(&ar->conf_mutex);

	switch (ar->state) {
	case ATH10K_STATE_OFF:
		ar->state = ATH10K_STATE_ON;
		break;
	case ATH10K_STATE_RESTARTING:
		ar->state = ATH10K_STATE_RESTARTED;
		break;
	case ATH10K_STATE_ON:
	case ATH10K_STATE_RESTARTED:
	case ATH10K_STATE_WEDGED:
		WARN_ON(1);
		ret = -EINVAL;
		goto err;
	case ATH10K_STATE_UTF:
		ret = -EBUSY;
		goto err;
	}

	ret = ath10k_hif_power_up(ar);
	if (ret) {
		ath10k_err(ar, "Could not init hif: %d\n", ret);
		goto err_off;
	}

	ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
				&ar->normal_mode_fw);
	if (ret) {
		ath10k_err(ar, "Could not init core: %d\n", ret);
		goto err_power_down;
	}

	param = ar->wmi.pdev_param->pmf_qos;
	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
	if (ret) {
		ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
		goto err_core_stop;
	}

	param = ar->wmi.pdev_param->dynamic_bw;
	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
	if (ret) {
		ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
		goto err_core_stop;
	}

	if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
		ret = ath10k_wmi_adaptive_qcs(ar, true);
		if (ret) {
			ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
				    ret);
			goto err_core_stop;
		}
	}

	if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
		param = ar->wmi.pdev_param->burst_enable;
		ret = ath10k_wmi_pdev_set_param(ar, param, 0);
		if (ret) {
			ath10k_warn(ar, "failed to disable burst: %d\n", ret);
			goto err_core_stop;
		}
	}

	param = ar->wmi.pdev_param->idle_ps_config;
	ret = ath10k_wmi_pdev_set_param(ar, param, 1);
	if (ret && ret != -EOPNOTSUPP) {
		ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
		goto err_core_stop;
	}

	__ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);

	/*
	 * By default the firmware sets the access category of ARP frames to
	 * voice (6). In that case ARP exchange does not work properly with a
	 * UAPSD-enabled AP: ARP requests that arrive with access category 0
	 * are processed by the network stack and sent back with access
	 * category 0, but the firmware changes the access category to 6.
	 * Setting the access category of ARP frames to best effort (0)
	 * solves this problem.
4698 */ 4699 4700 param = ar->wmi.pdev_param->arp_ac_override; 4701 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4702 if (ret) { 4703 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4704 ret); 4705 goto err_core_stop; 4706 } 4707 4708 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4709 ar->running_fw->fw_file.fw_features)) { 4710 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4711 WMI_CCA_DETECT_LEVEL_AUTO, 4712 WMI_CCA_DETECT_MARGIN_AUTO); 4713 if (ret) { 4714 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4715 ret); 4716 goto err_core_stop; 4717 } 4718 } 4719 4720 param = ar->wmi.pdev_param->ani_enable; 4721 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4722 if (ret) { 4723 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4724 ret); 4725 goto err_core_stop; 4726 } 4727 4728 ar->ani_enabled = true; 4729 4730 if (ath10k_peer_stats_enabled(ar)) { 4731 param = ar->wmi.pdev_param->peer_stats_update_period; 4732 ret = ath10k_wmi_pdev_set_param(ar, param, 4733 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4734 if (ret) { 4735 ath10k_warn(ar, 4736 "failed to set peer stats period : %d\n", 4737 ret); 4738 goto err_core_stop; 4739 } 4740 } 4741 4742 param = ar->wmi.pdev_param->enable_btcoex; 4743 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4744 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4745 ar->running_fw->fw_file.fw_features)) { 4746 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4747 if (ret) { 4748 ath10k_warn(ar, 4749 "failed to set btcoex param: %d\n", ret); 4750 goto err_core_stop; 4751 } 4752 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4753 } 4754 4755 ar->num_started_vdevs = 0; 4756 ath10k_regd_update(ar); 4757 4758 ath10k_spectral_start(ar); 4759 ath10k_thermal_set_throttling(ar); 4760 4761 mutex_unlock(&ar->conf_mutex); 4762 return 0; 4763 4764 err_core_stop: 4765 ath10k_core_stop(ar); 4766 4767 err_power_down: 4768 ath10k_hif_power_down(ar); 4769 4770 err_off: 4771 ar->state = ATH10K_STATE_OFF; 4772 4773 err: 4774 mutex_unlock(&ar->conf_mutex); 4775 return ret; 4776 } 4777 4778 static void ath10k_stop(struct ieee80211_hw *hw) 4779 { 4780 struct ath10k *ar = hw->priv; 4781 4782 ath10k_drain_tx(ar); 4783 4784 mutex_lock(&ar->conf_mutex); 4785 if (ar->state != ATH10K_STATE_OFF) { 4786 ath10k_halt(ar); 4787 ar->state = ATH10K_STATE_OFF; 4788 } 4789 mutex_unlock(&ar->conf_mutex); 4790 4791 cancel_work_sync(&ar->set_coverage_class_work); 4792 cancel_delayed_work_sync(&ar->scan.timeout); 4793 cancel_work_sync(&ar->restart_work); 4794 } 4795 4796 static int ath10k_config_ps(struct ath10k *ar) 4797 { 4798 struct ath10k_vif *arvif; 4799 int ret = 0; 4800 4801 lockdep_assert_held(&ar->conf_mutex); 4802 4803 list_for_each_entry(arvif, &ar->arvifs, list) { 4804 ret = ath10k_mac_vif_setup_ps(arvif); 4805 if (ret) { 4806 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4807 break; 4808 } 4809 } 4810 4811 return ret; 4812 } 4813 4814 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4815 { 4816 int ret; 4817 u32 param; 4818 4819 lockdep_assert_held(&ar->conf_mutex); 4820 4821 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 4822 4823 param = ar->wmi.pdev_param->txpower_limit2g; 4824 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4825 if (ret) { 4826 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4827 txpower, ret); 4828 return ret; 4829 } 4830 4831 param = ar->wmi.pdev_param->txpower_limit5g; 4832 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4833 if (ret) { 4834 ath10k_warn(ar, "failed to 
set 5g txpower %d: %d\n", 4835 txpower, ret); 4836 return ret; 4837 } 4838 4839 return 0; 4840 } 4841 4842 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4843 { 4844 struct ath10k_vif *arvif; 4845 int ret, txpower = -1; 4846 4847 lockdep_assert_held(&ar->conf_mutex); 4848 4849 list_for_each_entry(arvif, &ar->arvifs, list) { 4850 if (arvif->txpower <= 0) 4851 continue; 4852 4853 if (txpower == -1) 4854 txpower = arvif->txpower; 4855 else 4856 txpower = min(txpower, arvif->txpower); 4857 } 4858 4859 if (txpower == -1) 4860 return 0; 4861 4862 ret = ath10k_mac_txpower_setup(ar, txpower); 4863 if (ret) { 4864 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4865 txpower, ret); 4866 return ret; 4867 } 4868 4869 return 0; 4870 } 4871 4872 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4873 { 4874 struct ath10k *ar = hw->priv; 4875 struct ieee80211_conf *conf = &hw->conf; 4876 int ret = 0; 4877 4878 mutex_lock(&ar->conf_mutex); 4879 4880 if (changed & IEEE80211_CONF_CHANGE_PS) 4881 ath10k_config_ps(ar); 4882 4883 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4884 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4885 ret = ath10k_monitor_recalc(ar); 4886 if (ret) 4887 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4888 } 4889 4890 mutex_unlock(&ar->conf_mutex); 4891 return ret; 4892 } 4893 4894 static u32 get_nss_from_chainmask(u16 chain_mask) 4895 { 4896 if ((chain_mask & 0xf) == 0xf) 4897 return 4; 4898 else if ((chain_mask & 0x7) == 0x7) 4899 return 3; 4900 else if ((chain_mask & 0x3) == 0x3) 4901 return 2; 4902 return 1; 4903 } 4904 4905 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 4906 { 4907 u32 value = 0; 4908 struct ath10k *ar = arvif->ar; 4909 int nsts; 4910 int sound_dim; 4911 4912 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 4913 return 0; 4914 4915 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 4916 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4917 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 4918 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 4919 4920 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4921 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4922 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 4923 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 4924 4925 if (!value) 4926 return 0; 4927 4928 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 4929 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 4930 4931 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 4932 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 4933 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 4934 4935 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 4936 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 4937 4938 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 4939 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 4940 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 4941 4942 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 4943 ar->wmi.vdev_param->txbf, value); 4944 } 4945 4946 /* 4947 * TODO: 4948 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4949 * because we will send mgmt frames without CCK. This requirement 4950 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4951 * in the TX packet. 
4952 */ 4953 static int ath10k_add_interface(struct ieee80211_hw *hw, 4954 struct ieee80211_vif *vif) 4955 { 4956 struct ath10k *ar = hw->priv; 4957 struct ath10k_vif *arvif = (void *)vif->drv_priv; 4958 struct ath10k_peer *peer; 4959 enum wmi_sta_powersave_param param; 4960 int ret = 0; 4961 u32 value; 4962 int bit; 4963 int i; 4964 u32 vdev_param; 4965 4966 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4967 4968 mutex_lock(&ar->conf_mutex); 4969 4970 memset(arvif, 0, sizeof(*arvif)); 4971 ath10k_mac_txq_init(vif->txq); 4972 4973 arvif->ar = ar; 4974 arvif->vif = vif; 4975 4976 INIT_LIST_HEAD(&arvif->list); 4977 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4978 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4979 ath10k_mac_vif_sta_connection_loss_work); 4980 4981 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4982 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4983 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4984 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4985 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4986 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4987 } 4988 4989 if (ar->num_peers >= ar->max_num_peers) { 4990 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 4991 ret = -ENOBUFS; 4992 goto err; 4993 } 4994 4995 if (ar->free_vdev_map == 0) { 4996 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4997 ret = -EBUSY; 4998 goto err; 4999 } 5000 bit = __ffs64(ar->free_vdev_map); 5001 5002 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 5003 bit, ar->free_vdev_map); 5004 5005 arvif->vdev_id = bit; 5006 arvif->vdev_subtype = 5007 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 5008 5009 switch (vif->type) { 5010 case NL80211_IFTYPE_P2P_DEVICE: 5011 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5012 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5013 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 5014 break; 5015 case NL80211_IFTYPE_UNSPECIFIED: 5016 case NL80211_IFTYPE_STATION: 5017 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5018 if (vif->p2p) 5019 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5020 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 5021 break; 5022 case NL80211_IFTYPE_ADHOC: 5023 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 5024 break; 5025 case NL80211_IFTYPE_MESH_POINT: 5026 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5027 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5028 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5029 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5030 ret = -EINVAL; 5031 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5032 goto err; 5033 } 5034 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5035 break; 5036 case NL80211_IFTYPE_AP: 5037 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5038 5039 if (vif->p2p) 5040 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5041 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5042 break; 5043 case NL80211_IFTYPE_MONITOR: 5044 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5045 break; 5046 default: 5047 WARN_ON(1); 5048 break; 5049 } 5050 5051 /* Using vdev_id as queue number will make it very easy to do per-vif 5052 * tx queue locking. This shouldn't wrap due to interface combinations 5053 * but do a modulo for correctness sake and prevent using offchannel tx 5054 * queues for regular vif tx. 
 */
	vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);
	for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++)
		vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1);

	/* Some firmware revisions don't wait for beacon tx completion before
	 * sending another SWBA event. This could lead to hardware using old
	 * (freed) beacon data in some cases, e.g. tx credit starvation
	 * combined with missed TBTT. This is very, very rare.
	 *
	 * On non-IOMMU-enabled hosts this could be a possible security issue
	 * because hw could beacon some random data on the air. On
	 * IOMMU-enabled hosts DMAR faults would occur in most cases and the
	 * target device would crash.
	 *
	 * Since there are no beacon tx completions (implicit or explicit)
	 * propagated to the host, the only workaround for this is to allocate
	 * a DMA-coherent buffer for the lifetime of a vif and use it for all
	 * beacon tx commands. Worst case for this approach is some beacons may
	 * become corrupted, e.g. have garbled IEs or an out-of-date TIM bitmap.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC ||
	    vif->type == NL80211_IFTYPE_MESH_POINT ||
	    vif->type == NL80211_IFTYPE_AP) {
		arvif->beacon_buf = dma_zalloc_coherent(ar->dev,
							IEEE80211_MAX_FRAME_LEN,
							&arvif->beacon_paddr,
							GFP_ATOMIC);
		if (!arvif->beacon_buf) {
			ret = -ENOMEM;
			ath10k_warn(ar, "failed to allocate beacon buffer: %d\n",
				    ret);
			goto err;
		}
	}

	if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags))
		arvif->nohwcrypt = true;

	if (arvif->nohwcrypt &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		ret = -EINVAL;
		ath10k_warn(ar, "cryptmode module param needed for sw crypto\n");
		goto err;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n",
		   arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype,
		   arvif->beacon_buf ? "single-buf" : "per-skb");

	ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type,
				     arvif->vdev_subtype, vif->addr);
	if (ret) {
		ath10k_warn(ar, "failed to create WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		goto err;
	}

	ar->free_vdev_map &= ~(1LL << arvif->vdev_id);
	spin_lock_bh(&ar->data_lock);
	list_add(&arvif->list, &ar->arvifs);
	spin_unlock_bh(&ar->data_lock);

	/* It makes no sense to have firmware do keepalives. mac80211 already
	 * takes care of this with idle connection polling.
	 */
	ret = ath10k_mac_vif_disable_keepalive(arvif);
	if (ret) {
		ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		goto err_vdev_delete;
	}

	arvif->def_wep_key_idx = -1;

	vdev_param = ar->wmi.vdev_param->tx_encap_type;
	ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
					ATH10K_HW_TXRX_NATIVE_WIFI);
	/* 10.X firmware does not support this VDEV parameter. Do not warn */
	if (ret && ret != -EOPNOTSUPP) {
		ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n",
			    arvif->vdev_id, ret);
		goto err_vdev_delete;
	}

	/* Configuring the number of spatial streams for a monitor interface
	 * causes a target assert in qca9888 and qca6174.
5140 */ 5141 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5142 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5143 5144 vdev_param = ar->wmi.vdev_param->nss; 5145 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5146 nss); 5147 if (ret) { 5148 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5149 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5150 ret); 5151 goto err_vdev_delete; 5152 } 5153 } 5154 5155 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5156 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5157 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5158 vif->addr, WMI_PEER_TYPE_DEFAULT); 5159 if (ret) { 5160 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5161 arvif->vdev_id, ret); 5162 goto err_vdev_delete; 5163 } 5164 5165 spin_lock_bh(&ar->data_lock); 5166 5167 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5168 if (!peer) { 5169 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5170 vif->addr, arvif->vdev_id); 5171 spin_unlock_bh(&ar->data_lock); 5172 ret = -ENOENT; 5173 goto err_peer_delete; 5174 } 5175 5176 arvif->peer_id = find_first_bit(peer->peer_ids, 5177 ATH10K_MAX_NUM_PEER_IDS); 5178 5179 spin_unlock_bh(&ar->data_lock); 5180 } else { 5181 arvif->peer_id = HTT_INVALID_PEERID; 5182 } 5183 5184 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5185 ret = ath10k_mac_set_kickout(arvif); 5186 if (ret) { 5187 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5188 arvif->vdev_id, ret); 5189 goto err_peer_delete; 5190 } 5191 } 5192 5193 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5194 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5195 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5196 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5197 param, value); 5198 if (ret) { 5199 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5200 arvif->vdev_id, ret); 5201 goto err_peer_delete; 5202 } 5203 5204 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5205 if (ret) { 5206 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5207 arvif->vdev_id, ret); 5208 goto err_peer_delete; 5209 } 5210 5211 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5212 if (ret) { 5213 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5214 arvif->vdev_id, ret); 5215 goto err_peer_delete; 5216 } 5217 } 5218 5219 ret = ath10k_mac_set_txbf_conf(arvif); 5220 if (ret) { 5221 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5222 arvif->vdev_id, ret); 5223 goto err_peer_delete; 5224 } 5225 5226 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5227 if (ret) { 5228 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5229 arvif->vdev_id, ret); 5230 goto err_peer_delete; 5231 } 5232 5233 arvif->txpower = vif->bss_conf.txpower; 5234 ret = ath10k_mac_txpower_recalc(ar); 5235 if (ret) { 5236 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5237 goto err_peer_delete; 5238 } 5239 5240 if (vif->type == NL80211_IFTYPE_MONITOR) { 5241 ar->monitor_arvif = arvif; 5242 ret = ath10k_monitor_recalc(ar); 5243 if (ret) { 5244 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5245 goto err_peer_delete; 5246 } 5247 } 5248 5249 spin_lock_bh(&ar->htt.tx_lock); 5250 if (!ar->tx_paused) 5251 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5252 spin_unlock_bh(&ar->htt.tx_lock); 5253 5254 mutex_unlock(&ar->conf_mutex); 5255 return 0; 5256 5257 err_peer_delete: 5258 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5259 arvif->vdev_type == 
WMI_VDEV_TYPE_IBSS) 5260 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5261 5262 err_vdev_delete: 5263 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5264 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5265 spin_lock_bh(&ar->data_lock); 5266 list_del(&arvif->list); 5267 spin_unlock_bh(&ar->data_lock); 5268 5269 err: 5270 if (arvif->beacon_buf) { 5271 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5272 arvif->beacon_buf, arvif->beacon_paddr); 5273 arvif->beacon_buf = NULL; 5274 } 5275 5276 mutex_unlock(&ar->conf_mutex); 5277 5278 return ret; 5279 } 5280 5281 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5282 { 5283 int i; 5284 5285 for (i = 0; i < BITS_PER_LONG; i++) 5286 ath10k_mac_vif_tx_unlock(arvif, i); 5287 } 5288 5289 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5290 struct ieee80211_vif *vif) 5291 { 5292 struct ath10k *ar = hw->priv; 5293 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5294 struct ath10k_peer *peer; 5295 int ret; 5296 int i; 5297 5298 cancel_work_sync(&arvif->ap_csa_work); 5299 cancel_delayed_work_sync(&arvif->connection_loss_work); 5300 5301 mutex_lock(&ar->conf_mutex); 5302 5303 spin_lock_bh(&ar->data_lock); 5304 ath10k_mac_vif_beacon_cleanup(arvif); 5305 spin_unlock_bh(&ar->data_lock); 5306 5307 ret = ath10k_spectral_vif_stop(arvif); 5308 if (ret) 5309 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5310 arvif->vdev_id, ret); 5311 5312 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5313 spin_lock_bh(&ar->data_lock); 5314 list_del(&arvif->list); 5315 spin_unlock_bh(&ar->data_lock); 5316 5317 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5318 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5319 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5320 vif->addr); 5321 if (ret) 5322 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5323 arvif->vdev_id, ret); 5324 5325 kfree(arvif->u.ap.noa_data); 5326 } 5327 5328 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5329 arvif->vdev_id); 5330 5331 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5332 if (ret) 5333 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5334 arvif->vdev_id, ret); 5335 5336 /* Some firmware revisions don't notify host about self-peer removal 5337 * until after associated vdev is deleted. 
5338 */ 5339 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5340 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5341 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5342 vif->addr); 5343 if (ret) 5344 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5345 arvif->vdev_id, ret); 5346 5347 spin_lock_bh(&ar->data_lock); 5348 ar->num_peers--; 5349 spin_unlock_bh(&ar->data_lock); 5350 } 5351 5352 spin_lock_bh(&ar->data_lock); 5353 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5354 peer = ar->peer_map[i]; 5355 if (!peer) 5356 continue; 5357 5358 if (peer->vif == vif) { 5359 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5360 vif->addr, arvif->vdev_id); 5361 peer->vif = NULL; 5362 } 5363 } 5364 spin_unlock_bh(&ar->data_lock); 5365 5366 ath10k_peer_cleanup(ar, arvif->vdev_id); 5367 ath10k_mac_txq_unref(ar, vif->txq); 5368 5369 if (vif->type == NL80211_IFTYPE_MONITOR) { 5370 ar->monitor_arvif = NULL; 5371 ret = ath10k_monitor_recalc(ar); 5372 if (ret) 5373 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5374 } 5375 5376 ret = ath10k_mac_txpower_recalc(ar); 5377 if (ret) 5378 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5379 5380 spin_lock_bh(&ar->htt.tx_lock); 5381 ath10k_mac_vif_tx_unlock_all(arvif); 5382 spin_unlock_bh(&ar->htt.tx_lock); 5383 5384 ath10k_mac_txq_unref(ar, vif->txq); 5385 5386 mutex_unlock(&ar->conf_mutex); 5387 } 5388 5389 /* 5390 * FIXME: Has to be verified. 5391 */ 5392 #define SUPPORTED_FILTERS \ 5393 (FIF_ALLMULTI | \ 5394 FIF_CONTROL | \ 5395 FIF_PSPOLL | \ 5396 FIF_OTHER_BSS | \ 5397 FIF_BCN_PRBRESP_PROMISC | \ 5398 FIF_PROBE_REQ | \ 5399 FIF_FCSFAIL) 5400 5401 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5402 unsigned int changed_flags, 5403 unsigned int *total_flags, 5404 u64 multicast) 5405 { 5406 struct ath10k *ar = hw->priv; 5407 int ret; 5408 5409 mutex_lock(&ar->conf_mutex); 5410 5411 changed_flags &= SUPPORTED_FILTERS; 5412 *total_flags &= SUPPORTED_FILTERS; 5413 ar->filter_flags = *total_flags; 5414 5415 ret = ath10k_monitor_recalc(ar); 5416 if (ret) 5417 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5418 5419 mutex_unlock(&ar->conf_mutex); 5420 } 5421 5422 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5423 struct ieee80211_vif *vif, 5424 struct ieee80211_bss_conf *info, 5425 u32 changed) 5426 { 5427 struct ath10k *ar = hw->priv; 5428 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5429 int ret = 0; 5430 u32 vdev_param, pdev_param, slottime, preamble; 5431 5432 mutex_lock(&ar->conf_mutex); 5433 5434 if (changed & BSS_CHANGED_IBSS) 5435 ath10k_control_ibss(arvif, info, vif->addr); 5436 5437 if (changed & BSS_CHANGED_BEACON_INT) { 5438 arvif->beacon_interval = info->beacon_int; 5439 vdev_param = ar->wmi.vdev_param->beacon_interval; 5440 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5441 arvif->beacon_interval); 5442 ath10k_dbg(ar, ATH10K_DBG_MAC, 5443 "mac vdev %d beacon_interval %d\n", 5444 arvif->vdev_id, arvif->beacon_interval); 5445 5446 if (ret) 5447 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5448 arvif->vdev_id, ret); 5449 } 5450 5451 if (changed & BSS_CHANGED_BEACON) { 5452 ath10k_dbg(ar, ATH10K_DBG_MAC, 5453 "vdev %d set beacon tx mode to staggered\n", 5454 arvif->vdev_id); 5455 5456 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 5457 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5458 WMI_BEACON_STAGGERED_MODE); 5459 if (ret) 5460 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 
				    arvif->vdev_id, ret);

		ret = ath10k_mac_setup_bcn_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to update beacon template: %d\n",
				    ret);

		if (ieee80211_vif_is_mesh(vif)) {
			/* mesh doesn't use SSID but firmware needs it */
			strncpy(arvif->u.ap.ssid, "mesh",
				sizeof(arvif->u.ap.ssid));
			arvif->u.ap.ssid_len = 4;
		}
	}

	if (changed & BSS_CHANGED_AP_PROBE_RESP) {
		ret = ath10k_mac_setup_prb_tmpl(arvif);
		if (ret)
			ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n",
				    arvif->vdev_id, ret);
	}

	if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) {
		arvif->dtim_period = info->dtim_period;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac vdev %d dtim_period %d\n",
			   arvif->vdev_id, arvif->dtim_period);

		vdev_param = ar->wmi.vdev_param->dtim_period;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						arvif->dtim_period);
		if (ret)
			ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_SSID &&
	    vif->type == NL80211_IFTYPE_AP) {
		arvif->u.ap.ssid_len = info->ssid_len;
		if (info->ssid_len)
			memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len);
		arvif->u.ap.hidden_ssid = info->hidden_ssid;
	}

	if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid))
		ether_addr_copy(arvif->bssid, info->bssid);

	if (changed & BSS_CHANGED_BEACON_ENABLED)
		ath10k_control_beaconing(arvif, info);

	if (changed & BSS_CHANGED_ERP_CTS_PROT) {
		arvif->use_cts_prot = info->use_cts_prot;

		ret = ath10k_recalc_rtscts_prot(arvif);
		if (ret)
			ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n",
				    arvif->vdev_id, ret);

		if (ath10k_mac_can_set_cts_prot(arvif)) {
			ret = ath10k_mac_set_cts_prot(arvif);
			if (ret)
				ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n",
					    arvif->vdev_id, ret);
		}
	}

	if (changed & BSS_CHANGED_ERP_SLOT) {
		if (info->use_short_slot)
			slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */
		else
			slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */

		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n",
			   arvif->vdev_id, slottime);

		vdev_param = ar->wmi.vdev_param->slot_time;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						slottime);
		if (ret)
			ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_ERP_PREAMBLE) {
		if (info->use_short_preamble)
			preamble = WMI_VDEV_PREAMBLE_SHORT;
		else
			preamble = WMI_VDEV_PREAMBLE_LONG;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac vdev %d preamble %d\n",
			   arvif->vdev_id, preamble);

		vdev_param = ar->wmi.vdev_param->preamble;
		ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param,
						preamble);
		if (ret)
			ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n",
				    arvif->vdev_id, ret);
	}

	if (changed & BSS_CHANGED_ASSOC) {
		if (info->assoc) {
			/* Workaround: Make sure monitor vdev is not running
			 * when associating to prevent some firmware revisions
			 * (e.g. 10.1 and 10.2) from crashing.
5569 */ 5570 if (ar->monitor_started) 5571 ath10k_monitor_stop(ar); 5572 ath10k_bss_assoc(hw, vif, info); 5573 ath10k_monitor_recalc(ar); 5574 } else { 5575 ath10k_bss_disassoc(hw, vif); 5576 } 5577 } 5578 5579 if (changed & BSS_CHANGED_TXPOWER) { 5580 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5581 arvif->vdev_id, info->txpower); 5582 5583 arvif->txpower = info->txpower; 5584 ret = ath10k_mac_txpower_recalc(ar); 5585 if (ret) 5586 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5587 } 5588 5589 if (changed & BSS_CHANGED_PS) { 5590 arvif->ps = vif->bss_conf.ps; 5591 5592 ret = ath10k_config_ps(ar); 5593 if (ret) 5594 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5595 arvif->vdev_id, ret); 5596 } 5597 5598 mutex_unlock(&ar->conf_mutex); 5599 } 5600 5601 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 5602 { 5603 struct ath10k *ar = hw->priv; 5604 5605 /* This function should never be called if setting the coverage class 5606 * is not supported on this hardware. 5607 */ 5608 if (!ar->hw_params.hw_ops->set_coverage_class) { 5609 WARN_ON_ONCE(1); 5610 return; 5611 } 5612 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5613 } 5614 5615 struct ath10k_mac_tdls_iter_data { 5616 u32 num_tdls_stations; 5617 struct ieee80211_vif *curr_vif; 5618 }; 5619 5620 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5621 struct ieee80211_sta *sta) 5622 { 5623 struct ath10k_mac_tdls_iter_data *iter_data = data; 5624 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5625 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5626 5627 if (sta->tdls && sta_vif == iter_data->curr_vif) 5628 iter_data->num_tdls_stations++; 5629 } 5630 5631 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5632 struct ieee80211_vif *vif) 5633 { 5634 struct ath10k_mac_tdls_iter_data data = {}; 5635 5636 data.curr_vif = vif; 5637 5638 ieee80211_iterate_stations_atomic(hw, 5639 ath10k_mac_tdls_vif_stations_count_iter, 5640 &data); 5641 return data.num_tdls_stations; 5642 } 5643 5644 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 5645 struct ieee80211_vif *vif) 5646 { 5647 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5648 int *num_tdls_vifs = data; 5649 5650 if (vif->type != NL80211_IFTYPE_STATION) 5651 return; 5652 5653 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 5654 (*num_tdls_vifs)++; 5655 } 5656 5657 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 5658 { 5659 int num_tdls_vifs = 0; 5660 5661 ieee80211_iterate_active_interfaces_atomic(hw, 5662 IEEE80211_IFACE_ITER_NORMAL, 5663 ath10k_mac_tdls_vifs_count_iter, 5664 &num_tdls_vifs); 5665 return num_tdls_vifs; 5666 } 5667 5668 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5669 struct ieee80211_vif *vif, 5670 struct ieee80211_scan_request *hw_req) 5671 { 5672 struct ath10k *ar = hw->priv; 5673 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5674 struct cfg80211_scan_request *req = &hw_req->req; 5675 struct wmi_start_scan_arg arg; 5676 int ret = 0; 5677 int i; 5678 5679 mutex_lock(&ar->conf_mutex); 5680 5681 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5682 ret = -EBUSY; 5683 goto exit; 5684 } 5685 5686 spin_lock_bh(&ar->data_lock); 5687 switch (ar->scan.state) { 5688 case ATH10K_SCAN_IDLE: 5689 reinit_completion(&ar->scan.started); 5690 reinit_completion(&ar->scan.completed); 5691 ar->scan.state = ATH10K_SCAN_STARTING; 5692 ar->scan.is_roc = false; 5693 ar->scan.vdev_id = arvif->vdev_id; 5694 ret 
= 0; 5695 break; 5696 case ATH10K_SCAN_STARTING: 5697 case ATH10K_SCAN_RUNNING: 5698 case ATH10K_SCAN_ABORTING: 5699 ret = -EBUSY; 5700 break; 5701 } 5702 spin_unlock_bh(&ar->data_lock); 5703 5704 if (ret) 5705 goto exit; 5706 5707 memset(&arg, 0, sizeof(arg)); 5708 ath10k_wmi_start_scan_init(ar, &arg); 5709 arg.vdev_id = arvif->vdev_id; 5710 arg.scan_id = ATH10K_SCAN_ID; 5711 5712 if (req->ie_len) { 5713 arg.ie_len = req->ie_len; 5714 memcpy(arg.ie, req->ie, arg.ie_len); 5715 } 5716 5717 if (req->n_ssids) { 5718 arg.n_ssids = req->n_ssids; 5719 for (i = 0; i < arg.n_ssids; i++) { 5720 arg.ssids[i].len = req->ssids[i].ssid_len; 5721 arg.ssids[i].ssid = req->ssids[i].ssid; 5722 } 5723 } else { 5724 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5725 } 5726 5727 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 5728 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; 5729 ether_addr_copy(arg.mac_addr.addr, req->mac_addr); 5730 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); 5731 } 5732 5733 if (req->n_channels) { 5734 arg.n_channels = req->n_channels; 5735 for (i = 0; i < arg.n_channels; i++) 5736 arg.channels[i] = req->channels[i]->center_freq; 5737 } 5738 5739 ret = ath10k_start_scan(ar, &arg); 5740 if (ret) { 5741 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5742 spin_lock_bh(&ar->data_lock); 5743 ar->scan.state = ATH10K_SCAN_IDLE; 5744 spin_unlock_bh(&ar->data_lock); 5745 } 5746 5747 /* Add a 200ms margin to account for event/command processing */ 5748 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5749 msecs_to_jiffies(arg.max_scan_time + 5750 200)); 5751 5752 exit: 5753 mutex_unlock(&ar->conf_mutex); 5754 return ret; 5755 } 5756 5757 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5758 struct ieee80211_vif *vif) 5759 { 5760 struct ath10k *ar = hw->priv; 5761 5762 mutex_lock(&ar->conf_mutex); 5763 ath10k_scan_abort(ar); 5764 mutex_unlock(&ar->conf_mutex); 5765 5766 cancel_delayed_work_sync(&ar->scan.timeout); 5767 } 5768 5769 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 5770 struct ath10k_vif *arvif, 5771 enum set_key_cmd cmd, 5772 struct ieee80211_key_conf *key) 5773 { 5774 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 5775 int ret; 5776 5777 /* 10.1 firmware branch requires default key index to be set to group 5778 * key index after installing it. Otherwise FW/HW Txes corrupted 5779 * frames with multi-vif APs. This is not required for main firmware 5780 * branch (e.g. 636). 5781 * 5782 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 5783 * 5784 * FIXME: It remains unknown if this is required for multi-vif STA 5785 * interfaces on 10.1. 
5786 */ 5787 5788 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 5789 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 5790 return; 5791 5792 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 5793 return; 5794 5795 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 5796 return; 5797 5798 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5799 return; 5800 5801 if (cmd != SET_KEY) 5802 return; 5803 5804 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5805 key->keyidx); 5806 if (ret) 5807 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 5808 arvif->vdev_id, ret); 5809 } 5810 5811 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 5812 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 5813 struct ieee80211_key_conf *key) 5814 { 5815 struct ath10k *ar = hw->priv; 5816 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5817 struct ath10k_peer *peer; 5818 const u8 *peer_addr; 5819 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 5820 key->cipher == WLAN_CIPHER_SUITE_WEP104; 5821 int ret = 0; 5822 int ret2; 5823 u32 flags = 0; 5824 u32 flags2; 5825 5826 /* this one needs to be done in software */ 5827 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 5828 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 5829 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 5830 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 5831 return 1; 5832 5833 if (arvif->nohwcrypt) 5834 return 1; 5835 5836 if (key->keyidx > WMI_MAX_KEY_INDEX) 5837 return -ENOSPC; 5838 5839 mutex_lock(&ar->conf_mutex); 5840 5841 if (sta) 5842 peer_addr = sta->addr; 5843 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 5844 peer_addr = vif->bss_conf.bssid; 5845 else 5846 peer_addr = vif->addr; 5847 5848 key->hw_key_idx = key->keyidx; 5849 5850 if (is_wep) { 5851 if (cmd == SET_KEY) 5852 arvif->wep_keys[key->keyidx] = key; 5853 else 5854 arvif->wep_keys[key->keyidx] = NULL; 5855 } 5856 5857 /* the peer should not disappear in mid-way (unless FW goes awry) since 5858 * we already hold conf_mutex. we just make sure its there now. 5859 */ 5860 spin_lock_bh(&ar->data_lock); 5861 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5862 spin_unlock_bh(&ar->data_lock); 5863 5864 if (!peer) { 5865 if (cmd == SET_KEY) { 5866 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 5867 peer_addr); 5868 ret = -EOPNOTSUPP; 5869 goto exit; 5870 } else { 5871 /* if the peer doesn't exist there is no key to disable anymore */ 5872 goto exit; 5873 } 5874 } 5875 5876 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 5877 flags |= WMI_KEY_PAIRWISE; 5878 else 5879 flags |= WMI_KEY_GROUP; 5880 5881 if (is_wep) { 5882 if (cmd == DISABLE_KEY) 5883 ath10k_clear_vdev_key(arvif, key); 5884 5885 /* When WEP keys are uploaded it's possible that there are 5886 * stations associated already (e.g. when merging) without any 5887 * keys. Static WEP needs an explicit per-peer key upload. 5888 */ 5889 if (vif->type == NL80211_IFTYPE_ADHOC && 5890 cmd == SET_KEY) 5891 ath10k_mac_vif_update_wep_key(arvif, key); 5892 5893 /* 802.1x never sets the def_wep_key_idx so each set_key() 5894 * call changes default tx key. 5895 * 5896 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 5897 * after first set_key(). 
5898 */ 5899 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 5900 flags |= WMI_KEY_TX_USAGE; 5901 } 5902 5903 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 5904 if (ret) { 5905 WARN_ON(ret > 0); 5906 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 5907 arvif->vdev_id, peer_addr, ret); 5908 goto exit; 5909 } 5910 5911 /* mac80211 sets static WEP keys as groupwise while firmware requires 5912 * them to be installed twice as both pairwise and groupwise. 5913 */ 5914 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 5915 flags2 = flags; 5916 flags2 &= ~WMI_KEY_GROUP; 5917 flags2 |= WMI_KEY_PAIRWISE; 5918 5919 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 5920 if (ret) { 5921 WARN_ON(ret > 0); 5922 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 5923 arvif->vdev_id, peer_addr, ret); 5924 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 5925 peer_addr, flags); 5926 if (ret2) { 5927 WARN_ON(ret2 > 0); 5928 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 5929 arvif->vdev_id, peer_addr, ret2); 5930 } 5931 goto exit; 5932 } 5933 } 5934 5935 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 5936 5937 spin_lock_bh(&ar->data_lock); 5938 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 5939 if (peer && cmd == SET_KEY) 5940 peer->keys[key->keyidx] = key; 5941 else if (peer && cmd == DISABLE_KEY) 5942 peer->keys[key->keyidx] = NULL; 5943 else if (peer == NULL) 5944 /* impossible unless FW goes crazy */ 5945 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 5946 spin_unlock_bh(&ar->data_lock); 5947 5948 if (sta && sta->tdls) 5949 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 5950 WMI_PEER_AUTHORIZE, 1); 5951 5952 exit: 5953 mutex_unlock(&ar->conf_mutex); 5954 return ret; 5955 } 5956 5957 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 5958 struct ieee80211_vif *vif, 5959 int keyidx) 5960 { 5961 struct ath10k *ar = hw->priv; 5962 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5963 int ret; 5964 5965 mutex_lock(&arvif->ar->conf_mutex); 5966 5967 if (arvif->ar->state != ATH10K_STATE_ON) 5968 goto unlock; 5969 5970 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 5971 arvif->vdev_id, keyidx); 5972 5973 ret = ath10k_wmi_vdev_set_param(arvif->ar, 5974 arvif->vdev_id, 5975 arvif->ar->wmi.vdev_param->def_keyid, 5976 keyidx); 5977 5978 if (ret) { 5979 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 5980 arvif->vdev_id, 5981 ret); 5982 goto unlock; 5983 } 5984 5985 arvif->def_wep_key_idx = keyidx; 5986 5987 unlock: 5988 mutex_unlock(&arvif->ar->conf_mutex); 5989 } 5990 5991 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 5992 { 5993 struct ath10k *ar; 5994 struct ath10k_vif *arvif; 5995 struct ath10k_sta *arsta; 5996 struct ieee80211_sta *sta; 5997 struct cfg80211_chan_def def; 5998 enum nl80211_band band; 5999 const u8 *ht_mcs_mask; 6000 const u16 *vht_mcs_mask; 6001 u32 changed, bw, nss, smps; 6002 int err; 6003 6004 arsta = container_of(wk, struct ath10k_sta, update_wk); 6005 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 6006 arvif = arsta->arvif; 6007 ar = arvif->ar; 6008 6009 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 6010 return; 6011 6012 band = def.chan->band; 6013 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 6014 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 6015 6016 spin_lock_bh(&ar->data_lock); 6017 6018 changed = arsta->changed; 6019 
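	/* The pending flags are cleared under the same data_lock so that an
	 * update arriving after this point queues a fresh run of this worker
	 * instead of being lost.
	 */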
arsta->changed = 0; 6020 6021 bw = arsta->bw; 6022 nss = arsta->nss; 6023 smps = arsta->smps; 6024 6025 spin_unlock_bh(&ar->data_lock); 6026 6027 mutex_lock(&ar->conf_mutex); 6028 6029 nss = max_t(u32, 1, nss); 6030 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6031 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6032 6033 if (changed & IEEE80211_RC_BW_CHANGED) { 6034 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 6035 sta->addr, bw); 6036 6037 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6038 WMI_PEER_CHAN_WIDTH, bw); 6039 if (err) 6040 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6041 sta->addr, bw, err); 6042 } 6043 6044 if (changed & IEEE80211_RC_NSS_CHANGED) { 6045 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6046 sta->addr, nss); 6047 6048 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6049 WMI_PEER_NSS, nss); 6050 if (err) 6051 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6052 sta->addr, nss, err); 6053 } 6054 6055 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6056 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6057 sta->addr, smps); 6058 6059 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6060 WMI_PEER_SMPS_STATE, smps); 6061 if (err) 6062 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6063 sta->addr, smps, err); 6064 } 6065 6066 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 6067 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 6068 sta->addr); 6069 6070 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6071 if (err) 6072 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6073 sta->addr); 6074 } 6075 6076 mutex_unlock(&ar->conf_mutex); 6077 } 6078 6079 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6080 struct ieee80211_sta *sta) 6081 { 6082 struct ath10k *ar = arvif->ar; 6083 6084 lockdep_assert_held(&ar->conf_mutex); 6085 6086 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6087 return 0; 6088 6089 if (ar->num_stations >= ar->max_num_stations) 6090 return -ENOBUFS; 6091 6092 ar->num_stations++; 6093 6094 return 0; 6095 } 6096 6097 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6098 struct ieee80211_sta *sta) 6099 { 6100 struct ath10k *ar = arvif->ar; 6101 6102 lockdep_assert_held(&ar->conf_mutex); 6103 6104 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6105 return; 6106 6107 ar->num_stations--; 6108 } 6109 6110 static int ath10k_sta_state(struct ieee80211_hw *hw, 6111 struct ieee80211_vif *vif, 6112 struct ieee80211_sta *sta, 6113 enum ieee80211_sta_state old_state, 6114 enum ieee80211_sta_state new_state) 6115 { 6116 struct ath10k *ar = hw->priv; 6117 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6118 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6119 struct ath10k_peer *peer; 6120 int ret = 0; 6121 int i; 6122 6123 if (old_state == IEEE80211_STA_NOTEXIST && 6124 new_state == IEEE80211_STA_NONE) { 6125 memset(arsta, 0, sizeof(*arsta)); 6126 arsta->arvif = arvif; 6127 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6128 6129 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6130 ath10k_mac_txq_init(sta->txq[i]); 6131 } 6132 6133 /* cancel must be done outside the mutex to avoid deadlock */ 6134 if ((old_state == IEEE80211_STA_NONE && 6135 new_state == IEEE80211_STA_NOTEXIST)) 6136 cancel_work_sync(&arsta->update_wk); 6137 6138 mutex_lock(&ar->conf_mutex); 6139 6140 if (old_state == IEEE80211_STA_NOTEXIST && 6141 new_state == 
IEEE80211_STA_NONE) { 6142 /* 6143 * New station addition. 6144 */ 6145 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 6146 u32 num_tdls_stations; 6147 u32 num_tdls_vifs; 6148 6149 ath10k_dbg(ar, ATH10K_DBG_MAC, 6150 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 6151 arvif->vdev_id, sta->addr, 6152 ar->num_stations + 1, ar->max_num_stations, 6153 ar->num_peers + 1, ar->max_num_peers); 6154 6155 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 6156 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 6157 6158 if (sta->tdls) { 6159 if (num_tdls_stations >= ar->max_num_tdls_vdevs) { 6160 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 6161 arvif->vdev_id, 6162 ar->max_num_tdls_vdevs); 6163 ret = -ELNRNG; 6164 goto exit; 6165 } 6166 peer_type = WMI_PEER_TYPE_TDLS; 6167 } 6168 6169 ret = ath10k_mac_inc_num_stations(arvif, sta); 6170 if (ret) { 6171 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 6172 ar->max_num_stations); 6173 goto exit; 6174 } 6175 6176 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id, 6177 sta->addr, peer_type); 6178 if (ret) { 6179 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 6180 sta->addr, arvif->vdev_id, ret); 6181 ath10k_mac_dec_num_stations(arvif, sta); 6182 goto exit; 6183 } 6184 6185 spin_lock_bh(&ar->data_lock); 6186 6187 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 6188 if (!peer) { 6189 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 6190 vif->addr, arvif->vdev_id); 6191 spin_unlock_bh(&ar->data_lock); 6192 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6193 ath10k_mac_dec_num_stations(arvif, sta); 6194 ret = -ENOENT; 6195 goto exit; 6196 } 6197 6198 arsta->peer_id = find_first_bit(peer->peer_ids, 6199 ATH10K_MAX_NUM_PEER_IDS); 6200 6201 spin_unlock_bh(&ar->data_lock); 6202 6203 if (!sta->tdls) 6204 goto exit; 6205 6206 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6207 WMI_TDLS_ENABLE_ACTIVE); 6208 if (ret) { 6209 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6210 arvif->vdev_id, ret); 6211 ath10k_peer_delete(ar, arvif->vdev_id, 6212 sta->addr); 6213 ath10k_mac_dec_num_stations(arvif, sta); 6214 goto exit; 6215 } 6216 6217 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6218 WMI_TDLS_PEER_STATE_PEERING); 6219 if (ret) { 6220 ath10k_warn(ar, 6221 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 6222 sta->addr, arvif->vdev_id, ret); 6223 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6224 ath10k_mac_dec_num_stations(arvif, sta); 6225 6226 if (num_tdls_stations != 0) 6227 goto exit; 6228 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6229 WMI_TDLS_DISABLE); 6230 } 6231 } else if ((old_state == IEEE80211_STA_NONE && 6232 new_state == IEEE80211_STA_NOTEXIST)) { 6233 /* 6234 * Existing station deletion. 
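		 * Tear down any TDLS state first, then delete the firmware
		 * peer, release the station slot and finally drop stale
		 * peer_map entries that still reference this station.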
6235 */ 6236 ath10k_dbg(ar, ATH10K_DBG_MAC, 6237 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6238 arvif->vdev_id, sta->addr, sta); 6239 6240 if (sta->tdls) { 6241 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, 6242 sta, 6243 WMI_TDLS_PEER_STATE_TEARDOWN); 6244 if (ret) 6245 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", 6246 sta->addr, 6247 WMI_TDLS_PEER_STATE_TEARDOWN, ret); 6248 } 6249 6250 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6251 if (ret) 6252 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6253 sta->addr, arvif->vdev_id, ret); 6254 6255 ath10k_mac_dec_num_stations(arvif, sta); 6256 6257 spin_lock_bh(&ar->data_lock); 6258 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6259 peer = ar->peer_map[i]; 6260 if (!peer) 6261 continue; 6262 6263 if (peer->sta == sta) { 6264 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6265 sta->addr, peer, i, arvif->vdev_id); 6266 peer->sta = NULL; 6267 6268 /* Clean up the peer object as well since we 6269 * must have failed to do this above. 6270 */ 6271 list_del(&peer->list); 6272 ar->peer_map[i] = NULL; 6273 kfree(peer); 6274 ar->num_peers--; 6275 } 6276 } 6277 spin_unlock_bh(&ar->data_lock); 6278 6279 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6280 ath10k_mac_txq_unref(ar, sta->txq[i]); 6281 6282 if (!sta->tdls) 6283 goto exit; 6284 6285 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6286 goto exit; 6287 6288 /* This was the last tdls peer in current vif */ 6289 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6290 WMI_TDLS_DISABLE); 6291 if (ret) { 6292 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6293 arvif->vdev_id, ret); 6294 } 6295 } else if (old_state == IEEE80211_STA_AUTH && 6296 new_state == IEEE80211_STA_ASSOC && 6297 (vif->type == NL80211_IFTYPE_AP || 6298 vif->type == NL80211_IFTYPE_MESH_POINT || 6299 vif->type == NL80211_IFTYPE_ADHOC)) { 6300 /* 6301 * New association. 6302 */ 6303 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6304 sta->addr); 6305 6306 ret = ath10k_station_assoc(ar, vif, sta, false); 6307 if (ret) 6308 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6309 sta->addr, arvif->vdev_id, ret); 6310 } else if (old_state == IEEE80211_STA_ASSOC && 6311 new_state == IEEE80211_STA_AUTHORIZED && 6312 sta->tdls) { 6313 /* 6314 * Tdls station authorized. 6315 */ 6316 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6317 sta->addr); 6318 6319 ret = ath10k_station_assoc(ar, vif, sta, false); 6320 if (ret) { 6321 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6322 sta->addr, arvif->vdev_id, ret); 6323 goto exit; 6324 } 6325 6326 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6327 WMI_TDLS_PEER_STATE_CONNECTED); 6328 if (ret) 6329 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6330 sta->addr, arvif->vdev_id, ret); 6331 } else if (old_state == IEEE80211_STA_ASSOC && 6332 new_state == IEEE80211_STA_AUTH && 6333 (vif->type == NL80211_IFTYPE_AP || 6334 vif->type == NL80211_IFTYPE_MESH_POINT || 6335 vif->type == NL80211_IFTYPE_ADHOC)) { 6336 /* 6337 * Disassociation. 
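		 * Only the association state is torn down here; the firmware
		 * peer itself is removed when the station returns to the
		 * NOTEXIST state.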
6338 */ 6339 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6340 sta->addr); 6341 6342 ret = ath10k_station_disassoc(ar, vif, sta); 6343 if (ret) 6344 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6345 sta->addr, arvif->vdev_id, ret); 6346 } 6347 exit: 6348 mutex_unlock(&ar->conf_mutex); 6349 return ret; 6350 } 6351 6352 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6353 u16 ac, bool enable) 6354 { 6355 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6356 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6357 u32 prio = 0, acc = 0; 6358 u32 value = 0; 6359 int ret = 0; 6360 6361 lockdep_assert_held(&ar->conf_mutex); 6362 6363 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6364 return 0; 6365 6366 switch (ac) { 6367 case IEEE80211_AC_VO: 6368 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6369 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6370 prio = 7; 6371 acc = 3; 6372 break; 6373 case IEEE80211_AC_VI: 6374 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6375 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6376 prio = 5; 6377 acc = 2; 6378 break; 6379 case IEEE80211_AC_BE: 6380 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6381 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6382 prio = 2; 6383 acc = 1; 6384 break; 6385 case IEEE80211_AC_BK: 6386 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6387 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6388 prio = 0; 6389 acc = 0; 6390 break; 6391 } 6392 6393 if (enable) 6394 arvif->u.sta.uapsd |= value; 6395 else 6396 arvif->u.sta.uapsd &= ~value; 6397 6398 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6399 WMI_STA_PS_PARAM_UAPSD, 6400 arvif->u.sta.uapsd); 6401 if (ret) { 6402 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6403 goto exit; 6404 } 6405 6406 if (arvif->u.sta.uapsd) 6407 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6408 else 6409 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6410 6411 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6412 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6413 value); 6414 if (ret) 6415 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6416 6417 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6418 if (ret) { 6419 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6420 arvif->vdev_id, ret); 6421 return ret; 6422 } 6423 6424 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6425 if (ret) { 6426 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6427 arvif->vdev_id, ret); 6428 return ret; 6429 } 6430 6431 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6432 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6433 /* Only userspace can make an educated decision when to send 6434 * trigger frame. The following effectively disables u-UAPSD 6435 * autotrigger in firmware (which is enabled by default 6436 * provided the autotrigger service is available). 
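		 * A zero service interval combined with the maximum
		 * suspend/delay intervals below achieves that.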
6437 */ 6438 6439 arg.wmm_ac = acc; 6440 arg.user_priority = prio; 6441 arg.service_interval = 0; 6442 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6443 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6444 6445 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6446 arvif->bssid, &arg, 1); 6447 if (ret) { 6448 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6449 ret); 6450 return ret; 6451 } 6452 } 6453 6454 exit: 6455 return ret; 6456 } 6457 6458 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6459 struct ieee80211_vif *vif, u16 ac, 6460 const struct ieee80211_tx_queue_params *params) 6461 { 6462 struct ath10k *ar = hw->priv; 6463 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6464 struct wmi_wmm_params_arg *p = NULL; 6465 int ret; 6466 6467 mutex_lock(&ar->conf_mutex); 6468 6469 switch (ac) { 6470 case IEEE80211_AC_VO: 6471 p = &arvif->wmm_params.ac_vo; 6472 break; 6473 case IEEE80211_AC_VI: 6474 p = &arvif->wmm_params.ac_vi; 6475 break; 6476 case IEEE80211_AC_BE: 6477 p = &arvif->wmm_params.ac_be; 6478 break; 6479 case IEEE80211_AC_BK: 6480 p = &arvif->wmm_params.ac_bk; 6481 break; 6482 } 6483 6484 if (WARN_ON(!p)) { 6485 ret = -EINVAL; 6486 goto exit; 6487 } 6488 6489 p->cwmin = params->cw_min; 6490 p->cwmax = params->cw_max; 6491 p->aifs = params->aifs; 6492 6493 /* 6494 * The channel time duration programmed in the HW is in absolute 6495 * microseconds, while mac80211 gives the txop in units of 6496 * 32 microseconds. 6497 */ 6498 p->txop = params->txop * 32; 6499 6500 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6501 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6502 &arvif->wmm_params); 6503 if (ret) { 6504 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6505 arvif->vdev_id, ret); 6506 goto exit; 6507 } 6508 } else { 6509 /* This won't work well with multi-interface cases but it's 6510 * better than nothing. 
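		 * (the most recent per-vif configuration simply overwrites
		 * the pdev-wide WMM settings)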
6511 */ 6512 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6513 if (ret) { 6514 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6515 goto exit; 6516 } 6517 } 6518 6519 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6520 if (ret) 6521 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6522 6523 exit: 6524 mutex_unlock(&ar->conf_mutex); 6525 return ret; 6526 } 6527 6528 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6529 6530 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6531 struct ieee80211_vif *vif, 6532 struct ieee80211_channel *chan, 6533 int duration, 6534 enum ieee80211_roc_type type) 6535 { 6536 struct ath10k *ar = hw->priv; 6537 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6538 struct wmi_start_scan_arg arg; 6539 int ret = 0; 6540 u32 scan_time_msec; 6541 6542 mutex_lock(&ar->conf_mutex); 6543 6544 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6545 ret = -EBUSY; 6546 goto exit; 6547 } 6548 6549 spin_lock_bh(&ar->data_lock); 6550 switch (ar->scan.state) { 6551 case ATH10K_SCAN_IDLE: 6552 reinit_completion(&ar->scan.started); 6553 reinit_completion(&ar->scan.completed); 6554 reinit_completion(&ar->scan.on_channel); 6555 ar->scan.state = ATH10K_SCAN_STARTING; 6556 ar->scan.is_roc = true; 6557 ar->scan.vdev_id = arvif->vdev_id; 6558 ar->scan.roc_freq = chan->center_freq; 6559 ar->scan.roc_notify = true; 6560 ret = 0; 6561 break; 6562 case ATH10K_SCAN_STARTING: 6563 case ATH10K_SCAN_RUNNING: 6564 case ATH10K_SCAN_ABORTING: 6565 ret = -EBUSY; 6566 break; 6567 } 6568 spin_unlock_bh(&ar->data_lock); 6569 6570 if (ret) 6571 goto exit; 6572 6573 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6574 6575 memset(&arg, 0, sizeof(arg)); 6576 ath10k_wmi_start_scan_init(ar, &arg); 6577 arg.vdev_id = arvif->vdev_id; 6578 arg.scan_id = ATH10K_SCAN_ID; 6579 arg.n_channels = 1; 6580 arg.channels[0] = chan->center_freq; 6581 arg.dwell_time_active = scan_time_msec; 6582 arg.dwell_time_passive = scan_time_msec; 6583 arg.max_scan_time = scan_time_msec; 6584 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6585 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6586 arg.burst_duration_ms = duration; 6587 6588 ret = ath10k_start_scan(ar, &arg); 6589 if (ret) { 6590 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6591 spin_lock_bh(&ar->data_lock); 6592 ar->scan.state = ATH10K_SCAN_IDLE; 6593 spin_unlock_bh(&ar->data_lock); 6594 goto exit; 6595 } 6596 6597 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6598 if (ret == 0) { 6599 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6600 6601 ret = ath10k_scan_stop(ar); 6602 if (ret) 6603 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6604 6605 ret = -ETIMEDOUT; 6606 goto exit; 6607 } 6608 6609 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6610 msecs_to_jiffies(duration)); 6611 6612 ret = 0; 6613 exit: 6614 mutex_unlock(&ar->conf_mutex); 6615 return ret; 6616 } 6617 6618 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6619 { 6620 struct ath10k *ar = hw->priv; 6621 6622 mutex_lock(&ar->conf_mutex); 6623 6624 spin_lock_bh(&ar->data_lock); 6625 ar->scan.roc_notify = false; 6626 spin_unlock_bh(&ar->data_lock); 6627 6628 ath10k_scan_abort(ar); 6629 6630 mutex_unlock(&ar->conf_mutex); 6631 6632 cancel_delayed_work_sync(&ar->scan.timeout); 6633 6634 return 0; 6635 } 6636 6637 /* 6638 * Both RTS and Fragmentation threshold are interface-specific 6639 * in ath10k, but device-specific in mac80211. 
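 * The handlers below therefore apply the RTS threshold to every vdev in
 * turn and reject fragmentation threshold changes altogether.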
 */

static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_vif *arvif;
	int ret = 0;

	mutex_lock(&ar->conf_mutex);
	list_for_each_entry(arvif, &ar->arvifs, list) {
		ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
			   arvif->vdev_id, value);

		ret = ath10k_mac_set_rts(arvif, value);
		if (ret) {
			ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
				    arvif->vdev_id, ret);
			break;
		}
	}
	mutex_unlock(&ar->conf_mutex);

	return ret;
}

static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
{
	/* Even though there's a WMI enum for fragmentation threshold no known
	 * firmware actually implements it. Moreover it is not possible to
	 * leave frame fragmentation to mac80211 because firmware clears the
	 * "more fragments" bit in frame control making it impossible for
	 * remote devices to reassemble frames.
	 *
	 * Hence implement a dummy callback just to say fragmentation isn't
	 * supported. This effectively prevents mac80211 from doing frame
	 * fragmentation in software.
	 */
	return -EOPNOTSUPP;
}

static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
			 u32 queues, bool drop)
{
	struct ath10k *ar = hw->priv;
	bool skip;
	long time_left;

	/* mac80211 doesn't care if we really xmit queued frames or not;
	 * we'll collect those frames either way if we stop/delete vdevs
	 */
	if (drop)
		return;

	mutex_lock(&ar->conf_mutex);

	if (ar->state == ATH10K_STATE_WEDGED)
		goto skip;

	time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
			bool empty;

			spin_lock_bh(&ar->htt.tx_lock);
			empty = (ar->htt.num_pending_tx == 0);
			spin_unlock_bh(&ar->htt.tx_lock);

			skip = (ar->state == ATH10K_STATE_WEDGED) ||
			       test_bit(ATH10K_FLAG_CRASH_FLUSH,
					&ar->dev_flags);

			(empty || skip);
		}), ATH10K_FLUSH_TIMEOUT_HZ);

	if (time_left == 0 || skip)
		ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
			    skip, ar->state, time_left);

skip:
	mutex_unlock(&ar->conf_mutex);
}

/* TODO: Implement this function properly
 * For now it is needed to reply to Probe Requests in IBSS mode.
 * Probably we need this information from FW.
 */
static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
{
	return 1;
}

static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
				     enum ieee80211_reconfig_type reconfig_type)
{
	struct ath10k *ar = hw->priv;

	if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
		return;

	mutex_lock(&ar->conf_mutex);

	/* If device failed to restart it will be in a different state, e.g.
6740 * ATH10K_STATE_WEDGED 6741 */ 6742 if (ar->state == ATH10K_STATE_RESTARTED) { 6743 ath10k_info(ar, "device successfully recovered\n"); 6744 ar->state = ATH10K_STATE_ON; 6745 ieee80211_wake_queues(ar->hw); 6746 } 6747 6748 mutex_unlock(&ar->conf_mutex); 6749 } 6750 6751 static void 6752 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 6753 struct ieee80211_channel *channel) 6754 { 6755 int ret; 6756 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 6757 6758 lockdep_assert_held(&ar->conf_mutex); 6759 6760 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 6761 (ar->rx_channel != channel)) 6762 return; 6763 6764 if (ar->scan.state != ATH10K_SCAN_IDLE) { 6765 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 6766 return; 6767 } 6768 6769 reinit_completion(&ar->bss_survey_done); 6770 6771 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 6772 if (ret) { 6773 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 6774 return; 6775 } 6776 6777 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 6778 if (!ret) { 6779 ath10k_warn(ar, "bss channel survey timed out\n"); 6780 return; 6781 } 6782 } 6783 6784 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 6785 struct survey_info *survey) 6786 { 6787 struct ath10k *ar = hw->priv; 6788 struct ieee80211_supported_band *sband; 6789 struct survey_info *ar_survey = &ar->survey[idx]; 6790 int ret = 0; 6791 6792 mutex_lock(&ar->conf_mutex); 6793 6794 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 6795 if (sband && idx >= sband->n_channels) { 6796 idx -= sband->n_channels; 6797 sband = NULL; 6798 } 6799 6800 if (!sband) 6801 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 6802 6803 if (!sband || idx >= sband->n_channels) { 6804 ret = -ENOENT; 6805 goto exit; 6806 } 6807 6808 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 6809 6810 spin_lock_bh(&ar->data_lock); 6811 memcpy(survey, ar_survey, sizeof(*survey)); 6812 spin_unlock_bh(&ar->data_lock); 6813 6814 survey->channel = &sband->channels[idx]; 6815 6816 if (ar->rx_channel == survey->channel) 6817 survey->filled |= SURVEY_INFO_IN_USE; 6818 6819 exit: 6820 mutex_unlock(&ar->conf_mutex); 6821 return ret; 6822 } 6823 6824 static bool 6825 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 6826 enum nl80211_band band, 6827 const struct cfg80211_bitrate_mask *mask) 6828 { 6829 int num_rates = 0; 6830 int i; 6831 6832 num_rates += hweight32(mask->control[band].legacy); 6833 6834 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 6835 num_rates += hweight8(mask->control[band].ht_mcs[i]); 6836 6837 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 6838 num_rates += hweight16(mask->control[band].vht_mcs[i]); 6839 6840 return num_rates == 1; 6841 } 6842 6843 static bool 6844 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 6845 enum nl80211_band band, 6846 const struct cfg80211_bitrate_mask *mask, 6847 int *nss) 6848 { 6849 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6850 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 6851 u8 ht_nss_mask = 0; 6852 u8 vht_nss_mask = 0; 6853 int i; 6854 6855 if (mask->control[band].legacy) 6856 return false; 6857 6858 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6859 if (mask->control[band].ht_mcs[i] == 0) 6860 continue; 6861 else if (mask->control[band].ht_mcs[i] == 6862 sband->ht_cap.mcs.rx_mask[i]) 6863 ht_nss_mask |= BIT(i); 6864 else 6865 return false; 6866 
} 6867 6868 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6869 if (mask->control[band].vht_mcs[i] == 0) 6870 continue; 6871 else if (mask->control[band].vht_mcs[i] == 6872 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 6873 vht_nss_mask |= BIT(i); 6874 else 6875 return false; 6876 } 6877 6878 if (ht_nss_mask != vht_nss_mask) 6879 return false; 6880 6881 if (ht_nss_mask == 0) 6882 return false; 6883 6884 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 6885 return false; 6886 6887 *nss = fls(ht_nss_mask); 6888 6889 return true; 6890 } 6891 6892 static int 6893 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 6894 enum nl80211_band band, 6895 const struct cfg80211_bitrate_mask *mask, 6896 u8 *rate, u8 *nss) 6897 { 6898 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 6899 int rate_idx; 6900 int i; 6901 u16 bitrate; 6902 u8 preamble; 6903 u8 hw_rate; 6904 6905 if (hweight32(mask->control[band].legacy) == 1) { 6906 rate_idx = ffs(mask->control[band].legacy) - 1; 6907 6908 hw_rate = sband->bitrates[rate_idx].hw_value; 6909 bitrate = sband->bitrates[rate_idx].bitrate; 6910 6911 if (ath10k_mac_bitrate_is_cck(bitrate)) 6912 preamble = WMI_RATE_PREAMBLE_CCK; 6913 else 6914 preamble = WMI_RATE_PREAMBLE_OFDM; 6915 6916 *nss = 1; 6917 *rate = preamble << 6 | 6918 (*nss - 1) << 4 | 6919 hw_rate << 0; 6920 6921 return 0; 6922 } 6923 6924 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 6925 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 6926 *nss = i + 1; 6927 *rate = WMI_RATE_PREAMBLE_HT << 6 | 6928 (*nss - 1) << 4 | 6929 (ffs(mask->control[band].ht_mcs[i]) - 1); 6930 6931 return 0; 6932 } 6933 } 6934 6935 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 6936 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 6937 *nss = i + 1; 6938 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 6939 (*nss - 1) << 4 | 6940 (ffs(mask->control[band].vht_mcs[i]) - 1); 6941 6942 return 0; 6943 } 6944 } 6945 6946 return -EINVAL; 6947 } 6948 6949 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 6950 u8 rate, u8 nss, u8 sgi, u8 ldpc) 6951 { 6952 struct ath10k *ar = arvif->ar; 6953 u32 vdev_param; 6954 int ret; 6955 6956 lockdep_assert_held(&ar->conf_mutex); 6957 6958 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 6959 arvif->vdev_id, rate, nss, sgi); 6960 6961 vdev_param = ar->wmi.vdev_param->fixed_rate; 6962 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 6963 if (ret) { 6964 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 6965 rate, ret); 6966 return ret; 6967 } 6968 6969 vdev_param = ar->wmi.vdev_param->nss; 6970 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 6971 if (ret) { 6972 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 6973 return ret; 6974 } 6975 6976 vdev_param = ar->wmi.vdev_param->sgi; 6977 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 6978 if (ret) { 6979 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 6980 return ret; 6981 } 6982 6983 vdev_param = ar->wmi.vdev_param->ldpc; 6984 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 6985 if (ret) { 6986 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 6987 return ret; 6988 } 6989 6990 return 0; 6991 } 6992 6993 static bool 6994 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 6995 enum nl80211_band band, 6996 const struct cfg80211_bitrate_mask *mask) 6997 { 6998 int i; 
6999 u16 vht_mcs; 7000 7001 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 7002 * to express all VHT MCS rate masks. Effectively only the following 7003 * ranges can be used: none, 0-7, 0-8 and 0-9. 7004 */ 7005 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7006 vht_mcs = mask->control[band].vht_mcs[i]; 7007 7008 switch (vht_mcs) { 7009 case 0: 7010 case BIT(8) - 1: 7011 case BIT(9) - 1: 7012 case BIT(10) - 1: 7013 break; 7014 default: 7015 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 7016 return false; 7017 } 7018 } 7019 7020 return true; 7021 } 7022 7023 static void ath10k_mac_set_bitrate_mask_iter(void *data, 7024 struct ieee80211_sta *sta) 7025 { 7026 struct ath10k_vif *arvif = data; 7027 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7028 struct ath10k *ar = arvif->ar; 7029 7030 if (arsta->arvif != arvif) 7031 return; 7032 7033 spin_lock_bh(&ar->data_lock); 7034 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7035 spin_unlock_bh(&ar->data_lock); 7036 7037 ieee80211_queue_work(ar->hw, &arsta->update_wk); 7038 } 7039 7040 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7041 struct ieee80211_vif *vif, 7042 const struct cfg80211_bitrate_mask *mask) 7043 { 7044 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7045 struct cfg80211_chan_def def; 7046 struct ath10k *ar = arvif->ar; 7047 enum nl80211_band band; 7048 const u8 *ht_mcs_mask; 7049 const u16 *vht_mcs_mask; 7050 u8 rate; 7051 u8 nss; 7052 u8 sgi; 7053 u8 ldpc; 7054 int single_nss; 7055 int ret; 7056 7057 if (ath10k_mac_vif_chan(vif, &def)) 7058 return -EPERM; 7059 7060 band = def.chan->band; 7061 ht_mcs_mask = mask->control[band].ht_mcs; 7062 vht_mcs_mask = mask->control[band].vht_mcs; 7063 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7064 7065 sgi = mask->control[band].gi; 7066 if (sgi == NL80211_TXRATE_FORCE_LGI) 7067 return -EINVAL; 7068 7069 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7070 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7071 &rate, &nss); 7072 if (ret) { 7073 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7074 arvif->vdev_id, ret); 7075 return ret; 7076 } 7077 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7078 &single_nss)) { 7079 rate = WMI_FIXED_RATE_NONE; 7080 nss = single_nss; 7081 } else { 7082 rate = WMI_FIXED_RATE_NONE; 7083 nss = min(ar->num_rf_chains, 7084 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7085 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7086 7087 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7088 return -EINVAL; 7089 7090 mutex_lock(&ar->conf_mutex); 7091 7092 arvif->bitrate_mask = *mask; 7093 ieee80211_iterate_stations_atomic(ar->hw, 7094 ath10k_mac_set_bitrate_mask_iter, 7095 arvif); 7096 7097 mutex_unlock(&ar->conf_mutex); 7098 } 7099 7100 mutex_lock(&ar->conf_mutex); 7101 7102 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7103 if (ret) { 7104 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7105 arvif->vdev_id, ret); 7106 goto exit; 7107 } 7108 7109 exit: 7110 mutex_unlock(&ar->conf_mutex); 7111 7112 return ret; 7113 } 7114 7115 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7116 struct ieee80211_vif *vif, 7117 struct ieee80211_sta *sta, 7118 u32 changed) 7119 { 7120 struct ath10k *ar = hw->priv; 7121 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7122 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7123 struct ath10k_peer *peer; 7124 u32 bw, smps; 7125 7126 
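	/* Record what changed under data_lock and defer the actual WMI
	 * updates to ath10k_sta_rc_update_wk.
	 */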
spin_lock_bh(&ar->data_lock); 7127 7128 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 7129 if (!peer) { 7130 spin_unlock_bh(&ar->data_lock); 7131 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", 7132 sta->addr, arvif->vdev_id); 7133 return; 7134 } 7135 7136 ath10k_dbg(ar, ATH10K_DBG_MAC, 7137 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 7138 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7139 sta->smps_mode); 7140 7141 if (changed & IEEE80211_RC_BW_CHANGED) { 7142 bw = WMI_PEER_CHWIDTH_20MHZ; 7143 7144 switch (sta->bandwidth) { 7145 case IEEE80211_STA_RX_BW_20: 7146 bw = WMI_PEER_CHWIDTH_20MHZ; 7147 break; 7148 case IEEE80211_STA_RX_BW_40: 7149 bw = WMI_PEER_CHWIDTH_40MHZ; 7150 break; 7151 case IEEE80211_STA_RX_BW_80: 7152 bw = WMI_PEER_CHWIDTH_80MHZ; 7153 break; 7154 case IEEE80211_STA_RX_BW_160: 7155 bw = WMI_PEER_CHWIDTH_160MHZ; 7156 break; 7157 default: 7158 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7159 sta->bandwidth, sta->addr); 7160 bw = WMI_PEER_CHWIDTH_20MHZ; 7161 break; 7162 } 7163 7164 arsta->bw = bw; 7165 } 7166 7167 if (changed & IEEE80211_RC_NSS_CHANGED) 7168 arsta->nss = sta->rx_nss; 7169 7170 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7171 smps = WMI_PEER_SMPS_PS_NONE; 7172 7173 switch (sta->smps_mode) { 7174 case IEEE80211_SMPS_AUTOMATIC: 7175 case IEEE80211_SMPS_OFF: 7176 smps = WMI_PEER_SMPS_PS_NONE; 7177 break; 7178 case IEEE80211_SMPS_STATIC: 7179 smps = WMI_PEER_SMPS_STATIC; 7180 break; 7181 case IEEE80211_SMPS_DYNAMIC: 7182 smps = WMI_PEER_SMPS_DYNAMIC; 7183 break; 7184 case IEEE80211_SMPS_NUM_MODES: 7185 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7186 sta->smps_mode, sta->addr); 7187 smps = WMI_PEER_SMPS_PS_NONE; 7188 break; 7189 } 7190 7191 arsta->smps = smps; 7192 } 7193 7194 arsta->changed |= changed; 7195 7196 spin_unlock_bh(&ar->data_lock); 7197 7198 ieee80211_queue_work(hw, &arsta->update_wk); 7199 } 7200 7201 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7202 struct ieee80211_vif *vif, s64 tsf_offset) 7203 { 7204 struct ath10k *ar = hw->priv; 7205 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7206 u32 offset, vdev_param; 7207 int ret; 7208 7209 if (tsf_offset < 0) { 7210 vdev_param = ar->wmi.vdev_param->dec_tsf; 7211 offset = -tsf_offset; 7212 } else { 7213 vdev_param = ar->wmi.vdev_param->inc_tsf; 7214 offset = tsf_offset; 7215 } 7216 7217 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7218 vdev_param, offset); 7219 7220 if (ret && ret != -EOPNOTSUPP) 7221 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7222 offset, vdev_param, ret); 7223 } 7224 7225 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7226 struct ieee80211_vif *vif, 7227 struct ieee80211_ampdu_params *params) 7228 { 7229 struct ath10k *ar = hw->priv; 7230 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7231 struct ieee80211_sta *sta = params->sta; 7232 enum ieee80211_ampdu_mlme_action action = params->action; 7233 u16 tid = params->tid; 7234 7235 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7236 arvif->vdev_id, sta->addr, tid, action); 7237 7238 switch (action) { 7239 case IEEE80211_AMPDU_RX_START: 7240 case IEEE80211_AMPDU_RX_STOP: 7241 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7242 * creation/removal. Do we need to verify this? 
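		 * Returning success here keeps mac80211's Rx BA state in sync
		 * with those events.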
		 */
		return 0;
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/* Firmware offloads Tx aggregation entirely so deny mac80211
		 * Tx aggregation requests.
		 */
		return -EOPNOTSUPP;
	}

	return -EINVAL;
}

static void
ath10k_mac_update_rx_channel(struct ath10k *ar,
			     struct ieee80211_chanctx_conf *ctx,
			     struct ieee80211_vif_chanctx_switch *vifs,
			     int n_vifs)
{
	struct cfg80211_chan_def *def = NULL;

	/* Both locks are required because ar->rx_channel is modified. This
	 * allows readers to hold either lock.
	 */
	lockdep_assert_held(&ar->conf_mutex);
	lockdep_assert_held(&ar->data_lock);

	WARN_ON(ctx && vifs);
	WARN_ON(vifs && !n_vifs);

	/* FIXME: Sort of an optimization and a workaround. Peers and vifs are
	 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
	 * ppdu on Rx may reduce performance on low-end systems. It should be
	 * possible to make tables/hashmaps to speed the lookup up (be wary of
	 * cpu data cache lines though regarding sizes) but to keep the initial
	 * implementation simple and less intrusive fall back to the slow
	 * lookup only for multi-channel cases. Single-channel cases will
	 * continue to use the old channel derivation and thus performance
	 * should not be affected much.
	 */
	rcu_read_lock();
	if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
		ieee80211_iter_chan_contexts_atomic(ar->hw,
						    ath10k_mac_get_any_chandef_iter,
						    &def);

		if (vifs)
			def = &vifs[0].new_ctx->def;

		ar->rx_channel = def->chan;
	} else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
		   (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
		/* During driver restart due to firmware assert, since mac80211
		 * already has a valid channel context for the given radio, the
		 * channel context iteration returns num_chanctx > 0. So fix
		 * rx_channel when restart is in progress.
		 */
		ar->rx_channel = ctx->def.chan;
	} else {
		ar->rx_channel = NULL;
	}
	rcu_read_unlock();
}

static void
ath10k_mac_update_vif_chan(struct ath10k *ar,
			   struct ieee80211_vif_chanctx_switch *vifs,
			   int n_vifs)
{
	struct ath10k_vif *arvif;
	int ret;
	int i;

	lockdep_assert_held(&ar->conf_mutex);

	/* First stop monitor interface. Some FW versions crash if there's a
	 * lone monitor interface.
	 */
	if (ar->monitor_started)
		ath10k_monitor_stop(ar);

	for (i = 0; i < n_vifs; i++) {
		arvif = (void *)vifs[i].vif->drv_priv;

		ath10k_dbg(ar, ATH10K_DBG_MAC,
			   "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
			   arvif->vdev_id,
			   vifs[i].old_ctx->def.chan->center_freq,
			   vifs[i].new_ctx->def.chan->center_freq,
			   vifs[i].old_ctx->def.width,
			   vifs[i].new_ctx->def.width);

		if (WARN_ON(!arvif->is_started))
			continue;

		if (WARN_ON(!arvif->is_up))
			continue;

		ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
		if (ret) {
			ath10k_warn(ar, "failed to down vdev %d: %d\n",
				    arvif->vdev_id, ret);
			continue;
		}
	}

	/* All relevant vdevs are downed and associated channel resources
	 * should be available for the channel switch now.
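	 * Beacon and probe response templates are refreshed and each vdev is
	 * restarted on its new channel definition before being brought up
	 * again.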
7354 */ 7355 7356 spin_lock_bh(&ar->data_lock); 7357 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7358 spin_unlock_bh(&ar->data_lock); 7359 7360 for (i = 0; i < n_vifs; i++) { 7361 arvif = (void *)vifs[i].vif->drv_priv; 7362 7363 if (WARN_ON(!arvif->is_started)) 7364 continue; 7365 7366 if (WARN_ON(!arvif->is_up)) 7367 continue; 7368 7369 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7370 if (ret) 7371 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7372 ret); 7373 7374 ret = ath10k_mac_setup_prb_tmpl(arvif); 7375 if (ret) 7376 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7377 ret); 7378 7379 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7380 if (ret) { 7381 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7382 arvif->vdev_id, ret); 7383 continue; 7384 } 7385 7386 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7387 arvif->bssid); 7388 if (ret) { 7389 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7390 arvif->vdev_id, ret); 7391 continue; 7392 } 7393 } 7394 7395 ath10k_monitor_recalc(ar); 7396 } 7397 7398 static int 7399 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7400 struct ieee80211_chanctx_conf *ctx) 7401 { 7402 struct ath10k *ar = hw->priv; 7403 7404 ath10k_dbg(ar, ATH10K_DBG_MAC, 7405 "mac chanctx add freq %hu width %d ptr %pK\n", 7406 ctx->def.chan->center_freq, ctx->def.width, ctx); 7407 7408 mutex_lock(&ar->conf_mutex); 7409 7410 spin_lock_bh(&ar->data_lock); 7411 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7412 spin_unlock_bh(&ar->data_lock); 7413 7414 ath10k_recalc_radar_detection(ar); 7415 ath10k_monitor_recalc(ar); 7416 7417 mutex_unlock(&ar->conf_mutex); 7418 7419 return 0; 7420 } 7421 7422 static void 7423 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7424 struct ieee80211_chanctx_conf *ctx) 7425 { 7426 struct ath10k *ar = hw->priv; 7427 7428 ath10k_dbg(ar, ATH10K_DBG_MAC, 7429 "mac chanctx remove freq %hu width %d ptr %pK\n", 7430 ctx->def.chan->center_freq, ctx->def.width, ctx); 7431 7432 mutex_lock(&ar->conf_mutex); 7433 7434 spin_lock_bh(&ar->data_lock); 7435 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7436 spin_unlock_bh(&ar->data_lock); 7437 7438 ath10k_recalc_radar_detection(ar); 7439 ath10k_monitor_recalc(ar); 7440 7441 mutex_unlock(&ar->conf_mutex); 7442 } 7443 7444 struct ath10k_mac_change_chanctx_arg { 7445 struct ieee80211_chanctx_conf *ctx; 7446 struct ieee80211_vif_chanctx_switch *vifs; 7447 int n_vifs; 7448 int next_vif; 7449 }; 7450 7451 static void 7452 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7453 struct ieee80211_vif *vif) 7454 { 7455 struct ath10k_mac_change_chanctx_arg *arg = data; 7456 7457 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7458 return; 7459 7460 arg->n_vifs++; 7461 } 7462 7463 static void 7464 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7465 struct ieee80211_vif *vif) 7466 { 7467 struct ath10k_mac_change_chanctx_arg *arg = data; 7468 struct ieee80211_chanctx_conf *ctx; 7469 7470 ctx = rcu_access_pointer(vif->chanctx_conf); 7471 if (ctx != arg->ctx) 7472 return; 7473 7474 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7475 return; 7476 7477 arg->vifs[arg->next_vif].vif = vif; 7478 arg->vifs[arg->next_vif].old_ctx = ctx; 7479 arg->vifs[arg->next_vif].new_ctx = ctx; 7480 arg->next_vif++; 7481 } 7482 7483 static void 7484 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7485 struct ieee80211_chanctx_conf *ctx, 7486 u32 changed) 7487 { 7488 struct ath10k *ar = hw->priv; 7489 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7490 7491 mutex_lock(&ar->conf_mutex); 7492 7493 ath10k_dbg(ar, ATH10K_DBG_MAC, 7494 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7495 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7496 7497 /* This shouldn't really happen because channel switching should use 7498 * switch_vif_chanctx(). 7499 */ 7500 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7501 goto unlock; 7502 7503 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7504 ieee80211_iterate_active_interfaces_atomic( 7505 hw, 7506 IEEE80211_IFACE_ITER_NORMAL, 7507 ath10k_mac_change_chanctx_cnt_iter, 7508 &arg); 7509 if (arg.n_vifs == 0) 7510 goto radar; 7511 7512 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7513 GFP_KERNEL); 7514 if (!arg.vifs) 7515 goto radar; 7516 7517 ieee80211_iterate_active_interfaces_atomic( 7518 hw, 7519 IEEE80211_IFACE_ITER_NORMAL, 7520 ath10k_mac_change_chanctx_fill_iter, 7521 &arg); 7522 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7523 kfree(arg.vifs); 7524 } 7525 7526 radar: 7527 ath10k_recalc_radar_detection(ar); 7528 7529 /* FIXME: How to configure Rx chains properly? */ 7530 7531 /* No other actions are actually necessary. Firmware maintains channel 7532 * definitions per vdev internally and there's no host-side channel 7533 * context abstraction to configure, e.g. channel width. 7534 */ 7535 7536 unlock: 7537 mutex_unlock(&ar->conf_mutex); 7538 } 7539 7540 static int 7541 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7542 struct ieee80211_vif *vif, 7543 struct ieee80211_chanctx_conf *ctx) 7544 { 7545 struct ath10k *ar = hw->priv; 7546 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7547 int ret; 7548 7549 mutex_lock(&ar->conf_mutex); 7550 7551 ath10k_dbg(ar, ATH10K_DBG_MAC, 7552 "mac chanctx assign ptr %pK vdev_id %i\n", 7553 ctx, arvif->vdev_id); 7554 7555 if (WARN_ON(arvif->is_started)) { 7556 mutex_unlock(&ar->conf_mutex); 7557 return -EBUSY; 7558 } 7559 7560 ret = ath10k_vdev_start(arvif, &ctx->def); 7561 if (ret) { 7562 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7563 arvif->vdev_id, vif->addr, 7564 ctx->def.chan->center_freq, ret); 7565 goto err; 7566 } 7567 7568 arvif->is_started = true; 7569 7570 ret = ath10k_mac_vif_setup_ps(arvif); 7571 if (ret) { 7572 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7573 arvif->vdev_id, ret); 7574 goto err_stop; 7575 } 7576 7577 if (vif->type == NL80211_IFTYPE_MONITOR) { 7578 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7579 if (ret) { 7580 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7581 arvif->vdev_id, ret); 7582 goto err_stop; 7583 } 7584 7585 arvif->is_up = true; 7586 } 7587 7588 if (ath10k_mac_can_set_cts_prot(arvif)) { 7589 ret = ath10k_mac_set_cts_prot(arvif); 7590 if (ret) 7591 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7592 arvif->vdev_id, ret); 7593 } 7594 7595 if (ath10k_peer_stats_enabled(ar)) { 7596 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; 7597 ret = ath10k_wmi_pdev_pktlog_enable(ar, 7598 ar->pktlog_filter); 7599 if (ret) { 7600 ath10k_warn(ar, "failed to enable pktlog %d\n", ret); 7601 goto err_stop; 7602 } 7603 } 7604 7605 mutex_unlock(&ar->conf_mutex); 7606 return 0; 7607 7608 err_stop: 7609 ath10k_vdev_stop(arvif); 7610 arvif->is_started = false; 7611 ath10k_mac_vif_setup_ps(arvif); 7612 7613 err: 7614 mutex_unlock(&ar->conf_mutex); 7615 return ret; 7616 } 7617 7618 static void 7619 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7620 struct ieee80211_vif *vif, 7621 
struct ieee80211_chanctx_conf *ctx) 7622 { 7623 struct ath10k *ar = hw->priv; 7624 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7625 int ret; 7626 7627 mutex_lock(&ar->conf_mutex); 7628 7629 ath10k_dbg(ar, ATH10K_DBG_MAC, 7630 "mac chanctx unassign ptr %pK vdev_id %i\n", 7631 ctx, arvif->vdev_id); 7632 7633 WARN_ON(!arvif->is_started); 7634 7635 if (vif->type == NL80211_IFTYPE_MONITOR) { 7636 WARN_ON(!arvif->is_up); 7637 7638 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7639 if (ret) 7640 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7641 arvif->vdev_id, ret); 7642 7643 arvif->is_up = false; 7644 } 7645 7646 ret = ath10k_vdev_stop(arvif); 7647 if (ret) 7648 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7649 arvif->vdev_id, ret); 7650 7651 arvif->is_started = false; 7652 7653 mutex_unlock(&ar->conf_mutex); 7654 } 7655 7656 static int 7657 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7658 struct ieee80211_vif_chanctx_switch *vifs, 7659 int n_vifs, 7660 enum ieee80211_chanctx_switch_mode mode) 7661 { 7662 struct ath10k *ar = hw->priv; 7663 7664 mutex_lock(&ar->conf_mutex); 7665 7666 ath10k_dbg(ar, ATH10K_DBG_MAC, 7667 "mac chanctx switch n_vifs %d mode %d\n", 7668 n_vifs, mode); 7669 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7670 7671 mutex_unlock(&ar->conf_mutex); 7672 return 0; 7673 } 7674 7675 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7676 struct ieee80211_vif *vif, 7677 struct ieee80211_sta *sta) 7678 { 7679 struct ath10k *ar; 7680 struct ath10k_peer *peer; 7681 7682 ar = hw->priv; 7683 7684 list_for_each_entry(peer, &ar->peers, list) 7685 if (peer->sta == sta) 7686 peer->removed = true; 7687 } 7688 7689 static void ath10k_sta_statistics(struct ieee80211_hw *hw, 7690 struct ieee80211_vif *vif, 7691 struct ieee80211_sta *sta, 7692 struct station_info *sinfo) 7693 { 7694 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7695 struct ath10k *ar = arsta->arvif->ar; 7696 7697 if (!ath10k_peer_stats_enabled(ar)) 7698 return; 7699 7700 sinfo->rx_duration = arsta->rx_duration; 7701 sinfo->filled |= 1ULL << NL80211_STA_INFO_RX_DURATION; 7702 7703 if (!arsta->txrate.legacy && !arsta->txrate.nss) 7704 return; 7705 7706 if (arsta->txrate.legacy) { 7707 sinfo->txrate.legacy = arsta->txrate.legacy; 7708 } else { 7709 sinfo->txrate.mcs = arsta->txrate.mcs; 7710 sinfo->txrate.nss = arsta->txrate.nss; 7711 sinfo->txrate.bw = arsta->txrate.bw; 7712 } 7713 sinfo->txrate.flags = arsta->txrate.flags; 7714 sinfo->filled |= 1ULL << NL80211_STA_INFO_TX_BITRATE; 7715 } 7716 7717 static const struct ieee80211_ops ath10k_ops = { 7718 .tx = ath10k_mac_op_tx, 7719 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 7720 .start = ath10k_start, 7721 .stop = ath10k_stop, 7722 .config = ath10k_config, 7723 .add_interface = ath10k_add_interface, 7724 .remove_interface = ath10k_remove_interface, 7725 .configure_filter = ath10k_configure_filter, 7726 .bss_info_changed = ath10k_bss_info_changed, 7727 .set_coverage_class = ath10k_mac_op_set_coverage_class, 7728 .hw_scan = ath10k_hw_scan, 7729 .cancel_hw_scan = ath10k_cancel_hw_scan, 7730 .set_key = ath10k_set_key, 7731 .set_default_unicast_key = ath10k_set_default_unicast_key, 7732 .sta_state = ath10k_sta_state, 7733 .conf_tx = ath10k_conf_tx, 7734 .remain_on_channel = ath10k_remain_on_channel, 7735 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 7736 .set_rts_threshold = ath10k_set_rts_threshold, 7737 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 7738 .flush = ath10k_flush, 7739 
.tx_last_beacon = ath10k_tx_last_beacon, 7740 .set_antenna = ath10k_set_antenna, 7741 .get_antenna = ath10k_get_antenna, 7742 .reconfig_complete = ath10k_reconfig_complete, 7743 .get_survey = ath10k_get_survey, 7744 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 7745 .sta_rc_update = ath10k_sta_rc_update, 7746 .offset_tsf = ath10k_offset_tsf, 7747 .ampdu_action = ath10k_ampdu_action, 7748 .get_et_sset_count = ath10k_debug_get_et_sset_count, 7749 .get_et_stats = ath10k_debug_get_et_stats, 7750 .get_et_strings = ath10k_debug_get_et_strings, 7751 .add_chanctx = ath10k_mac_op_add_chanctx, 7752 .remove_chanctx = ath10k_mac_op_remove_chanctx, 7753 .change_chanctx = ath10k_mac_op_change_chanctx, 7754 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 7755 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 7756 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 7757 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 7758 .sta_statistics = ath10k_sta_statistics, 7759 7760 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 7761 7762 #ifdef CONFIG_PM 7763 .suspend = ath10k_wow_op_suspend, 7764 .resume = ath10k_wow_op_resume, 7765 .set_wakeup = ath10k_wow_op_set_wakeup, 7766 #endif 7767 #ifdef CONFIG_MAC80211_DEBUGFS 7768 .sta_add_debugfs = ath10k_sta_add_debugfs, 7769 #endif 7770 }; 7771 7772 #define CHAN2G(_channel, _freq, _flags) { \ 7773 .band = NL80211_BAND_2GHZ, \ 7774 .hw_value = (_channel), \ 7775 .center_freq = (_freq), \ 7776 .flags = (_flags), \ 7777 .max_antenna_gain = 0, \ 7778 .max_power = 30, \ 7779 } 7780 7781 #define CHAN5G(_channel, _freq, _flags) { \ 7782 .band = NL80211_BAND_5GHZ, \ 7783 .hw_value = (_channel), \ 7784 .center_freq = (_freq), \ 7785 .flags = (_flags), \ 7786 .max_antenna_gain = 0, \ 7787 .max_power = 30, \ 7788 } 7789 7790 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 7791 CHAN2G(1, 2412, 0), 7792 CHAN2G(2, 2417, 0), 7793 CHAN2G(3, 2422, 0), 7794 CHAN2G(4, 2427, 0), 7795 CHAN2G(5, 2432, 0), 7796 CHAN2G(6, 2437, 0), 7797 CHAN2G(7, 2442, 0), 7798 CHAN2G(8, 2447, 0), 7799 CHAN2G(9, 2452, 0), 7800 CHAN2G(10, 2457, 0), 7801 CHAN2G(11, 2462, 0), 7802 CHAN2G(12, 2467, 0), 7803 CHAN2G(13, 2472, 0), 7804 CHAN2G(14, 2484, 0), 7805 }; 7806 7807 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 7808 CHAN5G(36, 5180, 0), 7809 CHAN5G(40, 5200, 0), 7810 CHAN5G(44, 5220, 0), 7811 CHAN5G(48, 5240, 0), 7812 CHAN5G(52, 5260, 0), 7813 CHAN5G(56, 5280, 0), 7814 CHAN5G(60, 5300, 0), 7815 CHAN5G(64, 5320, 0), 7816 CHAN5G(100, 5500, 0), 7817 CHAN5G(104, 5520, 0), 7818 CHAN5G(108, 5540, 0), 7819 CHAN5G(112, 5560, 0), 7820 CHAN5G(116, 5580, 0), 7821 CHAN5G(120, 5600, 0), 7822 CHAN5G(124, 5620, 0), 7823 CHAN5G(128, 5640, 0), 7824 CHAN5G(132, 5660, 0), 7825 CHAN5G(136, 5680, 0), 7826 CHAN5G(140, 5700, 0), 7827 CHAN5G(144, 5720, 0), 7828 CHAN5G(149, 5745, 0), 7829 CHAN5G(153, 5765, 0), 7830 CHAN5G(157, 5785, 0), 7831 CHAN5G(161, 5805, 0), 7832 CHAN5G(165, 5825, 0), 7833 CHAN5G(169, 5845, 0), 7834 }; 7835 7836 struct ath10k *ath10k_mac_create(size_t priv_size) 7837 { 7838 struct ieee80211_hw *hw; 7839 struct ieee80211_ops *ops; 7840 struct ath10k *ar; 7841 7842 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 7843 if (!ops) 7844 return NULL; 7845 7846 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 7847 if (!hw) { 7848 kfree(ops); 7849 return NULL; 7850 } 7851 7852 ar = hw->priv; 7853 ar->hw = hw; 7854 ar->ops = ops; 7855 7856 return ar; 7857 } 7858 7859 void ath10k_mac_destroy(struct ath10k *ar) 7860 { 7861 struct 
ieee80211_ops *ops = ar->ops; 7862 7863 ieee80211_free_hw(ar->hw); 7864 kfree(ops); 7865 } 7866 7867 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 7868 { 7869 .max = 8, 7870 .types = BIT(NL80211_IFTYPE_STATION) 7871 | BIT(NL80211_IFTYPE_P2P_CLIENT) 7872 }, 7873 { 7874 .max = 3, 7875 .types = BIT(NL80211_IFTYPE_P2P_GO) 7876 }, 7877 { 7878 .max = 1, 7879 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 7880 }, 7881 { 7882 .max = 7, 7883 .types = BIT(NL80211_IFTYPE_AP) 7884 #ifdef CONFIG_MAC80211_MESH 7885 | BIT(NL80211_IFTYPE_MESH_POINT) 7886 #endif 7887 }, 7888 }; 7889 7890 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 7891 { 7892 .max = 8, 7893 .types = BIT(NL80211_IFTYPE_AP) 7894 #ifdef CONFIG_MAC80211_MESH 7895 | BIT(NL80211_IFTYPE_MESH_POINT) 7896 #endif 7897 }, 7898 { 7899 .max = 1, 7900 .types = BIT(NL80211_IFTYPE_STATION) 7901 }, 7902 }; 7903 7904 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 7905 { 7906 .limits = ath10k_if_limits, 7907 .n_limits = ARRAY_SIZE(ath10k_if_limits), 7908 .max_interfaces = 8, 7909 .num_different_channels = 1, 7910 .beacon_int_infra_match = true, 7911 }, 7912 }; 7913 7914 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { 7915 { 7916 .limits = ath10k_10x_if_limits, 7917 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), 7918 .max_interfaces = 8, 7919 .num_different_channels = 1, 7920 .beacon_int_infra_match = true, 7921 .beacon_int_min_gcd = 1, 7922 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 7923 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 7924 BIT(NL80211_CHAN_WIDTH_20) | 7925 BIT(NL80211_CHAN_WIDTH_40) | 7926 BIT(NL80211_CHAN_WIDTH_80), 7927 #endif 7928 }, 7929 }; 7930 7931 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { 7932 { 7933 .max = 2, 7934 .types = BIT(NL80211_IFTYPE_STATION), 7935 }, 7936 { 7937 .max = 2, 7938 .types = BIT(NL80211_IFTYPE_AP) | 7939 #ifdef CONFIG_MAC80211_MESH 7940 BIT(NL80211_IFTYPE_MESH_POINT) | 7941 #endif 7942 BIT(NL80211_IFTYPE_P2P_CLIENT) | 7943 BIT(NL80211_IFTYPE_P2P_GO), 7944 }, 7945 { 7946 .max = 1, 7947 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7948 }, 7949 }; 7950 7951 static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = { 7952 { 7953 .max = 2, 7954 .types = BIT(NL80211_IFTYPE_STATION), 7955 }, 7956 { 7957 .max = 2, 7958 .types = BIT(NL80211_IFTYPE_P2P_CLIENT), 7959 }, 7960 { 7961 .max = 1, 7962 .types = BIT(NL80211_IFTYPE_AP) | 7963 #ifdef CONFIG_MAC80211_MESH 7964 BIT(NL80211_IFTYPE_MESH_POINT) | 7965 #endif 7966 BIT(NL80211_IFTYPE_P2P_GO), 7967 }, 7968 { 7969 .max = 1, 7970 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 7971 }, 7972 }; 7973 7974 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { 7975 { 7976 .max = 1, 7977 .types = BIT(NL80211_IFTYPE_STATION), 7978 }, 7979 { 7980 .max = 1, 7981 .types = BIT(NL80211_IFTYPE_ADHOC), 7982 }, 7983 }; 7984 7985 /* FIXME: This is not thouroughly tested. These combinations may over- or 7986 * underestimate hw/fw capabilities. 
7987 */ 7988 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 7989 { 7990 .limits = ath10k_tlv_if_limit, 7991 .num_different_channels = 1, 7992 .max_interfaces = 4, 7993 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 7994 }, 7995 { 7996 .limits = ath10k_tlv_if_limit_ibss, 7997 .num_different_channels = 1, 7998 .max_interfaces = 2, 7999 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8000 }, 8001 }; 8002 8003 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 8004 { 8005 .limits = ath10k_tlv_if_limit, 8006 .num_different_channels = 1, 8007 .max_interfaces = 4, 8008 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 8009 }, 8010 { 8011 .limits = ath10k_tlv_qcs_if_limit, 8012 .num_different_channels = 2, 8013 .max_interfaces = 4, 8014 .n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit), 8015 }, 8016 { 8017 .limits = ath10k_tlv_if_limit_ibss, 8018 .num_different_channels = 1, 8019 .max_interfaces = 2, 8020 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 8021 }, 8022 }; 8023 8024 static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = { 8025 { 8026 .max = 1, 8027 .types = BIT(NL80211_IFTYPE_STATION), 8028 }, 8029 { 8030 .max = 16, 8031 .types = BIT(NL80211_IFTYPE_AP) 8032 #ifdef CONFIG_MAC80211_MESH 8033 | BIT(NL80211_IFTYPE_MESH_POINT) 8034 #endif 8035 }, 8036 }; 8037 8038 static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = { 8039 { 8040 .limits = ath10k_10_4_if_limits, 8041 .n_limits = ARRAY_SIZE(ath10k_10_4_if_limits), 8042 .max_interfaces = 16, 8043 .num_different_channels = 1, 8044 .beacon_int_infra_match = true, 8045 .beacon_int_min_gcd = 1, 8046 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 8047 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 8048 BIT(NL80211_CHAN_WIDTH_20) | 8049 BIT(NL80211_CHAN_WIDTH_40) | 8050 BIT(NL80211_CHAN_WIDTH_80), 8051 #endif 8052 }, 8053 }; 8054 8055 static void ath10k_get_arvif_iter(void *data, u8 *mac, 8056 struct ieee80211_vif *vif) 8057 { 8058 struct ath10k_vif_iter *arvif_iter = data; 8059 struct ath10k_vif *arvif = (void *)vif->drv_priv; 8060 8061 if (arvif->vdev_id == arvif_iter->vdev_id) 8062 arvif_iter->arvif = arvif; 8063 } 8064 8065 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 8066 { 8067 struct ath10k_vif_iter arvif_iter; 8068 u32 flags; 8069 8070 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 8071 arvif_iter.vdev_id = vdev_id; 8072 8073 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 8074 ieee80211_iterate_active_interfaces_atomic(ar->hw, 8075 flags, 8076 ath10k_get_arvif_iter, 8077 &arvif_iter); 8078 if (!arvif_iter.arvif) { 8079 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 8080 return NULL; 8081 } 8082 8083 return arvif_iter.arvif; 8084 } 8085 8086 #define WRD_METHOD "WRDD" 8087 #define WRDD_WIFI (0x07) 8088 8089 static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd) 8090 { 8091 union acpi_object *mcc_pkg; 8092 union acpi_object *domain_type; 8093 union acpi_object *mcc_value; 8094 u32 i; 8095 8096 if (wrdd->type != ACPI_TYPE_PACKAGE || 8097 wrdd->package.count < 2 || 8098 wrdd->package.elements[0].type != ACPI_TYPE_INTEGER || 8099 wrdd->package.elements[0].integer.value != 0) { 8100 ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n"); 8101 return 0; 8102 } 8103 8104 for (i = 1; i < wrdd->package.count; ++i) { 8105 mcc_pkg = &wrdd->package.elements[i]; 8106 8107 if (mcc_pkg->type != ACPI_TYPE_PACKAGE) 8108 continue; 8109 if (mcc_pkg->package.count < 2) 8110 continue; 8111 if (mcc_pkg->package.elements[0].type != 
ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	struct pci_dev __maybe_unused *pdev = to_pci_dev(ar->dev);
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(&pdev->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware-supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * correct Probe Responses. This is more of a capability
		 * advertisement than a real offload.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);

	/*
	 * On LL hardware the queues are managed entirely by the firmware,
	 * so we just advertise the maximum number of queues to mac80211.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;

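	/* Which interface combinations get advertised depends on the firmware
	 * WMI op version; for TLV firmware the WMI_SERVICE_ADAPTIVE_OCS bit
	 * additionally selects ath10k_tlv_qcs_if_comb, which includes a
	 * two-channel combination.
	 */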
	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
		if (ret) {
			ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
			goto err_dfs_detector_exit;
		}

		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

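	/* Note: n_cipher_suites (set below) limits how many entries of the
	 * cipher_suites[] array are actually advertised to mac80211, so the
	 * fallback value of 8 covers exactly the entries up to and including
	 * WLAN_CIPHER_SUITE_BIP_GMAC_256.
	 */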
	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}