// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2018-2019, The Linux Foundation. All rights reserved.
 */

#include "mac.h"

#include <net/cfg80211.h>
#include <net/mac80211.h>
#include <linux/etherdevice.h>
#include <linux/acpi.h>
#include <linux/of.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

static struct ieee80211_rate ath10k_rates_rev2[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_REV2_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_REV2_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))

#define ath10k_g_rates_rev2 (ath10k_rates_rev2 + 0)
#define ath10k_g_rates_rev2_size (ARRAY_SIZE(ath10k_rates_rev2))

#define ath10k_wmi_legacy_rates ath10k_rates

static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		return true;
	}

	return false;
}

static u8
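/* Convert a bitrate (in 100 kbps units, as used by mac80211) into the rate
 * code expected by the firmware: the rate in 500 kbps units, with bit 7
 * flagging CCK rates.
 */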
ath10k_mac_bitrate_to_rate(int bitrate) 110 { 111 return DIV_ROUND_UP(bitrate, 5) | 112 (ath10k_mac_bitrate_is_cck(bitrate) ? BIT(7) : 0); 113 } 114 115 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 116 u8 hw_rate, bool cck) 117 { 118 const struct ieee80211_rate *rate; 119 int i; 120 121 for (i = 0; i < sband->n_bitrates; i++) { 122 rate = &sband->bitrates[i]; 123 124 if (ath10k_mac_bitrate_is_cck(rate->bitrate) != cck) 125 continue; 126 127 if (rate->hw_value == hw_rate) 128 return i; 129 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 130 rate->hw_value_short == hw_rate) 131 return i; 132 } 133 134 return 0; 135 } 136 137 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 138 u32 bitrate) 139 { 140 int i; 141 142 for (i = 0; i < sband->n_bitrates; i++) 143 if (sband->bitrates[i].bitrate == bitrate) 144 return i; 145 146 return 0; 147 } 148 149 static int ath10k_mac_get_rate_hw_value(int bitrate) 150 { 151 int i; 152 u8 hw_value_prefix = 0; 153 154 if (ath10k_mac_bitrate_is_cck(bitrate)) 155 hw_value_prefix = WMI_RATE_PREAMBLE_CCK << 6; 156 157 for (i = 0; i < ARRAY_SIZE(ath10k_rates); i++) { 158 if (ath10k_rates[i].bitrate == bitrate) 159 return hw_value_prefix | ath10k_rates[i].hw_value; 160 } 161 162 return -EINVAL; 163 } 164 165 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 166 { 167 switch ((mcs_map >> (2 * nss)) & 0x3) { 168 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 169 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 170 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 171 } 172 return 0; 173 } 174 175 static u32 176 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 177 { 178 int nss; 179 180 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 181 if (ht_mcs_mask[nss]) 182 return nss + 1; 183 184 return 1; 185 } 186 187 static u32 188 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 189 { 190 int nss; 191 192 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 193 if (vht_mcs_mask[nss]) 194 return nss + 1; 195 196 return 1; 197 } 198 199 int ath10k_mac_ext_resource_config(struct ath10k *ar, u32 val) 200 { 201 enum wmi_host_platform_type platform_type; 202 int ret; 203 204 if (test_bit(WMI_SERVICE_TX_MODE_DYNAMIC, ar->wmi.svc_map)) 205 platform_type = WMI_HOST_PLATFORM_LOW_PERF; 206 else 207 platform_type = WMI_HOST_PLATFORM_HIGH_PERF; 208 209 ret = ath10k_wmi_ext_resource_config(ar, platform_type, val); 210 211 if (ret && ret != -EOPNOTSUPP) { 212 ath10k_warn(ar, "failed to configure ext resource: %d\n", ret); 213 return ret; 214 } 215 216 return 0; 217 } 218 219 /**********/ 220 /* Crypto */ 221 /**********/ 222 223 static int ath10k_send_key(struct ath10k_vif *arvif, 224 struct ieee80211_key_conf *key, 225 enum set_key_cmd cmd, 226 const u8 *macaddr, u32 flags) 227 { 228 struct ath10k *ar = arvif->ar; 229 struct wmi_vdev_install_key_arg arg = { 230 .vdev_id = arvif->vdev_id, 231 .key_idx = key->keyidx, 232 .key_len = key->keylen, 233 .key_data = key->key, 234 .key_flags = flags, 235 .macaddr = macaddr, 236 }; 237 238 lockdep_assert_held(&arvif->ar->conf_mutex); 239 240 switch (key->cipher) { 241 case WLAN_CIPHER_SUITE_CCMP: 242 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 243 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 244 break; 245 case WLAN_CIPHER_SUITE_TKIP: 246 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_TKIP]; 247 arg.key_txmic_len = 8; 248 arg.key_rxmic_len = 8; 249 break; 250 case 
WLAN_CIPHER_SUITE_WEP40: 251 case WLAN_CIPHER_SUITE_WEP104: 252 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_WEP]; 253 break; 254 case WLAN_CIPHER_SUITE_CCMP_256: 255 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_CCM]; 256 break; 257 case WLAN_CIPHER_SUITE_GCMP: 258 case WLAN_CIPHER_SUITE_GCMP_256: 259 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_AES_GCM]; 260 break; 261 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 262 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 263 case WLAN_CIPHER_SUITE_BIP_CMAC_256: 264 case WLAN_CIPHER_SUITE_AES_CMAC: 265 WARN_ON(1); 266 return -EINVAL; 267 default: 268 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 269 return -EOPNOTSUPP; 270 } 271 272 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) 273 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV; 274 275 if (cmd == DISABLE_KEY) { 276 arg.key_cipher = ar->wmi_key_cipher[WMI_CIPHER_NONE]; 277 arg.key_data = NULL; 278 } 279 280 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 281 } 282 283 static int ath10k_install_key(struct ath10k_vif *arvif, 284 struct ieee80211_key_conf *key, 285 enum set_key_cmd cmd, 286 const u8 *macaddr, u32 flags) 287 { 288 struct ath10k *ar = arvif->ar; 289 int ret; 290 unsigned long time_left; 291 292 lockdep_assert_held(&ar->conf_mutex); 293 294 reinit_completion(&ar->install_key_done); 295 296 if (arvif->nohwcrypt) 297 return 1; 298 299 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 300 if (ret) 301 return ret; 302 303 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 304 if (time_left == 0) 305 return -ETIMEDOUT; 306 307 return 0; 308 } 309 310 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 311 const u8 *addr) 312 { 313 struct ath10k *ar = arvif->ar; 314 struct ath10k_peer *peer; 315 int ret; 316 int i; 317 u32 flags; 318 319 lockdep_assert_held(&ar->conf_mutex); 320 321 if (WARN_ON(arvif->vif->type != NL80211_IFTYPE_AP && 322 arvif->vif->type != NL80211_IFTYPE_ADHOC && 323 arvif->vif->type != NL80211_IFTYPE_MESH_POINT)) 324 return -EINVAL; 325 326 spin_lock_bh(&ar->data_lock); 327 peer = ath10k_peer_find(ar, arvif->vdev_id, addr); 328 spin_unlock_bh(&ar->data_lock); 329 330 if (!peer) 331 return -ENOENT; 332 333 for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) { 334 if (arvif->wep_keys[i] == NULL) 335 continue; 336 337 switch (arvif->vif->type) { 338 case NL80211_IFTYPE_AP: 339 flags = WMI_KEY_PAIRWISE; 340 341 if (arvif->def_wep_key_idx == i) 342 flags |= WMI_KEY_TX_USAGE; 343 344 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 345 SET_KEY, addr, flags); 346 if (ret < 0) 347 return ret; 348 break; 349 case NL80211_IFTYPE_ADHOC: 350 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 351 SET_KEY, addr, 352 WMI_KEY_PAIRWISE); 353 if (ret < 0) 354 return ret; 355 356 ret = ath10k_install_key(arvif, arvif->wep_keys[i], 357 SET_KEY, addr, WMI_KEY_GROUP); 358 if (ret < 0) 359 return ret; 360 break; 361 default: 362 WARN_ON(1); 363 return -EINVAL; 364 } 365 366 spin_lock_bh(&ar->data_lock); 367 peer->keys[i] = arvif->wep_keys[i]; 368 spin_unlock_bh(&ar->data_lock); 369 } 370 371 /* In some cases (notably with static WEP IBSS with multiple keys) 372 * multicast Tx becomes broken. Both pairwise and groupwise keys are 373 * installed already. Using WMI_KEY_TX_USAGE in different combinations 374 * didn't seem help. Using def_keyid vdev parameter seems to be 375 * effective so use that. 376 * 377 * FIXME: Revisit. Perhaps this can be done in a less hacky way. 
 */
	if (arvif->vif->type != NL80211_IFTYPE_ADHOC)
		return 0;

	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret < 0)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
	 */
	peer = ath10k_peer_find(ar, 0, addr);
	if (!peer)
		return false;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] && peer->keys[i]->keyidx == keyidx)
			return true;
	}

	return false;
}

static int ath10k_clear_vdev_key(struct ath10k_vif *arvif,
				 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	u8 addr[ETH_ALEN];
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	for (;;) {
		/* since ath10k_install_key() can sleep we can't hold data_lock
		 * all the time, so we try to remove the keys incrementally
		 */
		spin_lock_bh(&ar->data_lock);
		i = 0;
		list_for_each_entry(peer, &ar->peers, list) {
			for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
				if (peer->keys[i] == key) {
					ether_addr_copy(addr, peer->addr);
					peer->keys[i] = NULL;
					break;
				}
			}

			if (i < ARRAY_SIZE(peer->keys))
				break;
		}
		spin_unlock_bh(&ar->data_lock);

		if (i == ARRAY_SIZE(peer->keys))
			break;
		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags);
		if (ret < 0 && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove key for %pM: %d\n",
				    addr, ret);
	}

	return first_errno;
}

static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif,
					 struct ieee80211_key_conf *key)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	list_for_each_entry(peer, &ar->peers, list) {
		if (ether_addr_equal(peer->addr, arvif->vif->addr))
			continue;

		if
(ether_addr_equal(peer->addr, arvif->bssid)) 526 continue; 527 528 if (peer->keys[key->keyidx] == key) 529 continue; 530 531 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 532 arvif->vdev_id, key->keyidx); 533 534 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 535 if (ret) { 536 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 537 arvif->vdev_id, peer->addr, ret); 538 return ret; 539 } 540 } 541 542 return 0; 543 } 544 545 /*********************/ 546 /* General utilities */ 547 /*********************/ 548 549 static inline enum wmi_phy_mode 550 chan_to_phymode(const struct cfg80211_chan_def *chandef) 551 { 552 enum wmi_phy_mode phymode = MODE_UNKNOWN; 553 554 switch (chandef->chan->band) { 555 case NL80211_BAND_2GHZ: 556 switch (chandef->width) { 557 case NL80211_CHAN_WIDTH_20_NOHT: 558 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 559 phymode = MODE_11B; 560 else 561 phymode = MODE_11G; 562 break; 563 case NL80211_CHAN_WIDTH_20: 564 phymode = MODE_11NG_HT20; 565 break; 566 case NL80211_CHAN_WIDTH_40: 567 phymode = MODE_11NG_HT40; 568 break; 569 case NL80211_CHAN_WIDTH_5: 570 case NL80211_CHAN_WIDTH_10: 571 case NL80211_CHAN_WIDTH_80: 572 case NL80211_CHAN_WIDTH_80P80: 573 case NL80211_CHAN_WIDTH_160: 574 phymode = MODE_UNKNOWN; 575 break; 576 } 577 break; 578 case NL80211_BAND_5GHZ: 579 switch (chandef->width) { 580 case NL80211_CHAN_WIDTH_20_NOHT: 581 phymode = MODE_11A; 582 break; 583 case NL80211_CHAN_WIDTH_20: 584 phymode = MODE_11NA_HT20; 585 break; 586 case NL80211_CHAN_WIDTH_40: 587 phymode = MODE_11NA_HT40; 588 break; 589 case NL80211_CHAN_WIDTH_80: 590 phymode = MODE_11AC_VHT80; 591 break; 592 case NL80211_CHAN_WIDTH_160: 593 phymode = MODE_11AC_VHT160; 594 break; 595 case NL80211_CHAN_WIDTH_80P80: 596 phymode = MODE_11AC_VHT80_80; 597 break; 598 case NL80211_CHAN_WIDTH_5: 599 case NL80211_CHAN_WIDTH_10: 600 phymode = MODE_UNKNOWN; 601 break; 602 } 603 break; 604 default: 605 break; 606 } 607 608 WARN_ON(phymode == MODE_UNKNOWN); 609 return phymode; 610 } 611 612 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 613 { 614 /* 615 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 616 * 0 for no restriction 617 * 1 for 1/4 us 618 * 2 for 1/2 us 619 * 3 for 1 us 620 * 4 for 2 us 621 * 5 for 4 us 622 * 6 for 8 us 623 * 7 for 16 us 624 */ 625 switch (mpdudensity) { 626 case 0: 627 return 0; 628 case 1: 629 case 2: 630 case 3: 631 /* Our lower layer calculations limit our precision to 632 * 1 microsecond 633 */ 634 return 1; 635 case 4: 636 return 2; 637 case 5: 638 return 4; 639 case 6: 640 return 8; 641 case 7: 642 return 16; 643 default: 644 return 0; 645 } 646 } 647 648 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 649 struct cfg80211_chan_def *def) 650 { 651 struct ieee80211_chanctx_conf *conf; 652 653 rcu_read_lock(); 654 conf = rcu_dereference(vif->chanctx_conf); 655 if (!conf) { 656 rcu_read_unlock(); 657 return -ENOENT; 658 } 659 660 *def = conf->def; 661 rcu_read_unlock(); 662 663 return 0; 664 } 665 666 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 667 struct ieee80211_chanctx_conf *conf, 668 void *data) 669 { 670 int *num = data; 671 672 (*num)++; 673 } 674 675 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 676 { 677 int num = 0; 678 679 ieee80211_iter_chan_contexts_atomic(ar->hw, 680 ath10k_mac_num_chanctxs_iter, 681 &num); 682 683 return num; 684 } 685 686 static void 687 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 688 struct 
ieee80211_chanctx_conf *conf, 689 void *data) 690 { 691 struct cfg80211_chan_def **def = data; 692 693 *def = &conf->def; 694 } 695 696 static int ath10k_peer_create(struct ath10k *ar, 697 struct ieee80211_vif *vif, 698 struct ieee80211_sta *sta, 699 u32 vdev_id, 700 const u8 *addr, 701 enum wmi_peer_type peer_type) 702 { 703 struct ath10k_vif *arvif; 704 struct ath10k_peer *peer; 705 int num_peers = 0; 706 int ret; 707 708 lockdep_assert_held(&ar->conf_mutex); 709 710 num_peers = ar->num_peers; 711 712 /* Each vdev consumes a peer entry as well */ 713 list_for_each_entry(arvif, &ar->arvifs, list) 714 num_peers++; 715 716 if (num_peers >= ar->max_num_peers) 717 return -ENOBUFS; 718 719 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 720 if (ret) { 721 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 722 addr, vdev_id, ret); 723 return ret; 724 } 725 726 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 727 if (ret) { 728 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 729 addr, vdev_id, ret); 730 return ret; 731 } 732 733 spin_lock_bh(&ar->data_lock); 734 735 peer = ath10k_peer_find(ar, vdev_id, addr); 736 if (!peer) { 737 spin_unlock_bh(&ar->data_lock); 738 ath10k_warn(ar, "failed to find peer %pM on vdev %i after creation\n", 739 addr, vdev_id); 740 ath10k_wmi_peer_delete(ar, vdev_id, addr); 741 return -ENOENT; 742 } 743 744 peer->vif = vif; 745 peer->sta = sta; 746 747 spin_unlock_bh(&ar->data_lock); 748 749 ar->num_peers++; 750 751 return 0; 752 } 753 754 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 755 { 756 struct ath10k *ar = arvif->ar; 757 u32 param; 758 int ret; 759 760 param = ar->wmi.pdev_param->sta_kickout_th; 761 ret = ath10k_wmi_pdev_set_param(ar, param, 762 ATH10K_KICKOUT_THRESHOLD); 763 if (ret) { 764 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 765 arvif->vdev_id, ret); 766 return ret; 767 } 768 769 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 770 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 771 ATH10K_KEEPALIVE_MIN_IDLE); 772 if (ret) { 773 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 774 arvif->vdev_id, ret); 775 return ret; 776 } 777 778 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 779 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 780 ATH10K_KEEPALIVE_MAX_IDLE); 781 if (ret) { 782 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 783 arvif->vdev_id, ret); 784 return ret; 785 } 786 787 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 788 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 789 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 790 if (ret) { 791 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 792 arvif->vdev_id, ret); 793 return ret; 794 } 795 796 return 0; 797 } 798 799 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 800 { 801 struct ath10k *ar = arvif->ar; 802 u32 vdev_param; 803 804 vdev_param = ar->wmi.vdev_param->rts_threshold; 805 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 806 } 807 808 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 809 { 810 int ret; 811 812 lockdep_assert_held(&ar->conf_mutex); 813 814 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 815 if (ret) 816 return ret; 817 818 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 819 if (ret) 820 return ret; 821 822 
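	/* only drop the peer count once the firmware-side delete has
	 * completed (see the wait above)
	 */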
ar->num_peers--; 823 824 return 0; 825 } 826 827 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 828 { 829 struct ath10k_peer *peer, *tmp; 830 int peer_id; 831 int i; 832 833 lockdep_assert_held(&ar->conf_mutex); 834 835 spin_lock_bh(&ar->data_lock); 836 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 837 if (peer->vdev_id != vdev_id) 838 continue; 839 840 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 841 peer->addr, vdev_id); 842 843 for_each_set_bit(peer_id, peer->peer_ids, 844 ATH10K_MAX_NUM_PEER_IDS) { 845 ar->peer_map[peer_id] = NULL; 846 } 847 848 /* Double check that peer is properly un-referenced from 849 * the peer_map 850 */ 851 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 852 if (ar->peer_map[i] == peer) { 853 ath10k_warn(ar, "removing stale peer_map entry for %pM (ptr %pK idx %d)\n", 854 peer->addr, peer, i); 855 ar->peer_map[i] = NULL; 856 } 857 } 858 859 list_del(&peer->list); 860 kfree(peer); 861 ar->num_peers--; 862 } 863 spin_unlock_bh(&ar->data_lock); 864 } 865 866 static void ath10k_peer_cleanup_all(struct ath10k *ar) 867 { 868 struct ath10k_peer *peer, *tmp; 869 int i; 870 871 lockdep_assert_held(&ar->conf_mutex); 872 873 spin_lock_bh(&ar->data_lock); 874 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 875 list_del(&peer->list); 876 kfree(peer); 877 } 878 879 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) 880 ar->peer_map[i] = NULL; 881 882 spin_unlock_bh(&ar->data_lock); 883 884 ar->num_peers = 0; 885 ar->num_stations = 0; 886 } 887 888 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 889 struct ieee80211_sta *sta, 890 enum wmi_tdls_peer_state state) 891 { 892 int ret; 893 struct wmi_tdls_peer_update_cmd_arg arg = {}; 894 struct wmi_tdls_peer_capab_arg cap = {}; 895 struct wmi_channel_arg chan_arg = {}; 896 897 lockdep_assert_held(&ar->conf_mutex); 898 899 arg.vdev_id = vdev_id; 900 arg.peer_state = state; 901 ether_addr_copy(arg.addr, sta->addr); 902 903 cap.peer_max_sp = sta->max_sp; 904 cap.peer_uapsd_queues = sta->uapsd_queues; 905 906 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 907 !sta->tdls_initiator) 908 cap.is_peer_responder = 1; 909 910 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 911 if (ret) { 912 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 913 arg.addr, vdev_id, ret); 914 return ret; 915 } 916 917 return 0; 918 } 919 920 /************************/ 921 /* Interface management */ 922 /************************/ 923 924 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 925 { 926 struct ath10k *ar = arvif->ar; 927 928 lockdep_assert_held(&ar->data_lock); 929 930 if (!arvif->beacon) 931 return; 932 933 if (!arvif->beacon_buf) 934 dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr, 935 arvif->beacon->len, DMA_TO_DEVICE); 936 937 if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED && 938 arvif->beacon_state != ATH10K_BEACON_SENT)) 939 return; 940 941 dev_kfree_skb_any(arvif->beacon); 942 943 arvif->beacon = NULL; 944 arvif->beacon_state = ATH10K_BEACON_SCHEDULED; 945 } 946 947 static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif) 948 { 949 struct ath10k *ar = arvif->ar; 950 951 lockdep_assert_held(&ar->data_lock); 952 953 ath10k_mac_vif_beacon_free(arvif); 954 955 if (arvif->beacon_buf) { 956 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 957 arvif->beacon_buf, arvif->beacon_paddr); 958 arvif->beacon_buf = NULL; 959 } 960 } 961 962 static inline int ath10k_vdev_setup_sync(struct ath10k *ar) 963 { 964 
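	/* Wait for the WMI event handler to signal completion of the last
	 * vdev start/stop request; a timeout returns -ETIMEDOUT, otherwise
	 * the last reported vdev start status is returned to the caller.
	 */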
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return ar->last_wmi_vdev_start_status;
}

static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.band_center_freq2 = chandef->center_freq2;

	/* TODO setup this dynamically, what in case we
	 * don't have any vifs?
	 */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor
vdev\n"); 1087 return -ENOMEM; 1088 } 1089 1090 bit = __ffs64(ar->free_vdev_map); 1091 1092 ar->monitor_vdev_id = bit; 1093 1094 ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id, 1095 WMI_VDEV_TYPE_MONITOR, 1096 0, ar->mac_addr); 1097 if (ret) { 1098 ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n", 1099 ar->monitor_vdev_id, ret); 1100 return ret; 1101 } 1102 1103 ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id); 1104 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n", 1105 ar->monitor_vdev_id); 1106 1107 return 0; 1108 } 1109 1110 static int ath10k_monitor_vdev_delete(struct ath10k *ar) 1111 { 1112 int ret = 0; 1113 1114 lockdep_assert_held(&ar->conf_mutex); 1115 1116 ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id); 1117 if (ret) { 1118 ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n", 1119 ar->monitor_vdev_id, ret); 1120 return ret; 1121 } 1122 1123 ar->free_vdev_map |= 1LL << ar->monitor_vdev_id; 1124 1125 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n", 1126 ar->monitor_vdev_id); 1127 return ret; 1128 } 1129 1130 static int ath10k_monitor_start(struct ath10k *ar) 1131 { 1132 int ret; 1133 1134 lockdep_assert_held(&ar->conf_mutex); 1135 1136 ret = ath10k_monitor_vdev_create(ar); 1137 if (ret) { 1138 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 1139 return ret; 1140 } 1141 1142 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 1143 if (ret) { 1144 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1145 ath10k_monitor_vdev_delete(ar); 1146 return ret; 1147 } 1148 1149 ar->monitor_started = true; 1150 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1151 1152 return 0; 1153 } 1154 1155 static int ath10k_monitor_stop(struct ath10k *ar) 1156 { 1157 int ret; 1158 1159 lockdep_assert_held(&ar->conf_mutex); 1160 1161 ret = ath10k_monitor_vdev_stop(ar); 1162 if (ret) { 1163 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1164 return ret; 1165 } 1166 1167 ret = ath10k_monitor_vdev_delete(ar); 1168 if (ret) { 1169 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1170 return ret; 1171 } 1172 1173 ar->monitor_started = false; 1174 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1175 1176 return 0; 1177 } 1178 1179 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1180 { 1181 int num_ctx; 1182 1183 /* At least one chanctx is required to derive a channel to start 1184 * monitor vdev on. 1185 */ 1186 num_ctx = ath10k_mac_num_chanctxs(ar); 1187 if (num_ctx == 0) 1188 return false; 1189 1190 /* If there's already an existing special monitor interface then don't 1191 * bother creating another monitor vdev. 1192 */ 1193 if (ar->monitor_arvif) 1194 return false; 1195 1196 return ar->monitor || 1197 (!test_bit(ATH10K_FW_FEATURE_ALLOWS_MESH_BCAST, 1198 ar->running_fw->fw_file.fw_features) && 1199 (ar->filter_flags & FIF_OTHER_BSS)) || 1200 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1201 } 1202 1203 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1204 { 1205 int num_ctx; 1206 1207 num_ctx = ath10k_mac_num_chanctxs(ar); 1208 1209 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1210 * shouldn't allow this but make sure to prevent handling the following 1211 * case anyway since multi-channel DFS hasn't been tested at all. 
1212 */ 1213 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1214 return false; 1215 1216 return true; 1217 } 1218 1219 static int ath10k_monitor_recalc(struct ath10k *ar) 1220 { 1221 bool needed; 1222 bool allowed; 1223 int ret; 1224 1225 lockdep_assert_held(&ar->conf_mutex); 1226 1227 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1228 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1229 1230 ath10k_dbg(ar, ATH10K_DBG_MAC, 1231 "mac monitor recalc started? %d needed? %d allowed? %d\n", 1232 ar->monitor_started, needed, allowed); 1233 1234 if (WARN_ON(needed && !allowed)) { 1235 if (ar->monitor_started) { 1236 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1237 1238 ret = ath10k_monitor_stop(ar); 1239 if (ret) 1240 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", 1241 ret); 1242 /* not serious */ 1243 } 1244 1245 return -EPERM; 1246 } 1247 1248 if (needed == ar->monitor_started) 1249 return 0; 1250 1251 if (needed) 1252 return ath10k_monitor_start(ar); 1253 else 1254 return ath10k_monitor_stop(ar); 1255 } 1256 1257 static bool ath10k_mac_can_set_cts_prot(struct ath10k_vif *arvif) 1258 { 1259 struct ath10k *ar = arvif->ar; 1260 1261 lockdep_assert_held(&ar->conf_mutex); 1262 1263 if (!arvif->is_started) { 1264 ath10k_dbg(ar, ATH10K_DBG_MAC, "defer cts setup, vdev is not ready yet\n"); 1265 return false; 1266 } 1267 1268 return true; 1269 } 1270 1271 static int ath10k_mac_set_cts_prot(struct ath10k_vif *arvif) 1272 { 1273 struct ath10k *ar = arvif->ar; 1274 u32 vdev_param; 1275 1276 lockdep_assert_held(&ar->conf_mutex); 1277 1278 vdev_param = ar->wmi.vdev_param->protection_mode; 1279 1280 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_protection %d\n", 1281 arvif->vdev_id, arvif->use_cts_prot); 1282 1283 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1284 arvif->use_cts_prot ? 
1 : 0); 1285 } 1286 1287 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1288 { 1289 struct ath10k *ar = arvif->ar; 1290 u32 vdev_param, rts_cts = 0; 1291 1292 lockdep_assert_held(&ar->conf_mutex); 1293 1294 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1295 1296 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1297 1298 if (arvif->num_legacy_stations > 0) 1299 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1300 WMI_RTSCTS_PROFILE); 1301 else 1302 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1303 WMI_RTSCTS_PROFILE); 1304 1305 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d recalc rts/cts prot %d\n", 1306 arvif->vdev_id, rts_cts); 1307 1308 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1309 rts_cts); 1310 } 1311 1312 static int ath10k_start_cac(struct ath10k *ar) 1313 { 1314 int ret; 1315 1316 lockdep_assert_held(&ar->conf_mutex); 1317 1318 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1319 1320 ret = ath10k_monitor_recalc(ar); 1321 if (ret) { 1322 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1323 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1324 return ret; 1325 } 1326 1327 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1328 ar->monitor_vdev_id); 1329 1330 return 0; 1331 } 1332 1333 static int ath10k_stop_cac(struct ath10k *ar) 1334 { 1335 lockdep_assert_held(&ar->conf_mutex); 1336 1337 /* CAC is not running - do nothing */ 1338 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1339 return 0; 1340 1341 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1342 ath10k_monitor_stop(ar); 1343 1344 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1345 1346 return 0; 1347 } 1348 1349 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1350 struct ieee80211_chanctx_conf *conf, 1351 void *data) 1352 { 1353 bool *ret = data; 1354 1355 if (!*ret && conf->radar_enabled) 1356 *ret = true; 1357 } 1358 1359 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1360 { 1361 bool has_radar = false; 1362 1363 ieee80211_iter_chan_contexts_atomic(ar->hw, 1364 ath10k_mac_has_radar_iter, 1365 &has_radar); 1366 1367 return has_radar; 1368 } 1369 1370 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1371 { 1372 int ret; 1373 1374 lockdep_assert_held(&ar->conf_mutex); 1375 1376 ath10k_stop_cac(ar); 1377 1378 if (!ath10k_mac_has_radar_enabled(ar)) 1379 return; 1380 1381 if (ar->num_started_vdevs > 0) 1382 return; 1383 1384 ret = ath10k_start_cac(ar); 1385 if (ret) { 1386 /* 1387 * Not possible to start CAC on current channel so starting 1388 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1389 * by indicating that radar was detected. 
1390 */ 1391 ath10k_warn(ar, "failed to start CAC: %d\n", ret); 1392 ieee80211_radar_detected(ar->hw); 1393 } 1394 } 1395 1396 static int ath10k_vdev_stop(struct ath10k_vif *arvif) 1397 { 1398 struct ath10k *ar = arvif->ar; 1399 int ret; 1400 1401 lockdep_assert_held(&ar->conf_mutex); 1402 1403 reinit_completion(&ar->vdev_setup_done); 1404 1405 ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id); 1406 if (ret) { 1407 ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n", 1408 arvif->vdev_id, ret); 1409 return ret; 1410 } 1411 1412 ret = ath10k_vdev_setup_sync(ar); 1413 if (ret) { 1414 ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n", 1415 arvif->vdev_id, ret); 1416 return ret; 1417 } 1418 1419 WARN_ON(ar->num_started_vdevs == 0); 1420 1421 if (ar->num_started_vdevs != 0) { 1422 ar->num_started_vdevs--; 1423 ath10k_recalc_radar_detection(ar); 1424 } 1425 1426 return ret; 1427 } 1428 1429 static int ath10k_vdev_start_restart(struct ath10k_vif *arvif, 1430 const struct cfg80211_chan_def *chandef, 1431 bool restart) 1432 { 1433 struct ath10k *ar = arvif->ar; 1434 struct wmi_vdev_start_request_arg arg = {}; 1435 int ret = 0; 1436 1437 lockdep_assert_held(&ar->conf_mutex); 1438 1439 reinit_completion(&ar->vdev_setup_done); 1440 1441 arg.vdev_id = arvif->vdev_id; 1442 arg.dtim_period = arvif->dtim_period; 1443 arg.bcn_intval = arvif->beacon_interval; 1444 1445 arg.channel.freq = chandef->chan->center_freq; 1446 arg.channel.band_center_freq1 = chandef->center_freq1; 1447 arg.channel.band_center_freq2 = chandef->center_freq2; 1448 arg.channel.mode = chan_to_phymode(chandef); 1449 1450 arg.channel.min_power = 0; 1451 arg.channel.max_power = chandef->chan->max_power * 2; 1452 arg.channel.max_reg_power = chandef->chan->max_reg_power * 2; 1453 arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2; 1454 1455 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 1456 arg.ssid = arvif->u.ap.ssid; 1457 arg.ssid_len = arvif->u.ap.ssid_len; 1458 arg.hidden_ssid = arvif->u.ap.hidden_ssid; 1459 1460 /* For now allow DFS for AP mode */ 1461 arg.channel.chan_radar = 1462 !!(chandef->chan->flags & IEEE80211_CHAN_RADAR); 1463 } else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 1464 arg.ssid = arvif->vif->bss_conf.ssid; 1465 arg.ssid_len = arvif->vif->bss_conf.ssid_len; 1466 } 1467 1468 ath10k_dbg(ar, ATH10K_DBG_MAC, 1469 "mac vdev %d start center_freq %d phymode %s\n", 1470 arg.vdev_id, arg.channel.freq, 1471 ath10k_wmi_phymode_str(arg.channel.mode)); 1472 1473 if (restart) 1474 ret = ath10k_wmi_vdev_restart(ar, &arg); 1475 else 1476 ret = ath10k_wmi_vdev_start(ar, &arg); 1477 1478 if (ret) { 1479 ath10k_warn(ar, "failed to start WMI vdev %i: %d\n", 1480 arg.vdev_id, ret); 1481 return ret; 1482 } 1483 1484 ret = ath10k_vdev_setup_sync(ar); 1485 if (ret) { 1486 ath10k_warn(ar, 1487 "failed to synchronize setup for vdev %i restart %d: %d\n", 1488 arg.vdev_id, restart, ret); 1489 return ret; 1490 } 1491 1492 ar->num_started_vdevs++; 1493 ath10k_recalc_radar_detection(ar); 1494 1495 return ret; 1496 } 1497 1498 static int ath10k_vdev_start(struct ath10k_vif *arvif, 1499 const struct cfg80211_chan_def *def) 1500 { 1501 return ath10k_vdev_start_restart(arvif, def, false); 1502 } 1503 1504 static int ath10k_vdev_restart(struct ath10k_vif *arvif, 1505 const struct cfg80211_chan_def *def) 1506 { 1507 return ath10k_vdev_start_restart(arvif, def, true); 1508 } 1509 1510 static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif, 1511 struct sk_buff *bcn) 1512 { 1513 struct ath10k *ar = arvif->ar; 1514 struct 
ieee80211_mgmt *mgmt; 1515 const u8 *p2p_ie; 1516 int ret; 1517 1518 if (arvif->vif->type != NL80211_IFTYPE_AP || !arvif->vif->p2p) 1519 return 0; 1520 1521 mgmt = (void *)bcn->data; 1522 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1523 mgmt->u.beacon.variable, 1524 bcn->len - (mgmt->u.beacon.variable - 1525 bcn->data)); 1526 if (!p2p_ie) 1527 return -ENOENT; 1528 1529 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1530 if (ret) { 1531 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1532 arvif->vdev_id, ret); 1533 return ret; 1534 } 1535 1536 return 0; 1537 } 1538 1539 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1540 u8 oui_type, size_t ie_offset) 1541 { 1542 size_t len; 1543 const u8 *next; 1544 const u8 *end; 1545 u8 *ie; 1546 1547 if (WARN_ON(skb->len < ie_offset)) 1548 return -EINVAL; 1549 1550 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1551 skb->data + ie_offset, 1552 skb->len - ie_offset); 1553 if (!ie) 1554 return -ENOENT; 1555 1556 len = ie[1] + 2; 1557 end = skb->data + skb->len; 1558 next = ie + len; 1559 1560 if (WARN_ON(next > end)) 1561 return -EINVAL; 1562 1563 memmove(ie, next, end - next); 1564 skb_trim(skb, skb->len - len); 1565 1566 return 0; 1567 } 1568 1569 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1570 { 1571 struct ath10k *ar = arvif->ar; 1572 struct ieee80211_hw *hw = ar->hw; 1573 struct ieee80211_vif *vif = arvif->vif; 1574 struct ieee80211_mutable_offsets offs = {}; 1575 struct sk_buff *bcn; 1576 int ret; 1577 1578 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1579 return 0; 1580 1581 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1582 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1583 return 0; 1584 1585 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1586 if (!bcn) { 1587 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1588 return -EPERM; 1589 } 1590 1591 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1592 if (ret) { 1593 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1594 kfree_skb(bcn); 1595 return ret; 1596 } 1597 1598 /* P2P IE is inserted by firmware automatically (as configured above) 1599 * so remove it from the base beacon template to avoid duplicate P2P 1600 * IEs in beacon frames. 
1601 */ 1602 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1603 offsetof(struct ieee80211_mgmt, 1604 u.beacon.variable)); 1605 1606 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1607 0, NULL, 0); 1608 kfree_skb(bcn); 1609 1610 if (ret) { 1611 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1612 ret); 1613 return ret; 1614 } 1615 1616 return 0; 1617 } 1618 1619 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1620 { 1621 struct ath10k *ar = arvif->ar; 1622 struct ieee80211_hw *hw = ar->hw; 1623 struct ieee80211_vif *vif = arvif->vif; 1624 struct sk_buff *prb; 1625 int ret; 1626 1627 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1628 return 0; 1629 1630 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1631 return 0; 1632 1633 /* For mesh, probe response and beacon share the same template */ 1634 if (ieee80211_vif_is_mesh(vif)) 1635 return 0; 1636 1637 prb = ieee80211_proberesp_get(hw, vif); 1638 if (!prb) { 1639 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1640 return -EPERM; 1641 } 1642 1643 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1644 kfree_skb(prb); 1645 1646 if (ret) { 1647 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1648 ret); 1649 return ret; 1650 } 1651 1652 return 0; 1653 } 1654 1655 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1656 { 1657 struct ath10k *ar = arvif->ar; 1658 struct cfg80211_chan_def def; 1659 int ret; 1660 1661 /* When originally vdev is started during assign_vif_chanctx() some 1662 * information is missing, notably SSID. Firmware revisions with beacon 1663 * offloading require the SSID to be provided during vdev (re)start to 1664 * handle hidden SSID properly. 1665 * 1666 * Vdev restart must be done after vdev has been both started and 1667 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1668 * deliver vdev restart response event causing timeouts during vdev 1669 * syncing in ath10k. 1670 * 1671 * Note: The vdev down/up and template reinstallation could be skipped 1672 * since only wmi-tlv firmware are known to have beacon offload and 1673 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1674 * response delivery. It's probably more robust to keep it as is. 1675 */ 1676 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1677 return 0; 1678 1679 if (WARN_ON(!arvif->is_started)) 1680 return -EINVAL; 1681 1682 if (WARN_ON(!arvif->is_up)) 1683 return -EINVAL; 1684 1685 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1686 return -EINVAL; 1687 1688 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1689 if (ret) { 1690 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1691 arvif->vdev_id, ret); 1692 return ret; 1693 } 1694 1695 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1696 * firmware will crash upon vdev up. 
1697 */ 1698 1699 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1700 if (ret) { 1701 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1702 return ret; 1703 } 1704 1705 ret = ath10k_mac_setup_prb_tmpl(arvif); 1706 if (ret) { 1707 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1708 return ret; 1709 } 1710 1711 ret = ath10k_vdev_restart(arvif, &def); 1712 if (ret) { 1713 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1714 arvif->vdev_id, ret); 1715 return ret; 1716 } 1717 1718 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1719 arvif->bssid); 1720 if (ret) { 1721 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1722 arvif->vdev_id, ret); 1723 return ret; 1724 } 1725 1726 return 0; 1727 } 1728 1729 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1730 struct ieee80211_bss_conf *info) 1731 { 1732 struct ath10k *ar = arvif->ar; 1733 int ret = 0; 1734 1735 lockdep_assert_held(&arvif->ar->conf_mutex); 1736 1737 if (!info->enable_beacon) { 1738 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1739 if (ret) 1740 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1741 arvif->vdev_id, ret); 1742 1743 arvif->is_up = false; 1744 1745 spin_lock_bh(&arvif->ar->data_lock); 1746 ath10k_mac_vif_beacon_free(arvif); 1747 spin_unlock_bh(&arvif->ar->data_lock); 1748 1749 return; 1750 } 1751 1752 arvif->tx_seq_no = 0x1000; 1753 1754 arvif->aid = 0; 1755 ether_addr_copy(arvif->bssid, info->bssid); 1756 1757 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1758 arvif->bssid); 1759 if (ret) { 1760 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1761 arvif->vdev_id, ret); 1762 return; 1763 } 1764 1765 arvif->is_up = true; 1766 1767 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1768 if (ret) { 1769 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1770 arvif->vdev_id, ret); 1771 return; 1772 } 1773 1774 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1775 } 1776 1777 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1778 struct ieee80211_bss_conf *info, 1779 const u8 self_peer[ETH_ALEN]) 1780 { 1781 struct ath10k *ar = arvif->ar; 1782 u32 vdev_param; 1783 int ret = 0; 1784 1785 lockdep_assert_held(&arvif->ar->conf_mutex); 1786 1787 if (!info->ibss_joined) { 1788 if (is_zero_ether_addr(arvif->bssid)) 1789 return; 1790 1791 eth_zero_addr(arvif->bssid); 1792 1793 return; 1794 } 1795 1796 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1797 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1798 ATH10K_DEFAULT_ATIM); 1799 if (ret) 1800 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1801 arvif->vdev_id, ret); 1802 } 1803 1804 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1805 { 1806 struct ath10k *ar = arvif->ar; 1807 u32 param; 1808 u32 value; 1809 int ret; 1810 1811 lockdep_assert_held(&arvif->ar->conf_mutex); 1812 1813 if (arvif->u.sta.uapsd) 1814 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1815 else 1816 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1817 1818 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1819 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1820 if (ret) { 1821 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1822 value, arvif->vdev_id, ret); 1823 return ret; 1824 } 1825 1826 return 0; 1827 } 1828 1829 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1830 { 1831 struct ath10k *ar = arvif->ar; 1832 u32 param; 1833 u32 value; 1834 
int ret; 1835 1836 lockdep_assert_held(&arvif->ar->conf_mutex); 1837 1838 if (arvif->u.sta.uapsd) 1839 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1840 else 1841 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1842 1843 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1844 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1845 param, value); 1846 if (ret) { 1847 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1848 value, arvif->vdev_id, ret); 1849 return ret; 1850 } 1851 1852 return 0; 1853 } 1854 1855 static int ath10k_mac_num_vifs_started(struct ath10k *ar) 1856 { 1857 struct ath10k_vif *arvif; 1858 int num = 0; 1859 1860 lockdep_assert_held(&ar->conf_mutex); 1861 1862 list_for_each_entry(arvif, &ar->arvifs, list) 1863 if (arvif->is_started) 1864 num++; 1865 1866 return num; 1867 } 1868 1869 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1870 { 1871 struct ath10k *ar = arvif->ar; 1872 struct ieee80211_vif *vif = arvif->vif; 1873 struct ieee80211_conf *conf = &ar->hw->conf; 1874 enum wmi_sta_powersave_param param; 1875 enum wmi_sta_ps_mode psmode; 1876 int ret; 1877 int ps_timeout; 1878 bool enable_ps; 1879 1880 lockdep_assert_held(&arvif->ar->conf_mutex); 1881 1882 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1883 return 0; 1884 1885 enable_ps = arvif->ps; 1886 1887 if (enable_ps && ath10k_mac_num_vifs_started(ar) > 1 && 1888 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1889 ar->running_fw->fw_file.fw_features)) { 1890 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1891 arvif->vdev_id); 1892 enable_ps = false; 1893 } 1894 1895 if (!arvif->is_started) { 1896 /* mac80211 can update vif powersave state while disconnected. 1897 * Firmware doesn't behave nicely and consumes more power than 1898 * necessary if PS is disabled on a non-started vdev. Hence 1899 * force-enable PS for non-running vdevs. 1900 */ 1901 psmode = WMI_STA_PS_MODE_ENABLED; 1902 } else if (enable_ps) { 1903 psmode = WMI_STA_PS_MODE_ENABLED; 1904 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1905 1906 ps_timeout = conf->dynamic_ps_timeout; 1907 if (ps_timeout == 0) { 1908 /* Firmware doesn't like 0 */ 1909 ps_timeout = ieee80211_tu_to_usec( 1910 vif->bss_conf.beacon_int) / 1000; 1911 } 1912 1913 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1914 ps_timeout); 1915 if (ret) { 1916 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1917 arvif->vdev_id, ret); 1918 return ret; 1919 } 1920 } else { 1921 psmode = WMI_STA_PS_MODE_DISABLED; 1922 } 1923 1924 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1925 arvif->vdev_id, psmode ? "enable" : "disable"); 1926 1927 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1928 if (ret) { 1929 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1930 psmode, arvif->vdev_id, ret); 1931 return ret; 1932 } 1933 1934 return 0; 1935 } 1936 1937 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1938 { 1939 struct ath10k *ar = arvif->ar; 1940 struct wmi_sta_keepalive_arg arg = {}; 1941 int ret; 1942 1943 lockdep_assert_held(&arvif->ar->conf_mutex); 1944 1945 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1946 return 0; 1947 1948 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1949 return 0; 1950 1951 /* Some firmware revisions have a bug and ignore the `enabled` field. 1952 * Instead use the interval to disable the keepalive. 
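	 * Keeping `enabled` set while passing WMI_STA_KEEPALIVE_INTERVAL_DISABLE
	 * as the interval is what effectively turns the keepalive off here.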
1953 */ 1954 arg.vdev_id = arvif->vdev_id; 1955 arg.enabled = 1; 1956 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1957 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1958 1959 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1960 if (ret) { 1961 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1962 arvif->vdev_id, ret); 1963 return ret; 1964 } 1965 1966 return 0; 1967 } 1968 1969 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1970 { 1971 struct ath10k *ar = arvif->ar; 1972 struct ieee80211_vif *vif = arvif->vif; 1973 int ret; 1974 1975 lockdep_assert_held(&arvif->ar->conf_mutex); 1976 1977 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1978 return; 1979 1980 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1981 return; 1982 1983 if (!vif->csa_active) 1984 return; 1985 1986 if (!arvif->is_up) 1987 return; 1988 1989 if (!ieee80211_csa_is_complete(vif)) { 1990 ieee80211_csa_update_counter(vif); 1991 1992 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1993 if (ret) 1994 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1995 ret); 1996 1997 ret = ath10k_mac_setup_prb_tmpl(arvif); 1998 if (ret) 1999 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 2000 ret); 2001 } else { 2002 ieee80211_csa_finish(vif); 2003 } 2004 } 2005 2006 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 2007 { 2008 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2009 ap_csa_work); 2010 struct ath10k *ar = arvif->ar; 2011 2012 mutex_lock(&ar->conf_mutex); 2013 ath10k_mac_vif_ap_csa_count_down(arvif); 2014 mutex_unlock(&ar->conf_mutex); 2015 } 2016 2017 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 2018 struct ieee80211_vif *vif) 2019 { 2020 struct sk_buff *skb = data; 2021 struct ieee80211_mgmt *mgmt = (void *)skb->data; 2022 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2023 2024 if (vif->type != NL80211_IFTYPE_STATION) 2025 return; 2026 2027 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 2028 return; 2029 2030 cancel_delayed_work(&arvif->connection_loss_work); 2031 } 2032 2033 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 2034 { 2035 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2036 IEEE80211_IFACE_ITER_NORMAL, 2037 ath10k_mac_handle_beacon_iter, 2038 skb); 2039 } 2040 2041 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 2042 struct ieee80211_vif *vif) 2043 { 2044 u32 *vdev_id = data; 2045 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2046 struct ath10k *ar = arvif->ar; 2047 struct ieee80211_hw *hw = ar->hw; 2048 2049 if (arvif->vdev_id != *vdev_id) 2050 return; 2051 2052 if (!arvif->is_up) 2053 return; 2054 2055 ieee80211_beacon_loss(vif); 2056 2057 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 2058 * (done by mac80211) succeeds but beacons do not resume then it 2059 * doesn't make sense to continue operation. Queue connection loss work 2060 * which can be cancelled when beacon is received. 
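	 * The work is cancelled from ath10k_mac_handle_beacon_iter() once a
	 * beacon from the associated BSS is seen again.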
2061 */ 2062 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 2063 ATH10K_CONNECTION_LOSS_HZ); 2064 } 2065 2066 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 2067 { 2068 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2069 IEEE80211_IFACE_ITER_NORMAL, 2070 ath10k_mac_handle_beacon_miss_iter, 2071 &vdev_id); 2072 } 2073 2074 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 2075 { 2076 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 2077 connection_loss_work.work); 2078 struct ieee80211_vif *vif = arvif->vif; 2079 2080 if (!arvif->is_up) 2081 return; 2082 2083 ieee80211_connection_loss(vif); 2084 } 2085 2086 /**********************/ 2087 /* Station management */ 2088 /**********************/ 2089 2090 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 2091 struct ieee80211_vif *vif) 2092 { 2093 /* Some firmware revisions have unstable STA powersave when listen 2094 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 2095 * generate NullFunc frames properly even if buffered frames have been 2096 * indicated in Beacon TIM. Firmware would seldom wake up to pull 2097 * buffered frames. Often pinging the device from AP would simply fail. 2098 * 2099 * As a workaround set it to 1. 2100 */ 2101 if (vif->type == NL80211_IFTYPE_STATION) 2102 return 1; 2103 2104 return ar->hw->conf.listen_interval; 2105 } 2106 2107 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 2108 struct ieee80211_vif *vif, 2109 struct ieee80211_sta *sta, 2110 struct wmi_peer_assoc_complete_arg *arg) 2111 { 2112 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2113 u32 aid; 2114 2115 lockdep_assert_held(&ar->conf_mutex); 2116 2117 if (vif->type == NL80211_IFTYPE_STATION) 2118 aid = vif->bss_conf.aid; 2119 else 2120 aid = sta->aid; 2121 2122 ether_addr_copy(arg->addr, sta->addr); 2123 arg->vdev_id = arvif->vdev_id; 2124 arg->peer_aid = aid; 2125 arg->peer_flags |= arvif->ar->wmi.peer_flags->auth; 2126 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 2127 arg->peer_num_spatial_streams = 1; 2128 arg->peer_caps = vif->bss_conf.assoc_capability; 2129 } 2130 2131 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 2132 struct ieee80211_vif *vif, 2133 struct ieee80211_sta *sta, 2134 struct wmi_peer_assoc_complete_arg *arg) 2135 { 2136 struct ieee80211_bss_conf *info = &vif->bss_conf; 2137 struct cfg80211_chan_def def; 2138 struct cfg80211_bss *bss; 2139 const u8 *rsnie = NULL; 2140 const u8 *wpaie = NULL; 2141 2142 lockdep_assert_held(&ar->conf_mutex); 2143 2144 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2145 return; 2146 2147 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 2148 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 2149 if (bss) { 2150 const struct cfg80211_bss_ies *ies; 2151 2152 rcu_read_lock(); 2153 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 2154 2155 ies = rcu_dereference(bss->ies); 2156 2157 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 2158 WLAN_OUI_TYPE_MICROSOFT_WPA, 2159 ies->data, 2160 ies->len); 2161 rcu_read_unlock(); 2162 cfg80211_put_bss(ar->hw->wiphy, bss); 2163 } 2164 2165 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 2166 if (rsnie || wpaie) { 2167 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 2168 arg->peer_flags |= ar->wmi.peer_flags->need_ptk_4_way; 2169 } 2170 2171 if (wpaie) { 2172 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 2173 arg->peer_flags |= ar->wmi.peer_flags->need_gtk_2_way; 2174 } 2175 2176 if (sta->mfp && 2177 test_bit(ATH10K_FW_FEATURE_MFP_SUPPORT, 2178 ar->running_fw->fw_file.fw_features)) { 2179 arg->peer_flags |= ar->wmi.peer_flags->pmf; 2180 } 2181 } 2182 2183 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 2184 struct ieee80211_vif *vif, 2185 struct ieee80211_sta *sta, 2186 struct wmi_peer_assoc_complete_arg *arg) 2187 { 2188 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2189 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 2190 struct cfg80211_chan_def def; 2191 const struct ieee80211_supported_band *sband; 2192 const struct ieee80211_rate *rates; 2193 enum nl80211_band band; 2194 u32 ratemask; 2195 u8 rate; 2196 int i; 2197 2198 lockdep_assert_held(&ar->conf_mutex); 2199 2200 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2201 return; 2202 2203 band = def.chan->band; 2204 sband = ar->hw->wiphy->bands[band]; 2205 ratemask = sta->supp_rates[band]; 2206 ratemask &= arvif->bitrate_mask.control[band].legacy; 2207 rates = sband->bitrates; 2208 2209 rateset->num_rates = 0; 2210 2211 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2212 if (!(ratemask & 1)) 2213 continue; 2214 2215 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2216 rateset->rates[rateset->num_rates] = rate; 2217 rateset->num_rates++; 2218 } 2219 } 2220 2221 static bool 2222 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2223 { 2224 int nss; 2225 2226 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2227 if (ht_mcs_mask[nss]) 2228 return false; 2229 2230 return true; 2231 } 2232 2233 static bool 2234 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2235 { 2236 int nss; 2237 2238 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2239 if (vht_mcs_mask[nss]) 2240 return false; 2241 2242 return true; 2243 } 2244 2245 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2246 struct ieee80211_vif *vif, 2247 struct ieee80211_sta *sta, 2248 struct wmi_peer_assoc_complete_arg *arg) 2249 { 2250 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2251 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2252 struct cfg80211_chan_def def; 2253 enum nl80211_band band; 2254 const u8 *ht_mcs_mask; 2255 const u16 *vht_mcs_mask; 2256 int i, n; 2257 u8 max_nss; 2258 u32 stbc; 2259 2260 lockdep_assert_held(&ar->conf_mutex); 2261 2262 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2263 return; 2264 2265 if (!ht_cap->ht_supported) 2266 return; 2267 2268 band = def.chan->band; 2269 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2270 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2271 2272 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2273 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2274 return; 2275 2276 arg->peer_flags |= ar->wmi.peer_flags->ht; 2277 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2278 ht_cap->ampdu_factor)) - 1; 2279 2280 arg->peer_mpdu_density = 2281 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2282 2283 arg->peer_ht_caps = ht_cap->cap; 2284 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2285 2286 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2287 arg->peer_flags |= ar->wmi.peer_flags->ldbc; 2288 2289 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2290 
arg->peer_flags |= ar->wmi.peer_flags->bw40; 2291 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2292 } 2293 2294 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2295 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_20) 2296 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2297 2298 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2299 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2300 } 2301 2302 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2303 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2304 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2305 } 2306 2307 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2308 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2309 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2310 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2311 arg->peer_rate_caps |= stbc; 2312 arg->peer_flags |= ar->wmi.peer_flags->stbc; 2313 } 2314 2315 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2316 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2317 else if (ht_cap->mcs.rx_mask[1]) 2318 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2319 2320 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2321 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2322 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2323 max_nss = (i / 8) + 1; 2324 arg->peer_ht_rates.rates[n++] = i; 2325 } 2326 2327 /* 2328 * This is a workaround for HT-enabled STAs which break the spec 2329 * and have no HT capabilities RX mask (no HT RX MCS map). 2330 * 2331 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2332 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2333 * 2334 * Firmware asserts if such situation occurs. 2335 */ 2336 if (n == 0) { 2337 arg->peer_ht_rates.num_rates = 8; 2338 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2339 arg->peer_ht_rates.rates[i] = i; 2340 } else { 2341 arg->peer_ht_rates.num_rates = n; 2342 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2343 } 2344 2345 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2346 arg->addr, 2347 arg->peer_ht_rates.num_rates, 2348 arg->peer_num_spatial_streams); 2349 } 2350 2351 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2352 struct ath10k_vif *arvif, 2353 struct ieee80211_sta *sta) 2354 { 2355 u32 uapsd = 0; 2356 u32 max_sp = 0; 2357 int ret = 0; 2358 2359 lockdep_assert_held(&ar->conf_mutex); 2360 2361 if (sta->wme && sta->uapsd_queues) { 2362 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2363 sta->uapsd_queues, sta->max_sp); 2364 2365 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2366 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2367 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2368 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2369 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2370 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2371 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2372 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2373 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2374 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2375 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2376 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2377 2378 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2379 max_sp = sta->max_sp; 2380 2381 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2382 sta->addr, 2383 WMI_AP_PS_PEER_PARAM_UAPSD, 2384 uapsd); 2385 if (ret) { 2386 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2387 arvif->vdev_id, ret); 2388 return ret; 2389 } 2390 2391 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2392 sta->addr, 2393 
WMI_AP_PS_PEER_PARAM_MAX_SP, 2394 max_sp); 2395 if (ret) { 2396 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2397 arvif->vdev_id, ret); 2398 return ret; 2399 } 2400 2401 /* TODO setup this based on STA listen interval and 2402 * beacon interval. Currently we don't know 2403 * sta->listen_interval - mac80211 patch required. 2404 * Currently use 10 seconds 2405 */ 2406 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2407 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2408 10); 2409 if (ret) { 2410 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2411 arvif->vdev_id, ret); 2412 return ret; 2413 } 2414 } 2415 2416 return 0; 2417 } 2418 2419 static u16 2420 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2421 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2422 { 2423 int idx_limit; 2424 int nss; 2425 u16 mcs_map; 2426 u16 mcs; 2427 2428 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2429 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2430 vht_mcs_limit[nss]; 2431 2432 if (mcs_map) 2433 idx_limit = fls(mcs_map) - 1; 2434 else 2435 idx_limit = -1; 2436 2437 switch (idx_limit) { 2438 case 0: /* fall through */ 2439 case 1: /* fall through */ 2440 case 2: /* fall through */ 2441 case 3: /* fall through */ 2442 case 4: /* fall through */ 2443 case 5: /* fall through */ 2444 case 6: /* fall through */ 2445 default: 2446 /* see ath10k_mac_can_set_bitrate_mask() */ 2447 WARN_ON(1); 2448 /* fall through */ 2449 case -1: 2450 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2451 break; 2452 case 7: 2453 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2454 break; 2455 case 8: 2456 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2457 break; 2458 case 9: 2459 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2460 break; 2461 } 2462 2463 tx_mcs_set &= ~(0x3 << (nss * 2)); 2464 tx_mcs_set |= mcs << (nss * 2); 2465 } 2466 2467 return tx_mcs_set; 2468 } 2469 2470 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2471 struct ieee80211_vif *vif, 2472 struct ieee80211_sta *sta, 2473 struct wmi_peer_assoc_complete_arg *arg) 2474 { 2475 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2476 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2477 struct cfg80211_chan_def def; 2478 enum nl80211_band band; 2479 const u16 *vht_mcs_mask; 2480 u8 ampdu_factor; 2481 u8 max_nss, vht_mcs; 2482 int i; 2483 2484 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2485 return; 2486 2487 if (!vht_cap->vht_supported) 2488 return; 2489 2490 band = def.chan->band; 2491 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2492 2493 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2494 return; 2495 2496 arg->peer_flags |= ar->wmi.peer_flags->vht; 2497 2498 if (def.chan->band == NL80211_BAND_2GHZ) 2499 arg->peer_flags |= ar->wmi.peer_flags->vht_2g; 2500 2501 arg->peer_vht_caps = vht_cap->cap; 2502 2503 ampdu_factor = (vht_cap->cap & 2504 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2505 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2506 2507 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2508 * zero in VHT IE. Using it would result in degraded throughput. 2509 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2510 * it if VHT max_mpdu is smaller. 
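 * Hence the max() below: the larger of the HT and VHT A-MPDU length
 * limits is used.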
2511 */ 2512 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2513 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2514 ampdu_factor)) - 1); 2515 2516 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2517 arg->peer_flags |= ar->wmi.peer_flags->bw80; 2518 2519 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) 2520 arg->peer_flags |= ar->wmi.peer_flags->bw160; 2521 2522 /* Calculate peer NSS capability from VHT capabilities if STA 2523 * supports VHT. 2524 */ 2525 for (i = 0, max_nss = 0, vht_mcs = 0; i < NL80211_VHT_NSS_MAX; i++) { 2526 vht_mcs = __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map) >> 2527 (2 * i) & 3; 2528 2529 if ((vht_mcs != IEEE80211_VHT_MCS_NOT_SUPPORTED) && 2530 vht_mcs_mask[i]) 2531 max_nss = i + 1; 2532 } 2533 arg->peer_num_spatial_streams = min(sta->rx_nss, max_nss); 2534 arg->peer_vht_rates.rx_max_rate = 2535 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2536 arg->peer_vht_rates.rx_mcs_set = 2537 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2538 arg->peer_vht_rates.tx_max_rate = 2539 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2540 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2541 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2542 2543 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2544 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2545 2546 if (arg->peer_vht_rates.rx_max_rate && 2547 (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK)) { 2548 switch (arg->peer_vht_rates.rx_max_rate) { 2549 case 1560: 2550 /* Must be 2x2 at 160Mhz is all it can do. */ 2551 arg->peer_bw_rxnss_override = 2; 2552 break; 2553 case 780: 2554 /* Can only do 1x1 at 160Mhz (Long Guard Interval) */ 2555 arg->peer_bw_rxnss_override = 1; 2556 break; 2557 } 2558 } 2559 } 2560 2561 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2562 struct ieee80211_vif *vif, 2563 struct ieee80211_sta *sta, 2564 struct wmi_peer_assoc_complete_arg *arg) 2565 { 2566 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2567 2568 switch (arvif->vdev_type) { 2569 case WMI_VDEV_TYPE_AP: 2570 if (sta->wme) 2571 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2572 2573 if (sta->wme && sta->uapsd_queues) { 2574 arg->peer_flags |= arvif->ar->wmi.peer_flags->apsd; 2575 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2576 } 2577 break; 2578 case WMI_VDEV_TYPE_STA: 2579 if (sta->wme) 2580 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2581 break; 2582 case WMI_VDEV_TYPE_IBSS: 2583 if (sta->wme) 2584 arg->peer_flags |= arvif->ar->wmi.peer_flags->qos; 2585 break; 2586 default: 2587 break; 2588 } 2589 2590 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2591 sta->addr, !!(arg->peer_flags & 2592 arvif->ar->wmi.peer_flags->qos)); 2593 } 2594 2595 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2596 { 2597 return sta->supp_rates[NL80211_BAND_2GHZ] >> 2598 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2599 } 2600 2601 static enum wmi_phy_mode ath10k_mac_get_phymode_vht(struct ath10k *ar, 2602 struct ieee80211_sta *sta) 2603 { 2604 if (sta->bandwidth == IEEE80211_STA_RX_BW_160) { 2605 switch (sta->vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { 2606 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: 2607 return MODE_11AC_VHT160; 2608 case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160_80PLUS80MHZ: 2609 return MODE_11AC_VHT80_80; 2610 default: 2611 /* not sure if this is a valid case? 
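 * Fall back to VHT160 since mac80211 reported a 160 MHz STA bandwidth
 * anyway.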
*/ 2612 return MODE_11AC_VHT160; 2613 } 2614 } 2615 2616 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2617 return MODE_11AC_VHT80; 2618 2619 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2620 return MODE_11AC_VHT40; 2621 2622 if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2623 return MODE_11AC_VHT20; 2624 2625 return MODE_UNKNOWN; 2626 } 2627 2628 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2629 struct ieee80211_vif *vif, 2630 struct ieee80211_sta *sta, 2631 struct wmi_peer_assoc_complete_arg *arg) 2632 { 2633 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2634 struct cfg80211_chan_def def; 2635 enum nl80211_band band; 2636 const u8 *ht_mcs_mask; 2637 const u16 *vht_mcs_mask; 2638 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2639 2640 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2641 return; 2642 2643 band = def.chan->band; 2644 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2645 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2646 2647 switch (band) { 2648 case NL80211_BAND_2GHZ: 2649 if (sta->vht_cap.vht_supported && 2650 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2651 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2652 phymode = MODE_11AC_VHT40; 2653 else 2654 phymode = MODE_11AC_VHT20; 2655 } else if (sta->ht_cap.ht_supported && 2656 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2657 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2658 phymode = MODE_11NG_HT40; 2659 else 2660 phymode = MODE_11NG_HT20; 2661 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2662 phymode = MODE_11G; 2663 } else { 2664 phymode = MODE_11B; 2665 } 2666 2667 break; 2668 case NL80211_BAND_5GHZ: 2669 /* 2670 * Check VHT first. 2671 */ 2672 if (sta->vht_cap.vht_supported && 2673 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2674 phymode = ath10k_mac_get_phymode_vht(ar, sta); 2675 } else if (sta->ht_cap.ht_supported && 2676 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2677 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2678 phymode = MODE_11NA_HT40; 2679 else 2680 phymode = MODE_11NA_HT20; 2681 } else { 2682 phymode = MODE_11A; 2683 } 2684 2685 break; 2686 default: 2687 break; 2688 } 2689 2690 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2691 sta->addr, ath10k_wmi_phymode_str(phymode)); 2692 2693 arg->peer_phymode = phymode; 2694 WARN_ON(phymode == MODE_UNKNOWN); 2695 } 2696 2697 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2698 struct ieee80211_vif *vif, 2699 struct ieee80211_sta *sta, 2700 struct wmi_peer_assoc_complete_arg *arg) 2701 { 2702 lockdep_assert_held(&ar->conf_mutex); 2703 2704 memset(arg, 0, sizeof(*arg)); 2705 2706 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2707 ath10k_peer_assoc_h_crypto(ar, vif, sta, arg); 2708 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2709 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2710 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2711 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2712 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2713 2714 return 0; 2715 } 2716 2717 static const u32 ath10k_smps_map[] = { 2718 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2719 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2720 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2721 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2722 }; 2723 2724 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2725 const u8 *addr, 2726 const struct ieee80211_sta_ht_cap *ht_cap) 2727 { 2728 int smps; 2729 2730 if (!ht_cap->ht_supported) 2731 return 0; 2732 2733 smps = ht_cap->cap & 
IEEE80211_HT_CAP_SM_PS; 2734 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2735 2736 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2737 return -EINVAL; 2738 2739 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2740 WMI_PEER_SMPS_STATE, 2741 ath10k_smps_map[smps]); 2742 } 2743 2744 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2745 struct ieee80211_vif *vif, 2746 struct ieee80211_sta_vht_cap vht_cap) 2747 { 2748 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2749 int ret; 2750 u32 param; 2751 u32 value; 2752 2753 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_AFTER_ASSOC) 2754 return 0; 2755 2756 if (!(ar->vht_cap_info & 2757 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2758 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2759 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2760 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2761 return 0; 2762 2763 param = ar->wmi.vdev_param->txbf; 2764 value = 0; 2765 2766 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2767 return 0; 2768 2769 /* The following logic is correct. If a remote STA advertises support 2770 * for being a beamformer then we should enable us being a beamformee. 2771 */ 2772 2773 if (ar->vht_cap_info & 2774 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2775 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2776 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2777 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2778 2779 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2780 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2781 } 2782 2783 if (ar->vht_cap_info & 2784 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2785 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2786 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2787 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2788 2789 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2790 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2791 } 2792 2793 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2794 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2795 2796 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2797 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2798 2799 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2800 if (ret) { 2801 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2802 value, ret); 2803 return ret; 2804 } 2805 2806 return 0; 2807 } 2808 2809 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2810 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2811 struct ieee80211_vif *vif, 2812 struct ieee80211_bss_conf *bss_conf) 2813 { 2814 struct ath10k *ar = hw->priv; 2815 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2816 struct ieee80211_sta_ht_cap ht_cap; 2817 struct ieee80211_sta_vht_cap vht_cap; 2818 struct wmi_peer_assoc_complete_arg peer_arg; 2819 struct ieee80211_sta *ap_sta; 2820 int ret; 2821 2822 lockdep_assert_held(&ar->conf_mutex); 2823 2824 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2825 arvif->vdev_id, arvif->bssid, arvif->aid); 2826 2827 rcu_read_lock(); 2828 2829 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2830 if (!ap_sta) { 2831 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2832 bss_conf->bssid, arvif->vdev_id); 2833 rcu_read_unlock(); 2834 return; 2835 } 2836 2837 /* ap_sta must be accessed only within rcu section which must be left 2838 * before calling ath10k_setup_peer_smps() which might sleep. 
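 * The HT/VHT capabilities are copied by value below so they remain
 * usable after rcu_read_unlock().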
2839 */ 2840 ht_cap = ap_sta->ht_cap; 2841 vht_cap = ap_sta->vht_cap; 2842 2843 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2844 if (ret) { 2845 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2846 bss_conf->bssid, arvif->vdev_id, ret); 2847 rcu_read_unlock(); 2848 return; 2849 } 2850 2851 rcu_read_unlock(); 2852 2853 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2854 if (ret) { 2855 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2856 bss_conf->bssid, arvif->vdev_id, ret); 2857 return; 2858 } 2859 2860 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2861 if (ret) { 2862 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2863 arvif->vdev_id, ret); 2864 return; 2865 } 2866 2867 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2868 if (ret) { 2869 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2870 arvif->vdev_id, bss_conf->bssid, ret); 2871 return; 2872 } 2873 2874 ath10k_dbg(ar, ATH10K_DBG_MAC, 2875 "mac vdev %d up (associated) bssid %pM aid %d\n", 2876 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2877 2878 WARN_ON(arvif->is_up); 2879 2880 arvif->aid = bss_conf->aid; 2881 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2882 2883 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2884 if (ret) { 2885 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2886 arvif->vdev_id, ret); 2887 return; 2888 } 2889 2890 arvif->is_up = true; 2891 2892 /* Workaround: Some firmware revisions (tested with qca6174 2893 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2894 * poked with peer param command. 2895 */ 2896 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2897 WMI_PEER_DUMMY_VAR, 1); 2898 if (ret) { 2899 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2900 arvif->bssid, arvif->vdev_id, ret); 2901 return; 2902 } 2903 } 2904 2905 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2906 struct ieee80211_vif *vif) 2907 { 2908 struct ath10k *ar = hw->priv; 2909 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2910 struct ieee80211_sta_vht_cap vht_cap = {}; 2911 int ret; 2912 2913 lockdep_assert_held(&ar->conf_mutex); 2914 2915 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2916 arvif->vdev_id, arvif->bssid); 2917 2918 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2919 if (ret) 2920 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2921 arvif->vdev_id, ret); 2922 2923 arvif->def_wep_key_idx = -1; 2924 2925 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2926 if (ret) { 2927 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2928 arvif->vdev_id, ret); 2929 return; 2930 } 2931 2932 arvif->is_up = false; 2933 2934 cancel_delayed_work_sync(&arvif->connection_loss_work); 2935 } 2936 2937 static int ath10k_station_assoc(struct ath10k *ar, 2938 struct ieee80211_vif *vif, 2939 struct ieee80211_sta *sta, 2940 bool reassoc) 2941 { 2942 struct ath10k_vif *arvif = (void *)vif->drv_priv; 2943 struct wmi_peer_assoc_complete_arg peer_arg; 2944 int ret = 0; 2945 2946 lockdep_assert_held(&ar->conf_mutex); 2947 2948 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2949 if (ret) { 2950 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2951 sta->addr, arvif->vdev_id, ret); 2952 return ret; 2953 } 2954 2955 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2956 if (ret) { 2957 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2958 
sta->addr, arvif->vdev_id, ret); 2959 return ret; 2960 } 2961 2962 /* Re-assoc is run only to update supported rates for given station. It 2963 * doesn't make much sense to reconfigure the peer completely. 2964 */ 2965 if (!reassoc) { 2966 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2967 &sta->ht_cap); 2968 if (ret) { 2969 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2970 arvif->vdev_id, ret); 2971 return ret; 2972 } 2973 2974 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2975 if (ret) { 2976 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2977 sta->addr, arvif->vdev_id, ret); 2978 return ret; 2979 } 2980 2981 if (!sta->wme) { 2982 arvif->num_legacy_stations++; 2983 ret = ath10k_recalc_rtscts_prot(arvif); 2984 if (ret) { 2985 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2986 arvif->vdev_id, ret); 2987 return ret; 2988 } 2989 } 2990 2991 /* Plumb cached keys only for static WEP */ 2992 if ((arvif->def_wep_key_idx != -1) && (!sta->tdls)) { 2993 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2994 if (ret) { 2995 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2996 arvif->vdev_id, ret); 2997 return ret; 2998 } 2999 } 3000 } 3001 3002 return ret; 3003 } 3004 3005 static int ath10k_station_disassoc(struct ath10k *ar, 3006 struct ieee80211_vif *vif, 3007 struct ieee80211_sta *sta) 3008 { 3009 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3010 int ret = 0; 3011 3012 lockdep_assert_held(&ar->conf_mutex); 3013 3014 if (!sta->wme) { 3015 arvif->num_legacy_stations--; 3016 ret = ath10k_recalc_rtscts_prot(arvif); 3017 if (ret) { 3018 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 3019 arvif->vdev_id, ret); 3020 return ret; 3021 } 3022 } 3023 3024 ret = ath10k_clear_peer_keys(arvif, sta->addr); 3025 if (ret) { 3026 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 3027 arvif->vdev_id, ret); 3028 return ret; 3029 } 3030 3031 return ret; 3032 } 3033 3034 /**************/ 3035 /* Regulatory */ 3036 /**************/ 3037 3038 static int ath10k_update_channel_list(struct ath10k *ar) 3039 { 3040 struct ieee80211_hw *hw = ar->hw; 3041 struct ieee80211_supported_band **bands; 3042 enum nl80211_band band; 3043 struct ieee80211_channel *channel; 3044 struct wmi_scan_chan_list_arg arg = {0}; 3045 struct wmi_channel_arg *ch; 3046 bool passive; 3047 int len; 3048 int ret; 3049 int i; 3050 3051 lockdep_assert_held(&ar->conf_mutex); 3052 3053 bands = hw->wiphy->bands; 3054 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3055 if (!bands[band]) 3056 continue; 3057 3058 for (i = 0; i < bands[band]->n_channels; i++) { 3059 if (bands[band]->channels[i].flags & 3060 IEEE80211_CHAN_DISABLED) 3061 continue; 3062 3063 arg.n_channels++; 3064 } 3065 } 3066 3067 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 3068 arg.channels = kzalloc(len, GFP_KERNEL); 3069 if (!arg.channels) 3070 return -ENOMEM; 3071 3072 ch = arg.channels; 3073 for (band = 0; band < NUM_NL80211_BANDS; band++) { 3074 if (!bands[band]) 3075 continue; 3076 3077 for (i = 0; i < bands[band]->n_channels; i++) { 3078 channel = &bands[band]->channels[i]; 3079 3080 if (channel->flags & IEEE80211_CHAN_DISABLED) 3081 continue; 3082 3083 ch->allow_ht = true; 3084 3085 /* FIXME: when should we really allow VHT? 
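 * For now VHT is allowed on every enabled channel, same as HT above.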
*/ 3086 ch->allow_vht = true; 3087 3088 ch->allow_ibss = 3089 !(channel->flags & IEEE80211_CHAN_NO_IR); 3090 3091 ch->ht40plus = 3092 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 3093 3094 ch->chan_radar = 3095 !!(channel->flags & IEEE80211_CHAN_RADAR); 3096 3097 passive = channel->flags & IEEE80211_CHAN_NO_IR; 3098 ch->passive = passive; 3099 3100 /* the firmware is ignoring the "radar" flag of the 3101 * channel and is scanning actively using Probe Requests 3102 * on "Radar detection"/DFS channels which are not 3103 * marked as "available" 3104 */ 3105 ch->passive |= ch->chan_radar; 3106 3107 ch->freq = channel->center_freq; 3108 ch->band_center_freq1 = channel->center_freq; 3109 ch->min_power = 0; 3110 ch->max_power = channel->max_power * 2; 3111 ch->max_reg_power = channel->max_reg_power * 2; 3112 ch->max_antenna_gain = channel->max_antenna_gain * 2; 3113 ch->reg_class_id = 0; /* FIXME */ 3114 3115 /* FIXME: why use only legacy modes, why not any 3116 * HT/VHT modes? Would that even make any 3117 * difference? 3118 */ 3119 if (channel->band == NL80211_BAND_2GHZ) 3120 ch->mode = MODE_11G; 3121 else 3122 ch->mode = MODE_11A; 3123 3124 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 3125 continue; 3126 3127 ath10k_dbg(ar, ATH10K_DBG_WMI, 3128 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 3129 ch - arg.channels, arg.n_channels, 3130 ch->freq, ch->max_power, ch->max_reg_power, 3131 ch->max_antenna_gain, ch->mode); 3132 3133 ch++; 3134 } 3135 } 3136 3137 ret = ath10k_wmi_scan_chan_list(ar, &arg); 3138 kfree(arg.channels); 3139 3140 return ret; 3141 } 3142 3143 static enum wmi_dfs_region 3144 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 3145 { 3146 switch (dfs_region) { 3147 case NL80211_DFS_UNSET: 3148 return WMI_UNINIT_DFS_DOMAIN; 3149 case NL80211_DFS_FCC: 3150 return WMI_FCC_DFS_DOMAIN; 3151 case NL80211_DFS_ETSI: 3152 return WMI_ETSI_DFS_DOMAIN; 3153 case NL80211_DFS_JP: 3154 return WMI_MKK4_DFS_DOMAIN; 3155 } 3156 return WMI_UNINIT_DFS_DOMAIN; 3157 } 3158 3159 static void ath10k_regd_update(struct ath10k *ar) 3160 { 3161 struct reg_dmn_pair_mapping *regpair; 3162 int ret; 3163 enum wmi_dfs_region wmi_dfs_reg; 3164 enum nl80211_dfs_regions nl_dfs_reg; 3165 3166 lockdep_assert_held(&ar->conf_mutex); 3167 3168 ret = ath10k_update_channel_list(ar); 3169 if (ret) 3170 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 3171 3172 regpair = ar->ath_common.regulatory.regpair; 3173 3174 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3175 nl_dfs_reg = ar->dfs_detector->region; 3176 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 3177 } else { 3178 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 3179 } 3180 3181 /* Target allows setting up per-band regdomain but ath_common provides 3182 * a combined one only 3183 */ 3184 ret = ath10k_wmi_pdev_set_regdomain(ar, 3185 regpair->reg_domain, 3186 regpair->reg_domain, /* 2ghz */ 3187 regpair->reg_domain, /* 5ghz */ 3188 regpair->reg_2ghz_ctl, 3189 regpair->reg_5ghz_ctl, 3190 wmi_dfs_reg); 3191 if (ret) 3192 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 3193 } 3194 3195 static void ath10k_mac_update_channel_list(struct ath10k *ar, 3196 struct ieee80211_supported_band *band) 3197 { 3198 int i; 3199 3200 if (ar->low_5ghz_chan && ar->high_5ghz_chan) { 3201 for (i = 0; i < band->n_channels; i++) { 3202 if (band->channels[i].center_freq < ar->low_5ghz_chan || 3203 band->channels[i].center_freq > ar->high_5ghz_chan) 3204 band->channels[i].flags |= 3205 IEEE80211_CHAN_DISABLED; 
3206 } 3207 } 3208 } 3209 3210 static void ath10k_reg_notifier(struct wiphy *wiphy, 3211 struct regulatory_request *request) 3212 { 3213 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 3214 struct ath10k *ar = hw->priv; 3215 bool result; 3216 3217 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 3218 3219 if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 3220 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 3221 request->dfs_region); 3222 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 3223 request->dfs_region); 3224 if (!result) 3225 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every pulse\n", 3226 request->dfs_region); 3227 } 3228 3229 mutex_lock(&ar->conf_mutex); 3230 if (ar->state == ATH10K_STATE_ON) 3231 ath10k_regd_update(ar); 3232 mutex_unlock(&ar->conf_mutex); 3233 3234 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 3235 ath10k_mac_update_channel_list(ar, 3236 ar->hw->wiphy->bands[NL80211_BAND_5GHZ]); 3237 } 3238 3239 static void ath10k_stop_radar_confirmation(struct ath10k *ar) 3240 { 3241 spin_lock_bh(&ar->data_lock); 3242 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_STOPPED; 3243 spin_unlock_bh(&ar->data_lock); 3244 3245 cancel_work_sync(&ar->radar_confirmation_work); 3246 } 3247 3248 /***************/ 3249 /* TX handlers */ 3250 /***************/ 3251 3252 enum ath10k_mac_tx_path { 3253 ATH10K_MAC_TX_HTT, 3254 ATH10K_MAC_TX_HTT_MGMT, 3255 ATH10K_MAC_TX_WMI_MGMT, 3256 ATH10K_MAC_TX_UNKNOWN, 3257 }; 3258 3259 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 3260 { 3261 lockdep_assert_held(&ar->htt.tx_lock); 3262 3263 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3264 ar->tx_paused |= BIT(reason); 3265 ieee80211_stop_queues(ar->hw); 3266 } 3267 3268 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 3269 struct ieee80211_vif *vif) 3270 { 3271 struct ath10k *ar = data; 3272 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3273 3274 if (arvif->tx_paused) 3275 return; 3276 3277 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3278 } 3279 3280 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 3281 { 3282 lockdep_assert_held(&ar->htt.tx_lock); 3283 3284 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 3285 ar->tx_paused &= ~BIT(reason); 3286 3287 if (ar->tx_paused) 3288 return; 3289 3290 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3291 IEEE80211_IFACE_ITER_RESUME_ALL, 3292 ath10k_mac_tx_unlock_iter, 3293 ar); 3294 3295 ieee80211_wake_queue(ar->hw, ar->hw->offchannel_tx_hw_queue); 3296 } 3297 3298 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3299 { 3300 struct ath10k *ar = arvif->ar; 3301 3302 lockdep_assert_held(&ar->htt.tx_lock); 3303 3304 WARN_ON(reason >= BITS_PER_LONG); 3305 arvif->tx_paused |= BIT(reason); 3306 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3307 } 3308 3309 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3310 { 3311 struct ath10k *ar = arvif->ar; 3312 3313 lockdep_assert_held(&ar->htt.tx_lock); 3314 3315 WARN_ON(reason >= BITS_PER_LONG); 3316 arvif->tx_paused &= ~BIT(reason); 3317 3318 if (ar->tx_paused) 3319 return; 3320 3321 if (arvif->tx_paused) 3322 return; 3323 3324 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3325 } 3326 3327 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3328 enum wmi_tlv_tx_pause_id pause_id, 3329 enum wmi_tlv_tx_pause_action action) 3330 { 3331 struct ath10k *ar = arvif->ar; 3332 3333 lockdep_assert_held(&ar->htt.tx_lock); 3334 3335 switch (action) { 3336 case 
WMI_TLV_TX_PAUSE_ACTION_STOP: 3337 ath10k_mac_vif_tx_lock(arvif, pause_id); 3338 break; 3339 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3340 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3341 break; 3342 default: 3343 ath10k_dbg(ar, ATH10K_DBG_BOOT, 3344 "received unknown tx pause action %d on vdev %i, ignoring\n", 3345 action, arvif->vdev_id); 3346 break; 3347 } 3348 } 3349 3350 struct ath10k_mac_tx_pause { 3351 u32 vdev_id; 3352 enum wmi_tlv_tx_pause_id pause_id; 3353 enum wmi_tlv_tx_pause_action action; 3354 }; 3355 3356 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3357 struct ieee80211_vif *vif) 3358 { 3359 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3360 struct ath10k_mac_tx_pause *arg = data; 3361 3362 if (arvif->vdev_id != arg->vdev_id) 3363 return; 3364 3365 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3366 } 3367 3368 void ath10k_mac_handle_tx_pause_vdev(struct ath10k *ar, u32 vdev_id, 3369 enum wmi_tlv_tx_pause_id pause_id, 3370 enum wmi_tlv_tx_pause_action action) 3371 { 3372 struct ath10k_mac_tx_pause arg = { 3373 .vdev_id = vdev_id, 3374 .pause_id = pause_id, 3375 .action = action, 3376 }; 3377 3378 spin_lock_bh(&ar->htt.tx_lock); 3379 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3380 IEEE80211_IFACE_ITER_RESUME_ALL, 3381 ath10k_mac_handle_tx_pause_iter, 3382 &arg); 3383 spin_unlock_bh(&ar->htt.tx_lock); 3384 } 3385 3386 static enum ath10k_hw_txrx_mode 3387 ath10k_mac_tx_h_get_txmode(struct ath10k *ar, 3388 struct ieee80211_vif *vif, 3389 struct ieee80211_sta *sta, 3390 struct sk_buff *skb) 3391 { 3392 const struct ieee80211_hdr *hdr = (void *)skb->data; 3393 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3394 __le16 fc = hdr->frame_control; 3395 3396 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3397 return ATH10K_HW_TXRX_RAW; 3398 3399 if (ieee80211_is_mgmt(fc)) 3400 return ATH10K_HW_TXRX_MGMT; 3401 3402 /* Workaround: 3403 * 3404 * NullFunc frames are mostly used to ping if a client or AP are still 3405 * reachable and responsive. This implies tx status reports must be 3406 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3407 * come to a conclusion that the other end disappeared and tear down 3408 * BSS connection or it can never disconnect from BSS/client (which is 3409 * the case). 3410 * 3411 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3412 * NullFunc frames to driver. However there's a HTT Mgmt Tx command 3413 * which seems to deliver correct tx reports for NullFunc frames. The 3414 * downside of using it is it ignores client powersave state so it can 3415 * end up disconnecting sleeping clients in AP mode. It should fix STA 3416 * mode though because AP don't sleep. 3417 */ 3418 if (ar->htt.target_version_major < 3 && 3419 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3420 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3421 ar->running_fw->fw_file.fw_features)) 3422 return ATH10K_HW_TXRX_MGMT; 3423 3424 /* Workaround: 3425 * 3426 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3427 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3428 * to work with Ethernet txmode so use it. 3429 * 3430 * FIXME: Check if raw mode works with TDLS. 
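 * (Hence data frames towards TDLS peers are sent in Ethernet txmode
 * below.)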
3431 */ 3432 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3433 return ATH10K_HW_TXRX_ETHERNET; 3434 3435 if (test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) || 3436 skb_cb->flags & ATH10K_SKB_F_RAW_TX) 3437 return ATH10K_HW_TXRX_RAW; 3438 3439 return ATH10K_HW_TXRX_NATIVE_WIFI; 3440 } 3441 3442 static bool ath10k_tx_h_use_hwcrypto(struct ieee80211_vif *vif, 3443 struct sk_buff *skb) 3444 { 3445 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3446 const struct ieee80211_hdr *hdr = (void *)skb->data; 3447 const u32 mask = IEEE80211_TX_INTFL_DONT_ENCRYPT | 3448 IEEE80211_TX_CTL_INJECTED; 3449 3450 if (!ieee80211_has_protected(hdr->frame_control)) 3451 return false; 3452 3453 if ((info->flags & mask) == mask) 3454 return false; 3455 3456 if (vif) 3457 return !((struct ath10k_vif *)vif->drv_priv)->nohwcrypt; 3458 3459 return true; 3460 } 3461 3462 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3463 * Control in the header. 3464 */ 3465 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3466 { 3467 struct ieee80211_hdr *hdr = (void *)skb->data; 3468 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3469 u8 *qos_ctl; 3470 3471 if (!ieee80211_is_data_qos(hdr->frame_control)) 3472 return; 3473 3474 qos_ctl = ieee80211_get_qos_ctl(hdr); 3475 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3476 skb->data, (void *)qos_ctl - (void *)skb->data); 3477 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3478 3479 /* Some firmware revisions don't handle sending QoS NullFunc well. 3480 * These frames are mainly used for CQM purposes so it doesn't really 3481 * matter whether QoS NullFunc or NullFunc are sent. 3482 */ 3483 hdr = (void *)skb->data; 3484 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3485 cb->flags &= ~ATH10K_SKB_F_QOS; 3486 3487 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3488 } 3489 3490 static void ath10k_tx_h_8023(struct sk_buff *skb) 3491 { 3492 struct ieee80211_hdr *hdr; 3493 struct rfc1042_hdr *rfc1042; 3494 struct ethhdr *eth; 3495 size_t hdrlen; 3496 u8 da[ETH_ALEN]; 3497 u8 sa[ETH_ALEN]; 3498 __be16 type; 3499 3500 hdr = (void *)skb->data; 3501 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3502 rfc1042 = (void *)skb->data + hdrlen; 3503 3504 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3505 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3506 type = rfc1042->snap_type; 3507 3508 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3509 skb_push(skb, sizeof(*eth)); 3510 3511 eth = (void *)skb->data; 3512 ether_addr_copy(eth->h_dest, da); 3513 ether_addr_copy(eth->h_source, sa); 3514 eth->h_proto = type; 3515 } 3516 3517 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3518 struct ieee80211_vif *vif, 3519 struct sk_buff *skb) 3520 { 3521 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3522 struct ath10k_vif *arvif = (void *)vif->drv_priv; 3523 3524 /* This is case only for P2P_GO */ 3525 if (vif->type != NL80211_IFTYPE_AP || !vif->p2p) 3526 return; 3527 3528 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3529 spin_lock_bh(&ar->data_lock); 3530 if (arvif->u.ap.noa_data) 3531 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3532 GFP_ATOMIC)) 3533 skb_put_data(skb, arvif->u.ap.noa_data, 3534 arvif->u.ap.noa_len); 3535 spin_unlock_bh(&ar->data_lock); 3536 } 3537 } 3538 3539 static void ath10k_mac_tx_h_fill_cb(struct ath10k *ar, 3540 struct ieee80211_vif *vif, 3541 struct ieee80211_txq *txq, 3542 struct sk_buff *skb, u16 airtime) 3543 { 3544 struct ieee80211_hdr *hdr = (void *)skb->data; 
3545 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3546 const struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3547 bool is_data = ieee80211_is_data(hdr->frame_control) || 3548 ieee80211_is_data_qos(hdr->frame_control); 3549 3550 cb->flags = 0; 3551 if (!ath10k_tx_h_use_hwcrypto(vif, skb)) 3552 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3553 3554 if (ieee80211_is_mgmt(hdr->frame_control)) 3555 cb->flags |= ATH10K_SKB_F_MGMT; 3556 3557 if (ieee80211_is_data_qos(hdr->frame_control)) 3558 cb->flags |= ATH10K_SKB_F_QOS; 3559 3560 /* Data frames encrypted in software will be posted to firmware 3561 * with tx encap mode set to RAW. Ex: Multicast traffic generated 3562 * for a specific VLAN group will always be encrypted in software. 3563 */ 3564 if (is_data && ieee80211_has_protected(hdr->frame_control) && 3565 !info->control.hw_key) { 3566 cb->flags |= ATH10K_SKB_F_NO_HWCRYPT; 3567 cb->flags |= ATH10K_SKB_F_RAW_TX; 3568 } 3569 3570 cb->vif = vif; 3571 cb->txq = txq; 3572 cb->airtime_est = airtime; 3573 } 3574 3575 bool ath10k_mac_tx_frm_has_freq(struct ath10k *ar) 3576 { 3577 /* FIXME: Not really sure since when the behaviour changed. At some 3578 * point new firmware stopped requiring creation of peer entries for 3579 * offchannel tx (and actually creating them causes issues with wmi-htc 3580 * tx credit replenishment and reliability). Assuming it's at least 3.4 3581 * because that's when the `freq` was introduced to TX_FRM HTT command. 3582 */ 3583 return (ar->htt.target_version_major >= 3 && 3584 ar->htt.target_version_minor >= 4 && 3585 ar->running_fw->fw_file.htt_op_version == ATH10K_FW_HTT_OP_VERSION_TLV); 3586 } 3587 3588 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3589 { 3590 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3591 int ret = 0; 3592 3593 spin_lock_bh(&ar->data_lock); 3594 3595 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3596 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3597 ret = -ENOSPC; 3598 goto unlock; 3599 } 3600 3601 __skb_queue_tail(q, skb); 3602 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3603 3604 unlock: 3605 spin_unlock_bh(&ar->data_lock); 3606 3607 return ret; 3608 } 3609 3610 static enum ath10k_mac_tx_path 3611 ath10k_mac_tx_h_get_txpath(struct ath10k *ar, 3612 struct sk_buff *skb, 3613 enum ath10k_hw_txrx_mode txmode) 3614 { 3615 switch (txmode) { 3616 case ATH10K_HW_TXRX_RAW: 3617 case ATH10K_HW_TXRX_NATIVE_WIFI: 3618 case ATH10K_HW_TXRX_ETHERNET: 3619 return ATH10K_MAC_TX_HTT; 3620 case ATH10K_HW_TXRX_MGMT: 3621 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3622 ar->running_fw->fw_file.fw_features) || 3623 test_bit(WMI_SERVICE_MGMT_TX_WMI, 3624 ar->wmi.svc_map)) 3625 return ATH10K_MAC_TX_WMI_MGMT; 3626 else if (ar->htt.target_version_major >= 3) 3627 return ATH10K_MAC_TX_HTT; 3628 else 3629 return ATH10K_MAC_TX_HTT_MGMT; 3630 } 3631 3632 return ATH10K_MAC_TX_UNKNOWN; 3633 } 3634 3635 static int ath10k_mac_tx_submit(struct ath10k *ar, 3636 enum ath10k_hw_txrx_mode txmode, 3637 enum ath10k_mac_tx_path txpath, 3638 struct sk_buff *skb) 3639 { 3640 struct ath10k_htt *htt = &ar->htt; 3641 int ret = -EINVAL; 3642 3643 switch (txpath) { 3644 case ATH10K_MAC_TX_HTT: 3645 ret = ath10k_htt_tx(htt, txmode, skb); 3646 break; 3647 case ATH10K_MAC_TX_HTT_MGMT: 3648 ret = ath10k_htt_mgmt_tx(htt, skb); 3649 break; 3650 case ATH10K_MAC_TX_WMI_MGMT: 3651 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3652 break; 3653 case ATH10K_MAC_TX_UNKNOWN: 3654 WARN_ON_ONCE(1); 3655 ret = -EINVAL; 3656 break; 3657 } 3658 3659 if (ret) { 
3660 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3661 ret); 3662 ieee80211_free_txskb(ar->hw, skb); 3663 } 3664 3665 return ret; 3666 } 3667 3668 /* This function consumes the sk_buff regardless of return value as far as 3669 * caller is concerned so no freeing is necessary afterwards. 3670 */ 3671 static int ath10k_mac_tx(struct ath10k *ar, 3672 struct ieee80211_vif *vif, 3673 enum ath10k_hw_txrx_mode txmode, 3674 enum ath10k_mac_tx_path txpath, 3675 struct sk_buff *skb) 3676 { 3677 struct ieee80211_hw *hw = ar->hw; 3678 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3679 const struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(skb); 3680 int ret; 3681 3682 /* We should disable CCK RATE due to P2P */ 3683 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3684 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3685 3686 switch (txmode) { 3687 case ATH10K_HW_TXRX_MGMT: 3688 case ATH10K_HW_TXRX_NATIVE_WIFI: 3689 ath10k_tx_h_nwifi(hw, skb); 3690 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3691 ath10k_tx_h_seq_no(vif, skb); 3692 break; 3693 case ATH10K_HW_TXRX_ETHERNET: 3694 ath10k_tx_h_8023(skb); 3695 break; 3696 case ATH10K_HW_TXRX_RAW: 3697 if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags) && 3698 !(skb_cb->flags & ATH10K_SKB_F_RAW_TX)) { 3699 WARN_ON_ONCE(1); 3700 ieee80211_free_txskb(hw, skb); 3701 return -ENOTSUPP; 3702 } 3703 } 3704 3705 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3706 if (!ath10k_mac_tx_frm_has_freq(ar)) { 3707 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %pK\n", 3708 skb); 3709 3710 skb_queue_tail(&ar->offchan_tx_queue, skb); 3711 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3712 return 0; 3713 } 3714 } 3715 3716 ret = ath10k_mac_tx_submit(ar, txmode, txpath, skb); 3717 if (ret) { 3718 ath10k_warn(ar, "failed to submit frame: %d\n", ret); 3719 return ret; 3720 } 3721 3722 return 0; 3723 } 3724 3725 void ath10k_offchan_tx_purge(struct ath10k *ar) 3726 { 3727 struct sk_buff *skb; 3728 3729 for (;;) { 3730 skb = skb_dequeue(&ar->offchan_tx_queue); 3731 if (!skb) 3732 break; 3733 3734 ieee80211_free_txskb(ar->hw, skb); 3735 } 3736 } 3737 3738 void ath10k_offchan_tx_work(struct work_struct *work) 3739 { 3740 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3741 struct ath10k_peer *peer; 3742 struct ath10k_vif *arvif; 3743 enum ath10k_hw_txrx_mode txmode; 3744 enum ath10k_mac_tx_path txpath; 3745 struct ieee80211_hdr *hdr; 3746 struct ieee80211_vif *vif; 3747 struct ieee80211_sta *sta; 3748 struct sk_buff *skb; 3749 const u8 *peer_addr; 3750 int vdev_id; 3751 int ret; 3752 unsigned long time_left; 3753 bool tmp_peer_created = false; 3754 3755 /* FW requirement: We must create a peer before FW will send out 3756 * an offchannel frame. Otherwise the frame will be stuck and 3757 * never transmitted. We delete the peer upon tx completion. 3758 * It is unlikely that a peer for offchannel tx will already be 3759 * present. However it may be in some rare cases so account for that. 3760 * Otherwise we might remove a legitimate peer and break stuff. 
3761 */ 3762 3763 for (;;) { 3764 skb = skb_dequeue(&ar->offchan_tx_queue); 3765 if (!skb) 3766 break; 3767 3768 mutex_lock(&ar->conf_mutex); 3769 3770 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %pK\n", 3771 skb); 3772 3773 hdr = (struct ieee80211_hdr *)skb->data; 3774 peer_addr = ieee80211_get_DA(hdr); 3775 3776 spin_lock_bh(&ar->data_lock); 3777 vdev_id = ar->scan.vdev_id; 3778 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3779 spin_unlock_bh(&ar->data_lock); 3780 3781 if (peer) 3782 /* FIXME: should this use ath10k_warn()? */ 3783 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3784 peer_addr, vdev_id); 3785 3786 if (!peer) { 3787 ret = ath10k_peer_create(ar, NULL, NULL, vdev_id, 3788 peer_addr, 3789 WMI_PEER_TYPE_DEFAULT); 3790 if (ret) 3791 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3792 peer_addr, vdev_id, ret); 3793 tmp_peer_created = (ret == 0); 3794 } 3795 3796 spin_lock_bh(&ar->data_lock); 3797 reinit_completion(&ar->offchan_tx_completed); 3798 ar->offchan_tx_skb = skb; 3799 spin_unlock_bh(&ar->data_lock); 3800 3801 /* It's safe to access vif and sta - conf_mutex guarantees that 3802 * sta_state() and remove_interface() are locked exclusively 3803 * out wrt to this offchannel worker. 3804 */ 3805 arvif = ath10k_get_arvif(ar, vdev_id); 3806 if (arvif) { 3807 vif = arvif->vif; 3808 sta = ieee80211_find_sta(vif, peer_addr); 3809 } else { 3810 vif = NULL; 3811 sta = NULL; 3812 } 3813 3814 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 3815 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 3816 3817 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 3818 if (ret) { 3819 ath10k_warn(ar, "failed to transmit offchannel frame: %d\n", 3820 ret); 3821 /* not serious */ 3822 } 3823 3824 time_left = 3825 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3826 if (time_left == 0) 3827 ath10k_warn(ar, "timed out waiting for offchannel skb %pK\n", 3828 skb); 3829 3830 if (!peer && tmp_peer_created) { 3831 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3832 if (ret) 3833 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3834 peer_addr, vdev_id, ret); 3835 } 3836 3837 mutex_unlock(&ar->conf_mutex); 3838 } 3839 } 3840 3841 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3842 { 3843 struct sk_buff *skb; 3844 3845 for (;;) { 3846 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3847 if (!skb) 3848 break; 3849 3850 ieee80211_free_txskb(ar->hw, skb); 3851 } 3852 } 3853 3854 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3855 { 3856 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3857 struct sk_buff *skb; 3858 dma_addr_t paddr; 3859 int ret; 3860 3861 for (;;) { 3862 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3863 if (!skb) 3864 break; 3865 3866 if (test_bit(ATH10K_FW_FEATURE_MGMT_TX_BY_REF, 3867 ar->running_fw->fw_file.fw_features)) { 3868 paddr = dma_map_single(ar->dev, skb->data, 3869 skb->len, DMA_TO_DEVICE); 3870 if (!paddr) 3871 continue; 3872 ret = ath10k_wmi_mgmt_tx_send(ar, skb, paddr); 3873 if (ret) { 3874 ath10k_warn(ar, "failed to transmit management frame by ref via WMI: %d\n", 3875 ret); 3876 dma_unmap_single(ar->dev, paddr, skb->len, 3877 DMA_TO_DEVICE); 3878 ieee80211_free_txskb(ar->hw, skb); 3879 } 3880 } else { 3881 ret = ath10k_wmi_mgmt_tx(ar, skb); 3882 if (ret) { 3883 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3884 ret); 3885 ieee80211_free_txskb(ar->hw, skb); 3886 } 3887 } 3888 } 3889 } 3890 3891 static void 
ath10k_mac_txq_init(struct ieee80211_txq *txq) 3892 { 3893 struct ath10k_txq *artxq; 3894 3895 if (!txq) 3896 return; 3897 3898 artxq = (void *)txq->drv_priv; 3899 INIT_LIST_HEAD(&artxq->list); 3900 } 3901 3902 static void ath10k_mac_txq_unref(struct ath10k *ar, struct ieee80211_txq *txq) 3903 { 3904 struct ath10k_skb_cb *cb; 3905 struct sk_buff *msdu; 3906 int msdu_id; 3907 3908 if (!txq) 3909 return; 3910 3911 spin_lock_bh(&ar->htt.tx_lock); 3912 idr_for_each_entry(&ar->htt.pending_tx, msdu, msdu_id) { 3913 cb = ATH10K_SKB_CB(msdu); 3914 if (cb->txq == txq) 3915 cb->txq = NULL; 3916 } 3917 spin_unlock_bh(&ar->htt.tx_lock); 3918 } 3919 3920 struct ieee80211_txq *ath10k_mac_txq_lookup(struct ath10k *ar, 3921 u16 peer_id, 3922 u8 tid) 3923 { 3924 struct ath10k_peer *peer; 3925 3926 lockdep_assert_held(&ar->data_lock); 3927 3928 peer = ar->peer_map[peer_id]; 3929 if (!peer) 3930 return NULL; 3931 3932 if (peer->removed) 3933 return NULL; 3934 3935 if (peer->sta) 3936 return peer->sta->txq[tid]; 3937 else if (peer->vif) 3938 return peer->vif->txq; 3939 else 3940 return NULL; 3941 } 3942 3943 static bool ath10k_mac_tx_can_push(struct ieee80211_hw *hw, 3944 struct ieee80211_txq *txq) 3945 { 3946 struct ath10k *ar = hw->priv; 3947 struct ath10k_txq *artxq = (void *)txq->drv_priv; 3948 3949 /* No need to get locks */ 3950 if (ar->htt.tx_q_state.mode == HTT_TX_MODE_SWITCH_PUSH) 3951 return true; 3952 3953 if (ar->htt.num_pending_tx < ar->htt.tx_q_state.num_push_allowed) 3954 return true; 3955 3956 if (artxq->num_fw_queued < artxq->num_push_allowed) 3957 return true; 3958 3959 return false; 3960 } 3961 3962 /* Return estimated airtime in microsecond, which is calculated using last 3963 * reported TX rate. This is just a rough estimation because host driver has no 3964 * knowledge of the actual transmit rate, retries or aggregation. If actual 3965 * airtime can be reported by firmware, then delta between estimated and actual 3966 * airtime can be adjusted from deficit. 3967 */ 3968 #define IEEE80211_ATF_OVERHEAD 100 /* IFS + some slot time */ 3969 #define IEEE80211_ATF_OVERHEAD_IFS 16 /* IFS only */ 3970 static u16 ath10k_mac_update_airtime(struct ath10k *ar, 3971 struct ieee80211_txq *txq, 3972 struct sk_buff *skb) 3973 { 3974 struct ath10k_sta *arsta; 3975 u32 pktlen; 3976 u16 airtime = 0; 3977 3978 if (!txq || !txq->sta) 3979 return airtime; 3980 3981 if (test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map)) 3982 return airtime; 3983 3984 spin_lock_bh(&ar->data_lock); 3985 arsta = (struct ath10k_sta *)txq->sta->drv_priv; 3986 3987 pktlen = skb->len + 38; /* Assume MAC header 30, SNAP 8 for most case */ 3988 if (arsta->last_tx_bitrate) { 3989 /* airtime in us, last_tx_bitrate in 100kbps */ 3990 airtime = (pktlen * 8 * (1000 / 100)) 3991 / arsta->last_tx_bitrate; 3992 /* overhead for media access time and IFS */ 3993 airtime += IEEE80211_ATF_OVERHEAD_IFS; 3994 } else { 3995 /* This is mostly for throttle excessive BC/MC frames, and the 3996 * airtime/rate doesn't need be exact. Airtime of BC/MC frames 3997 * in 2G get some discount, which helps prevent very low rate 3998 * frames from being blocked for too long. 
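 * The fallback below assumes the 6 Mbps OFDM basic rate: pktlen * 8 bits
 * times 10, divided by 60 (the rate in 100 kbps units), yields
 * microseconds.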
3999 */ 4000 airtime = (pktlen * 8 * (1000 / 100)) / 60; /* 6M */ 4001 airtime += IEEE80211_ATF_OVERHEAD; 4002 } 4003 spin_unlock_bh(&ar->data_lock); 4004 4005 return airtime; 4006 } 4007 4008 int ath10k_mac_tx_push_txq(struct ieee80211_hw *hw, 4009 struct ieee80211_txq *txq) 4010 { 4011 struct ath10k *ar = hw->priv; 4012 struct ath10k_htt *htt = &ar->htt; 4013 struct ath10k_txq *artxq = (void *)txq->drv_priv; 4014 struct ieee80211_vif *vif = txq->vif; 4015 struct ieee80211_sta *sta = txq->sta; 4016 enum ath10k_hw_txrx_mode txmode; 4017 enum ath10k_mac_tx_path txpath; 4018 struct sk_buff *skb; 4019 struct ieee80211_hdr *hdr; 4020 size_t skb_len; 4021 bool is_mgmt, is_presp; 4022 int ret; 4023 u16 airtime; 4024 4025 spin_lock_bh(&ar->htt.tx_lock); 4026 ret = ath10k_htt_tx_inc_pending(htt); 4027 spin_unlock_bh(&ar->htt.tx_lock); 4028 4029 if (ret) 4030 return ret; 4031 4032 skb = ieee80211_tx_dequeue(hw, txq); 4033 if (!skb) { 4034 spin_lock_bh(&ar->htt.tx_lock); 4035 ath10k_htt_tx_dec_pending(htt); 4036 spin_unlock_bh(&ar->htt.tx_lock); 4037 4038 return -ENOENT; 4039 } 4040 4041 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4042 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4043 4044 skb_len = skb->len; 4045 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4046 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4047 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4048 4049 if (is_mgmt) { 4050 hdr = (struct ieee80211_hdr *)skb->data; 4051 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4052 4053 spin_lock_bh(&ar->htt.tx_lock); 4054 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4055 4056 if (ret) { 4057 ath10k_htt_tx_dec_pending(htt); 4058 spin_unlock_bh(&ar->htt.tx_lock); 4059 return ret; 4060 } 4061 spin_unlock_bh(&ar->htt.tx_lock); 4062 } 4063 4064 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4065 if (unlikely(ret)) { 4066 ath10k_warn(ar, "failed to push frame: %d\n", ret); 4067 4068 spin_lock_bh(&ar->htt.tx_lock); 4069 ath10k_htt_tx_dec_pending(htt); 4070 if (is_mgmt) 4071 ath10k_htt_tx_mgmt_dec_pending(htt); 4072 spin_unlock_bh(&ar->htt.tx_lock); 4073 4074 return ret; 4075 } 4076 4077 spin_lock_bh(&ar->htt.tx_lock); 4078 artxq->num_fw_queued++; 4079 spin_unlock_bh(&ar->htt.tx_lock); 4080 4081 return skb_len; 4082 } 4083 4084 static int ath10k_mac_schedule_txq(struct ieee80211_hw *hw, u32 ac) 4085 { 4086 struct ieee80211_txq *txq; 4087 int ret = 0; 4088 4089 ieee80211_txq_schedule_start(hw, ac); 4090 while ((txq = ieee80211_next_txq(hw, ac))) { 4091 while (ath10k_mac_tx_can_push(hw, txq)) { 4092 ret = ath10k_mac_tx_push_txq(hw, txq); 4093 if (ret < 0) 4094 break; 4095 } 4096 ieee80211_return_txq(hw, txq, false); 4097 ath10k_htt_tx_txq_update(hw, txq); 4098 if (ret == -EBUSY) 4099 break; 4100 } 4101 ieee80211_txq_schedule_end(hw, ac); 4102 4103 return ret; 4104 } 4105 4106 void ath10k_mac_tx_push_pending(struct ath10k *ar) 4107 { 4108 struct ieee80211_hw *hw = ar->hw; 4109 u32 ac; 4110 4111 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4112 return; 4113 4114 if (ar->htt.num_pending_tx >= (ar->htt.max_num_pending_tx / 2)) 4115 return; 4116 4117 rcu_read_lock(); 4118 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 4119 if (ath10k_mac_schedule_txq(hw, ac) == -EBUSY) 4120 break; 4121 } 4122 rcu_read_unlock(); 4123 } 4124 EXPORT_SYMBOL(ath10k_mac_tx_push_pending); 4125 4126 /************/ 4127 /* Scanning */ 4128 /************/ 4129 4130 void __ath10k_scan_finish(struct ath10k *ar) 4131 { 4132 lockdep_assert_held(&ar->data_lock); 4133 
4134 switch (ar->scan.state) { 4135 case ATH10K_SCAN_IDLE: 4136 break; 4137 case ATH10K_SCAN_RUNNING: 4138 case ATH10K_SCAN_ABORTING: 4139 if (!ar->scan.is_roc) { 4140 struct cfg80211_scan_info info = { 4141 .aborted = (ar->scan.state == 4142 ATH10K_SCAN_ABORTING), 4143 }; 4144 4145 ieee80211_scan_completed(ar->hw, &info); 4146 } else if (ar->scan.roc_notify) { 4147 ieee80211_remain_on_channel_expired(ar->hw); 4148 } 4149 /* fall through */ 4150 case ATH10K_SCAN_STARTING: 4151 ar->scan.state = ATH10K_SCAN_IDLE; 4152 ar->scan_channel = NULL; 4153 ar->scan.roc_freq = 0; 4154 ath10k_offchan_tx_purge(ar); 4155 cancel_delayed_work(&ar->scan.timeout); 4156 complete(&ar->scan.completed); 4157 break; 4158 } 4159 } 4160 4161 void ath10k_scan_finish(struct ath10k *ar) 4162 { 4163 spin_lock_bh(&ar->data_lock); 4164 __ath10k_scan_finish(ar); 4165 spin_unlock_bh(&ar->data_lock); 4166 } 4167 4168 static int ath10k_scan_stop(struct ath10k *ar) 4169 { 4170 struct wmi_stop_scan_arg arg = { 4171 .req_id = 1, /* FIXME */ 4172 .req_type = WMI_SCAN_STOP_ONE, 4173 .u.scan_id = ATH10K_SCAN_ID, 4174 }; 4175 int ret; 4176 4177 lockdep_assert_held(&ar->conf_mutex); 4178 4179 ret = ath10k_wmi_stop_scan(ar, &arg); 4180 if (ret) { 4181 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 4182 goto out; 4183 } 4184 4185 ret = wait_for_completion_timeout(&ar->scan.completed, 3 * HZ); 4186 if (ret == 0) { 4187 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 4188 ret = -ETIMEDOUT; 4189 } else if (ret > 0) { 4190 ret = 0; 4191 } 4192 4193 out: 4194 /* Scan state should be updated upon scan completion but in case 4195 * firmware fails to deliver the event (for whatever reason) it is 4196 * desired to clean up scan state anyway. Firmware may have just 4197 * dropped the scan completion event delivery due to transport pipe 4198 * being overflown with data and/or it can recover on its own before 4199 * next scan request is submitted. 4200 */ 4201 spin_lock_bh(&ar->data_lock); 4202 if (ar->scan.state != ATH10K_SCAN_IDLE) 4203 __ath10k_scan_finish(ar); 4204 spin_unlock_bh(&ar->data_lock); 4205 4206 return ret; 4207 } 4208 4209 static void ath10k_scan_abort(struct ath10k *ar) 4210 { 4211 int ret; 4212 4213 lockdep_assert_held(&ar->conf_mutex); 4214 4215 spin_lock_bh(&ar->data_lock); 4216 4217 switch (ar->scan.state) { 4218 case ATH10K_SCAN_IDLE: 4219 /* This can happen if timeout worker kicked in and called 4220 * abortion while scan completion was being processed. 
4221 */ 4222 break; 4223 case ATH10K_SCAN_STARTING: 4224 case ATH10K_SCAN_ABORTING: 4225 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 4226 ath10k_scan_state_str(ar->scan.state), 4227 ar->scan.state); 4228 break; 4229 case ATH10K_SCAN_RUNNING: 4230 ar->scan.state = ATH10K_SCAN_ABORTING; 4231 spin_unlock_bh(&ar->data_lock); 4232 4233 ret = ath10k_scan_stop(ar); 4234 if (ret) 4235 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 4236 4237 spin_lock_bh(&ar->data_lock); 4238 break; 4239 } 4240 4241 spin_unlock_bh(&ar->data_lock); 4242 } 4243 4244 void ath10k_scan_timeout_work(struct work_struct *work) 4245 { 4246 struct ath10k *ar = container_of(work, struct ath10k, 4247 scan.timeout.work); 4248 4249 mutex_lock(&ar->conf_mutex); 4250 ath10k_scan_abort(ar); 4251 mutex_unlock(&ar->conf_mutex); 4252 } 4253 4254 static int ath10k_start_scan(struct ath10k *ar, 4255 const struct wmi_start_scan_arg *arg) 4256 { 4257 int ret; 4258 4259 lockdep_assert_held(&ar->conf_mutex); 4260 4261 ret = ath10k_wmi_start_scan(ar, arg); 4262 if (ret) 4263 return ret; 4264 4265 ret = wait_for_completion_timeout(&ar->scan.started, 1 * HZ); 4266 if (ret == 0) { 4267 ret = ath10k_scan_stop(ar); 4268 if (ret) 4269 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 4270 4271 return -ETIMEDOUT; 4272 } 4273 4274 /* If we failed to start the scan, return error code at 4275 * this point. This is probably due to some issue in the 4276 * firmware, but no need to wedge the driver due to that... 4277 */ 4278 spin_lock_bh(&ar->data_lock); 4279 if (ar->scan.state == ATH10K_SCAN_IDLE) { 4280 spin_unlock_bh(&ar->data_lock); 4281 return -EINVAL; 4282 } 4283 spin_unlock_bh(&ar->data_lock); 4284 4285 return 0; 4286 } 4287 4288 /**********************/ 4289 /* mac80211 callbacks */ 4290 /**********************/ 4291 4292 static void ath10k_mac_op_tx(struct ieee80211_hw *hw, 4293 struct ieee80211_tx_control *control, 4294 struct sk_buff *skb) 4295 { 4296 struct ath10k *ar = hw->priv; 4297 struct ath10k_htt *htt = &ar->htt; 4298 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 4299 struct ieee80211_vif *vif = info->control.vif; 4300 struct ieee80211_sta *sta = control->sta; 4301 struct ieee80211_txq *txq = NULL; 4302 struct ieee80211_hdr *hdr = (void *)skb->data; 4303 enum ath10k_hw_txrx_mode txmode; 4304 enum ath10k_mac_tx_path txpath; 4305 bool is_htt; 4306 bool is_mgmt; 4307 bool is_presp; 4308 int ret; 4309 u16 airtime; 4310 4311 airtime = ath10k_mac_update_airtime(ar, txq, skb); 4312 ath10k_mac_tx_h_fill_cb(ar, vif, txq, skb, airtime); 4313 4314 txmode = ath10k_mac_tx_h_get_txmode(ar, vif, sta, skb); 4315 txpath = ath10k_mac_tx_h_get_txpath(ar, skb, txmode); 4316 is_htt = (txpath == ATH10K_MAC_TX_HTT || 4317 txpath == ATH10K_MAC_TX_HTT_MGMT); 4318 is_mgmt = (txpath == ATH10K_MAC_TX_HTT_MGMT); 4319 4320 if (is_htt) { 4321 spin_lock_bh(&ar->htt.tx_lock); 4322 is_presp = ieee80211_is_probe_resp(hdr->frame_control); 4323 4324 ret = ath10k_htt_tx_inc_pending(htt); 4325 if (ret) { 4326 ath10k_warn(ar, "failed to increase tx pending count: %d, dropping\n", 4327 ret); 4328 spin_unlock_bh(&ar->htt.tx_lock); 4329 ieee80211_free_txskb(ar->hw, skb); 4330 return; 4331 } 4332 4333 ret = ath10k_htt_tx_mgmt_inc_pending(htt, is_mgmt, is_presp); 4334 if (ret) { 4335 ath10k_dbg(ar, ATH10K_DBG_MAC, "failed to increase tx mgmt pending count: %d, dropping\n", 4336 ret); 4337 ath10k_htt_tx_dec_pending(htt); 4338 spin_unlock_bh(&ar->htt.tx_lock); 4339 ieee80211_free_txskb(ar->hw, skb); 4340 return; 4341 } 4342 
spin_unlock_bh(&ar->htt.tx_lock); 4343 } 4344 4345 ret = ath10k_mac_tx(ar, vif, txmode, txpath, skb); 4346 if (ret) { 4347 ath10k_warn(ar, "failed to transmit frame: %d\n", ret); 4348 if (is_htt) { 4349 spin_lock_bh(&ar->htt.tx_lock); 4350 ath10k_htt_tx_dec_pending(htt); 4351 if (is_mgmt) 4352 ath10k_htt_tx_mgmt_dec_pending(htt); 4353 spin_unlock_bh(&ar->htt.tx_lock); 4354 } 4355 return; 4356 } 4357 } 4358 4359 static void ath10k_mac_op_wake_tx_queue(struct ieee80211_hw *hw, 4360 struct ieee80211_txq *txq) 4361 { 4362 struct ath10k *ar = hw->priv; 4363 int ret; 4364 u8 ac; 4365 4366 ath10k_htt_tx_txq_update(hw, txq); 4367 if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH) 4368 return; 4369 4370 ac = txq->ac; 4371 ieee80211_txq_schedule_start(hw, ac); 4372 txq = ieee80211_next_txq(hw, ac); 4373 if (!txq) 4374 goto out; 4375 4376 while (ath10k_mac_tx_can_push(hw, txq)) { 4377 ret = ath10k_mac_tx_push_txq(hw, txq); 4378 if (ret < 0) 4379 break; 4380 } 4381 ieee80211_return_txq(hw, txq, false); 4382 ath10k_htt_tx_txq_update(hw, txq); 4383 out: 4384 ieee80211_txq_schedule_end(hw, ac); 4385 } 4386 4387 /* Must not be called with conf_mutex held as workers can use that also. */ 4388 void ath10k_drain_tx(struct ath10k *ar) 4389 { 4390 /* make sure rcu-protected mac80211 tx path itself is drained */ 4391 synchronize_net(); 4392 4393 ath10k_offchan_tx_purge(ar); 4394 ath10k_mgmt_over_wmi_tx_purge(ar); 4395 4396 cancel_work_sync(&ar->offchan_tx_work); 4397 cancel_work_sync(&ar->wmi_mgmt_tx_work); 4398 } 4399 4400 void ath10k_halt(struct ath10k *ar) 4401 { 4402 struct ath10k_vif *arvif; 4403 4404 lockdep_assert_held(&ar->conf_mutex); 4405 4406 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 4407 ar->filter_flags = 0; 4408 ar->monitor = false; 4409 ar->monitor_arvif = NULL; 4410 4411 if (ar->monitor_started) 4412 ath10k_monitor_stop(ar); 4413 4414 ar->monitor_started = false; 4415 ar->tx_paused = 0; 4416 4417 ath10k_scan_finish(ar); 4418 ath10k_peer_cleanup_all(ar); 4419 ath10k_stop_radar_confirmation(ar); 4420 ath10k_core_stop(ar); 4421 ath10k_hif_power_down(ar); 4422 4423 spin_lock_bh(&ar->data_lock); 4424 list_for_each_entry(arvif, &ar->arvifs, list) 4425 ath10k_mac_vif_beacon_cleanup(arvif); 4426 spin_unlock_bh(&ar->data_lock); 4427 } 4428 4429 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 4430 { 4431 struct ath10k *ar = hw->priv; 4432 4433 mutex_lock(&ar->conf_mutex); 4434 4435 *tx_ant = ar->cfg_tx_chainmask; 4436 *rx_ant = ar->cfg_rx_chainmask; 4437 4438 mutex_unlock(&ar->conf_mutex); 4439 4440 return 0; 4441 } 4442 4443 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 4444 { 4445 /* It is not clear that allowing gaps in chainmask 4446 * is helpful. Probably it will not do what user 4447 * is hoping for, so warn in that case. 4448 */ 4449 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 4450 return; 4451 4452 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. 
Suggested values: 15, 7, 3, 1 or 0.\n", 4453 dbg, cm); 4454 } 4455 4456 static int ath10k_mac_get_vht_cap_bf_sts(struct ath10k *ar) 4457 { 4458 int nsts = ar->vht_cap_info; 4459 4460 nsts &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4461 nsts >>= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4462 4463 /* If firmware does not deliver to host number of space-time 4464 * streams supported, assume it support up to 4 BF STS and return 4465 * the value for VHT CAP: nsts-1) 4466 */ 4467 if (nsts == 0) 4468 return 3; 4469 4470 return nsts; 4471 } 4472 4473 static int ath10k_mac_get_vht_cap_bf_sound_dim(struct ath10k *ar) 4474 { 4475 int sound_dim = ar->vht_cap_info; 4476 4477 sound_dim &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4478 sound_dim >>= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4479 4480 /* If the sounding dimension is not advertised by the firmware, 4481 * let's use a default value of 1 4482 */ 4483 if (sound_dim == 0) 4484 return 1; 4485 4486 return sound_dim; 4487 } 4488 4489 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 4490 { 4491 struct ieee80211_sta_vht_cap vht_cap = {0}; 4492 struct ath10k_hw_params *hw = &ar->hw_params; 4493 u16 mcs_map; 4494 u32 val; 4495 int i; 4496 4497 vht_cap.vht_supported = 1; 4498 vht_cap.cap = ar->vht_cap_info; 4499 4500 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 4501 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 4502 val = ath10k_mac_get_vht_cap_bf_sts(ar); 4503 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 4504 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 4505 4506 vht_cap.cap |= val; 4507 } 4508 4509 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 4510 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 4511 val = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 4512 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 4513 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 4514 4515 vht_cap.cap |= val; 4516 } 4517 4518 /* Currently the firmware seems to be buggy, don't enable 80+80 4519 * mode until that's resolved. 4520 */ 4521 if ((ar->vht_cap_info & IEEE80211_VHT_CAP_SHORT_GI_160) && 4522 (ar->vht_cap_info & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) == 0) 4523 vht_cap.cap |= IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ; 4524 4525 mcs_map = 0; 4526 for (i = 0; i < 8; i++) { 4527 if ((i < ar->num_rf_chains) && (ar->cfg_tx_chainmask & BIT(i))) 4528 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i * 2); 4529 else 4530 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i * 2); 4531 } 4532 4533 if (ar->cfg_tx_chainmask <= 1) 4534 vht_cap.cap &= ~IEEE80211_VHT_CAP_TXSTBC; 4535 4536 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 4537 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 4538 4539 /* If we are supporting 160Mhz or 80+80, then the NIC may be able to do 4540 * a restricted NSS for 160 or 80+80 vs what it can do for 80Mhz. Give 4541 * user-space a clue if that is the case. 
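*
* Hypothetical example: a 2x2 device that can only run a single spatial
* stream at 160 MHz could set vht160_mcs_rx_highest/vht160_mcs_tx_highest
* in its hw_params to 780 (VHT MCS 9, 1 SS, long GI, in Mbps) so that the
* rx_highest/tx_highest fields filled in below advertise the reduced peak
* rate to user-space.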
4542 */ 4543 if ((vht_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) && 4544 (hw->vht160_mcs_rx_highest != 0 || 4545 hw->vht160_mcs_tx_highest != 0)) { 4546 vht_cap.vht_mcs.rx_highest = cpu_to_le16(hw->vht160_mcs_rx_highest); 4547 vht_cap.vht_mcs.tx_highest = cpu_to_le16(hw->vht160_mcs_tx_highest); 4548 } 4549 4550 return vht_cap; 4551 } 4552 4553 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 4554 { 4555 int i; 4556 struct ieee80211_sta_ht_cap ht_cap = {0}; 4557 4558 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 4559 return ht_cap; 4560 4561 ht_cap.ht_supported = 1; 4562 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 4563 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 4564 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 4565 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 4566 ht_cap.cap |= 4567 WLAN_HT_CAP_SM_PS_DISABLED << IEEE80211_HT_CAP_SM_PS_SHIFT; 4568 4569 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 4570 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 4571 4572 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 4573 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 4574 4575 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 4576 u32 smps; 4577 4578 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 4579 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 4580 4581 ht_cap.cap |= smps; 4582 } 4583 4584 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC && (ar->cfg_tx_chainmask > 1)) 4585 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 4586 4587 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 4588 u32 stbc; 4589 4590 stbc = ar->ht_cap_info; 4591 stbc &= WMI_HT_CAP_RX_STBC; 4592 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 4593 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 4594 stbc &= IEEE80211_HT_CAP_RX_STBC; 4595 4596 ht_cap.cap |= stbc; 4597 } 4598 4599 if (ar->ht_cap_info & WMI_HT_CAP_LDPC || (ar->ht_cap_info & 4600 WMI_HT_CAP_RX_LDPC && (ar->ht_cap_info & WMI_HT_CAP_TX_LDPC))) 4601 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 4602 4603 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 4604 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 4605 4606 /* max AMSDU is implicitly taken from vht_cap_info */ 4607 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 4608 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 4609 4610 for (i = 0; i < ar->num_rf_chains; i++) { 4611 if (ar->cfg_rx_chainmask & BIT(i)) 4612 ht_cap.mcs.rx_mask[i] = 0xFF; 4613 } 4614 4615 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 4616 4617 return ht_cap; 4618 } 4619 4620 static void ath10k_mac_setup_ht_vht_cap(struct ath10k *ar) 4621 { 4622 struct ieee80211_supported_band *band; 4623 struct ieee80211_sta_vht_cap vht_cap; 4624 struct ieee80211_sta_ht_cap ht_cap; 4625 4626 ht_cap = ath10k_get_ht_cap(ar); 4627 vht_cap = ath10k_create_vht_cap(ar); 4628 4629 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 4630 band = &ar->mac.sbands[NL80211_BAND_2GHZ]; 4631 band->ht_cap = ht_cap; 4632 } 4633 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 4634 band = &ar->mac.sbands[NL80211_BAND_5GHZ]; 4635 band->ht_cap = ht_cap; 4636 band->vht_cap = vht_cap; 4637 } 4638 } 4639 4640 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 4641 { 4642 int ret; 4643 4644 lockdep_assert_held(&ar->conf_mutex); 4645 4646 ath10k_check_chain_mask(ar, tx_ant, "tx"); 4647 ath10k_check_chain_mask(ar, rx_ant, "rx"); 4648 4649 ar->cfg_tx_chainmask = tx_ant; 4650 ar->cfg_rx_chainmask = rx_ant; 4651 4652 if ((ar->state != ATH10K_STATE_ON) && 4653 (ar->state != ATH10K_STATE_RESTARTED)) 4654 return 0; 4655 4656 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 4657 
tx_ant);
4658 if (ret) {
4659 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n",
4660 ret, tx_ant);
4661 return ret;
4662 }
4663
4664 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask,
4665 rx_ant);
4666 if (ret) {
4667 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n",
4668 ret, rx_ant);
4669 return ret;
4670 }
4671
4672 /* Reload HT/VHT capability */
4673 ath10k_mac_setup_ht_vht_cap(ar);
4674
4675 return 0;
4676 }
4677
4678 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant)
4679 {
4680 struct ath10k *ar = hw->priv;
4681 int ret;
4682
4683 mutex_lock(&ar->conf_mutex);
4684 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant);
4685 mutex_unlock(&ar->conf_mutex);
4686 return ret;
4687 }
4688
4689 static int __ath10k_fetch_bb_timing_dt(struct ath10k *ar,
4690 struct wmi_bb_timing_cfg_arg *bb_timing)
4691 {
4692 struct device_node *node;
4693 const char *fem_name;
4694 int ret;
4695
4696 node = ar->dev->of_node;
4697 if (!node)
4698 return -ENOENT;
4699
4700 ret = of_property_read_string_index(node, "ext-fem-name", 0, &fem_name);
4701 if (ret)
4702 return -ENOENT;
4703
4704 /*
4705 * If an external Front End Module (FEM) is used in the hardware, the default
4706 * baseband timing parameters cannot be used since they were fine-tuned for the
4707 * reference hardware, so choose different values suitable for that external FEM.
4708 */
4709 if (!strcmp("microsemi-lx5586", fem_name)) {
4710 bb_timing->bb_tx_timing = 0x00;
4711 bb_timing->bb_xpa_timing = 0x0101;
4712 } else {
4713 return -ENOENT;
4714 }
4715
4716 ath10k_dbg(ar, ATH10K_DBG_BOOT, "boot bb_tx_timing 0x%x bb_xpa_timing 0x%x\n",
4717 bb_timing->bb_tx_timing, bb_timing->bb_xpa_timing);
4718 return 0;
4719 }
4720
4721 static int ath10k_start(struct ieee80211_hw *hw)
4722 {
4723 struct ath10k *ar = hw->priv;
4724 u32 param;
4725 int ret = 0;
4726 struct wmi_bb_timing_cfg_arg bb_timing = {0};
4727
4728 /*
4729 * This makes sense only when restarting hw. It is harmless to call
4730 * unconditionally. This is necessary to make sure no HTT/WMI tx
4731 * commands will be submitted while restarting.
4732 */
4733 ath10k_drain_tx(ar);
4734
4735 mutex_lock(&ar->conf_mutex);
4736
4737 switch (ar->state) {
4738 case ATH10K_STATE_OFF:
4739 ar->state = ATH10K_STATE_ON;
4740 break;
4741 case ATH10K_STATE_RESTARTING:
4742 ar->state = ATH10K_STATE_RESTARTED;
4743 break;
4744 case ATH10K_STATE_ON:
4745 case ATH10K_STATE_RESTARTED:
4746 case ATH10K_STATE_WEDGED:
4747 WARN_ON(1);
4748 ret = -EINVAL;
4749 goto err;
4750 case ATH10K_STATE_UTF:
4751 ret = -EBUSY;
4752 goto err;
4753 }
4754
4755 ret = ath10k_hif_power_up(ar, ATH10K_FIRMWARE_MODE_NORMAL);
4756 if (ret) {
4757 ath10k_err(ar, "Could not init hif: %d\n", ret);
4758 goto err_off;
4759 }
4760
4761 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL,
4762 &ar->normal_mode_fw);
4763 if (ret) {
4764 ath10k_err(ar, "Could not init core: %d\n", ret);
4765 goto err_power_down;
4766 }
4767
4768 param = ar->wmi.pdev_param->pmf_qos;
4769 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4770 if (ret) {
4771 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret);
4772 goto err_core_stop;
4773 }
4774
4775 param = ar->wmi.pdev_param->dynamic_bw;
4776 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4777 if (ret) {
4778 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret);
4779 goto err_core_stop;
4780 }
4781
4782 if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
4783 ret = ath10k_wmi_scan_prob_req_oui(ar, ar->mac_addr);
4784 if (ret) {
4785 ath10k_err(ar, "failed to set prob req oui: %i\n", ret);
4786 goto err_core_stop;
4787 }
4788 }
4789
4790 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
4791 ret = ath10k_wmi_adaptive_qcs(ar, true);
4792 if (ret) {
4793 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n",
4794 ret);
4795 goto err_core_stop;
4796 }
4797 }
4798
4799 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) {
4800 param = ar->wmi.pdev_param->burst_enable;
4801 ret = ath10k_wmi_pdev_set_param(ar, param, 0);
4802 if (ret) {
4803 ath10k_warn(ar, "failed to disable burst: %d\n", ret);
4804 goto err_core_stop;
4805 }
4806 }
4807
4808 param = ar->wmi.pdev_param->idle_ps_config;
4809 ret = ath10k_wmi_pdev_set_param(ar, param, 1);
4810 if (ret && ret != -EOPNOTSUPP) {
4811 ath10k_warn(ar, "failed to enable idle_ps_config: %d\n", ret);
4812 goto err_core_stop;
4813 }
4814
4815 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, ar->cfg_rx_chainmask);
4816
4817 /*
4818 * By default FW sets the ARP frames AC to voice (6). In that case the ARP
4819 * exchange does not work properly for a UAPSD enabled AP. ARP requests
4820 * which arrive with access category 0 are processed by the network stack
4821 * and sent back with access category 0, but FW changes the access category
4822 * to 6. Setting the ARP frames access category to best effort (0) solves
4823 * this problem.
4824 */ 4825 4826 param = ar->wmi.pdev_param->arp_ac_override; 4827 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4828 if (ret) { 4829 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 4830 ret); 4831 goto err_core_stop; 4832 } 4833 4834 if (test_bit(ATH10K_FW_FEATURE_SUPPORTS_ADAPTIVE_CCA, 4835 ar->running_fw->fw_file.fw_features)) { 4836 ret = ath10k_wmi_pdev_enable_adaptive_cca(ar, 1, 4837 WMI_CCA_DETECT_LEVEL_AUTO, 4838 WMI_CCA_DETECT_MARGIN_AUTO); 4839 if (ret) { 4840 ath10k_warn(ar, "failed to enable adaptive cca: %d\n", 4841 ret); 4842 goto err_core_stop; 4843 } 4844 } 4845 4846 param = ar->wmi.pdev_param->ani_enable; 4847 ret = ath10k_wmi_pdev_set_param(ar, param, 1); 4848 if (ret) { 4849 ath10k_warn(ar, "failed to enable ani by default: %d\n", 4850 ret); 4851 goto err_core_stop; 4852 } 4853 4854 ar->ani_enabled = true; 4855 4856 if (ath10k_peer_stats_enabled(ar)) { 4857 param = ar->wmi.pdev_param->peer_stats_update_period; 4858 ret = ath10k_wmi_pdev_set_param(ar, param, 4859 PEER_DEFAULT_STATS_UPDATE_PERIOD); 4860 if (ret) { 4861 ath10k_warn(ar, 4862 "failed to set peer stats period : %d\n", 4863 ret); 4864 goto err_core_stop; 4865 } 4866 } 4867 4868 param = ar->wmi.pdev_param->enable_btcoex; 4869 if (test_bit(WMI_SERVICE_COEX_GPIO, ar->wmi.svc_map) && 4870 test_bit(ATH10K_FW_FEATURE_BTCOEX_PARAM, 4871 ar->running_fw->fw_file.fw_features)) { 4872 ret = ath10k_wmi_pdev_set_param(ar, param, 0); 4873 if (ret) { 4874 ath10k_warn(ar, 4875 "failed to set btcoex param: %d\n", ret); 4876 goto err_core_stop; 4877 } 4878 clear_bit(ATH10K_FLAG_BTCOEX, &ar->dev_flags); 4879 } 4880 4881 if (test_bit(WMI_SERVICE_BB_TIMING_CONFIG_SUPPORT, ar->wmi.svc_map)) { 4882 ret = __ath10k_fetch_bb_timing_dt(ar, &bb_timing); 4883 if (!ret) { 4884 ret = ath10k_wmi_pdev_bb_timing(ar, &bb_timing); 4885 if (ret) { 4886 ath10k_warn(ar, 4887 "failed to set bb timings: %d\n", 4888 ret); 4889 goto err_core_stop; 4890 } 4891 } 4892 } 4893 4894 ar->num_started_vdevs = 0; 4895 ath10k_regd_update(ar); 4896 4897 ath10k_spectral_start(ar); 4898 ath10k_thermal_set_throttling(ar); 4899 4900 ar->radar_conf_state = ATH10K_RADAR_CONFIRMATION_IDLE; 4901 4902 mutex_unlock(&ar->conf_mutex); 4903 return 0; 4904 4905 err_core_stop: 4906 ath10k_core_stop(ar); 4907 4908 err_power_down: 4909 ath10k_hif_power_down(ar); 4910 4911 err_off: 4912 ar->state = ATH10K_STATE_OFF; 4913 4914 err: 4915 mutex_unlock(&ar->conf_mutex); 4916 return ret; 4917 } 4918 4919 static void ath10k_stop(struct ieee80211_hw *hw) 4920 { 4921 struct ath10k *ar = hw->priv; 4922 4923 ath10k_drain_tx(ar); 4924 4925 mutex_lock(&ar->conf_mutex); 4926 if (ar->state != ATH10K_STATE_OFF) { 4927 ath10k_halt(ar); 4928 ar->state = ATH10K_STATE_OFF; 4929 } 4930 mutex_unlock(&ar->conf_mutex); 4931 4932 cancel_work_sync(&ar->set_coverage_class_work); 4933 cancel_delayed_work_sync(&ar->scan.timeout); 4934 cancel_work_sync(&ar->restart_work); 4935 } 4936 4937 static int ath10k_config_ps(struct ath10k *ar) 4938 { 4939 struct ath10k_vif *arvif; 4940 int ret = 0; 4941 4942 lockdep_assert_held(&ar->conf_mutex); 4943 4944 list_for_each_entry(arvif, &ar->arvifs, list) { 4945 ret = ath10k_mac_vif_setup_ps(arvif); 4946 if (ret) { 4947 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 4948 break; 4949 } 4950 } 4951 4952 return ret; 4953 } 4954 4955 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 4956 { 4957 int ret; 4958 u32 param; 4959 4960 lockdep_assert_held(&ar->conf_mutex); 4961 4962 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", 
txpower); 4963 4964 param = ar->wmi.pdev_param->txpower_limit2g; 4965 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4966 if (ret) { 4967 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 4968 txpower, ret); 4969 return ret; 4970 } 4971 4972 param = ar->wmi.pdev_param->txpower_limit5g; 4973 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 4974 if (ret) { 4975 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 4976 txpower, ret); 4977 return ret; 4978 } 4979 4980 return 0; 4981 } 4982 4983 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 4984 { 4985 struct ath10k_vif *arvif; 4986 int ret, txpower = -1; 4987 4988 lockdep_assert_held(&ar->conf_mutex); 4989 4990 list_for_each_entry(arvif, &ar->arvifs, list) { 4991 if (arvif->txpower <= 0) 4992 continue; 4993 4994 if (txpower == -1) 4995 txpower = arvif->txpower; 4996 else 4997 txpower = min(txpower, arvif->txpower); 4998 } 4999 5000 if (txpower == -1) 5001 return 0; 5002 5003 ret = ath10k_mac_txpower_setup(ar, txpower); 5004 if (ret) { 5005 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 5006 txpower, ret); 5007 return ret; 5008 } 5009 5010 return 0; 5011 } 5012 5013 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 5014 { 5015 struct ath10k *ar = hw->priv; 5016 struct ieee80211_conf *conf = &hw->conf; 5017 int ret = 0; 5018 5019 mutex_lock(&ar->conf_mutex); 5020 5021 if (changed & IEEE80211_CONF_CHANGE_PS) 5022 ath10k_config_ps(ar); 5023 5024 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 5025 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 5026 ret = ath10k_monitor_recalc(ar); 5027 if (ret) 5028 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5029 } 5030 5031 mutex_unlock(&ar->conf_mutex); 5032 return ret; 5033 } 5034 5035 static u32 get_nss_from_chainmask(u16 chain_mask) 5036 { 5037 if ((chain_mask & 0xf) == 0xf) 5038 return 4; 5039 else if ((chain_mask & 0x7) == 0x7) 5040 return 3; 5041 else if ((chain_mask & 0x3) == 0x3) 5042 return 2; 5043 return 1; 5044 } 5045 5046 static int ath10k_mac_set_txbf_conf(struct ath10k_vif *arvif) 5047 { 5048 u32 value = 0; 5049 struct ath10k *ar = arvif->ar; 5050 int nsts; 5051 int sound_dim; 5052 5053 if (ath10k_wmi_get_txbf_conf_scheme(ar) != WMI_TXBF_CONF_BEFORE_ASSOC) 5054 return 0; 5055 5056 nsts = ath10k_mac_get_vht_cap_bf_sts(ar); 5057 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 5058 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) 5059 value |= SM(nsts, WMI_TXBF_STS_CAP_OFFSET); 5060 5061 sound_dim = ath10k_mac_get_vht_cap_bf_sound_dim(ar); 5062 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 5063 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) 5064 value |= SM(sound_dim, WMI_BF_SOUND_DIM_OFFSET); 5065 5066 if (!value) 5067 return 0; 5068 5069 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 5070 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 5071 5072 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 5073 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFER | 5074 WMI_VDEV_PARAM_TXBF_SU_TX_BFER); 5075 5076 if (ar->vht_cap_info & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 5077 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 5078 5079 if (ar->vht_cap_info & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 5080 value |= (WMI_VDEV_PARAM_TXBF_MU_TX_BFEE | 5081 WMI_VDEV_PARAM_TXBF_SU_TX_BFEE); 5082 5083 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5084 ar->wmi.vdev_param->txbf, value); 5085 } 5086 5087 /* 5088 * TODO: 5089 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 5090 * because we will send mgmt frames 
without CCK. This requirement 5091 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 5092 * in the TX packet. 5093 */ 5094 static int ath10k_add_interface(struct ieee80211_hw *hw, 5095 struct ieee80211_vif *vif) 5096 { 5097 struct ath10k *ar = hw->priv; 5098 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5099 struct ath10k_peer *peer; 5100 enum wmi_sta_powersave_param param; 5101 int ret = 0; 5102 u32 value; 5103 int bit; 5104 int i; 5105 u32 vdev_param; 5106 5107 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 5108 5109 mutex_lock(&ar->conf_mutex); 5110 5111 memset(arvif, 0, sizeof(*arvif)); 5112 ath10k_mac_txq_init(vif->txq); 5113 5114 arvif->ar = ar; 5115 arvif->vif = vif; 5116 5117 INIT_LIST_HEAD(&arvif->list); 5118 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 5119 INIT_DELAYED_WORK(&arvif->connection_loss_work, 5120 ath10k_mac_vif_sta_connection_loss_work); 5121 5122 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 5123 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 5124 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 5125 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 5126 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 5127 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 5128 } 5129 5130 if (ar->num_peers >= ar->max_num_peers) { 5131 ath10k_warn(ar, "refusing vdev creation due to insufficient peer entry resources in firmware\n"); 5132 ret = -ENOBUFS; 5133 goto err; 5134 } 5135 5136 if (ar->free_vdev_map == 0) { 5137 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 5138 ret = -EBUSY; 5139 goto err; 5140 } 5141 bit = __ffs64(ar->free_vdev_map); 5142 5143 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 5144 bit, ar->free_vdev_map); 5145 5146 arvif->vdev_id = bit; 5147 arvif->vdev_subtype = 5148 ath10k_wmi_get_vdev_subtype(ar, WMI_VDEV_SUBTYPE_NONE); 5149 5150 switch (vif->type) { 5151 case NL80211_IFTYPE_P2P_DEVICE: 5152 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5153 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5154 (ar, WMI_VDEV_SUBTYPE_P2P_DEVICE); 5155 break; 5156 case NL80211_IFTYPE_UNSPECIFIED: 5157 case NL80211_IFTYPE_STATION: 5158 arvif->vdev_type = WMI_VDEV_TYPE_STA; 5159 if (vif->p2p) 5160 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5161 (ar, WMI_VDEV_SUBTYPE_P2P_CLIENT); 5162 break; 5163 case NL80211_IFTYPE_ADHOC: 5164 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 5165 break; 5166 case NL80211_IFTYPE_MESH_POINT: 5167 if (test_bit(WMI_SERVICE_MESH_11S, ar->wmi.svc_map)) { 5168 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5169 (ar, WMI_VDEV_SUBTYPE_MESH_11S); 5170 } else if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5171 ret = -EINVAL; 5172 ath10k_warn(ar, "must load driver with rawmode=1 to add mesh interfaces\n"); 5173 goto err; 5174 } 5175 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5176 break; 5177 case NL80211_IFTYPE_AP: 5178 arvif->vdev_type = WMI_VDEV_TYPE_AP; 5179 5180 if (vif->p2p) 5181 arvif->vdev_subtype = ath10k_wmi_get_vdev_subtype 5182 (ar, WMI_VDEV_SUBTYPE_P2P_GO); 5183 break; 5184 case NL80211_IFTYPE_MONITOR: 5185 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 5186 break; 5187 default: 5188 WARN_ON(1); 5189 break; 5190 } 5191 5192 /* Using vdev_id as queue number will make it very easy to do per-vif 5193 * tx queue locking. This shouldn't wrap due to interface combinations 5194 * but do a modulo for correctness sake and prevent using offchannel tx 5195 * queues for regular vif tx. 
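*
* For example, with mac80211's IEEE80211_MAX_QUEUES (currently 16) the
* modulo below maps vdev_id N to hw queue N % 15, so regular vifs only
* ever use queues 0..14 and the last queue index is left free for
* offchannel tx.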
5196 */ 5197 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5198 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 5199 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 5200 5201 /* Some firmware revisions don't wait for beacon tx completion before 5202 * sending another SWBA event. This could lead to hardware using old 5203 * (freed) beacon data in some cases, e.g. tx credit starvation 5204 * combined with missed TBTT. This is very very rare. 5205 * 5206 * On non-IOMMU-enabled hosts this could be a possible security issue 5207 * because hw could beacon some random data on the air. On 5208 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 5209 * device would crash. 5210 * 5211 * Since there are no beacon tx completions (implicit nor explicit) 5212 * propagated to host the only workaround for this is to allocate a 5213 * DMA-coherent buffer for a lifetime of a vif and use it for all 5214 * beacon tx commands. Worst case for this approach is some beacons may 5215 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 5216 */ 5217 if (vif->type == NL80211_IFTYPE_ADHOC || 5218 vif->type == NL80211_IFTYPE_MESH_POINT || 5219 vif->type == NL80211_IFTYPE_AP) { 5220 arvif->beacon_buf = dma_alloc_coherent(ar->dev, 5221 IEEE80211_MAX_FRAME_LEN, 5222 &arvif->beacon_paddr, 5223 GFP_ATOMIC); 5224 if (!arvif->beacon_buf) { 5225 ret = -ENOMEM; 5226 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 5227 ret); 5228 goto err; 5229 } 5230 } 5231 if (test_bit(ATH10K_FLAG_HW_CRYPTO_DISABLED, &ar->dev_flags)) 5232 arvif->nohwcrypt = true; 5233 5234 if (arvif->nohwcrypt && 5235 !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) { 5236 ath10k_warn(ar, "cryptmode module param needed for sw crypto\n"); 5237 goto err; 5238 } 5239 5240 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 5241 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 5242 arvif->beacon_buf ? "single-buf" : "per-skb"); 5243 5244 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 5245 arvif->vdev_subtype, vif->addr); 5246 if (ret) { 5247 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 5248 arvif->vdev_id, ret); 5249 goto err; 5250 } 5251 5252 if (test_bit(WMI_SERVICE_VDEV_DISABLE_4_ADDR_SRC_LRN_SUPPORT, 5253 ar->wmi.svc_map)) { 5254 vdev_param = ar->wmi.vdev_param->disable_4addr_src_lrn; 5255 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5256 WMI_VDEV_DISABLE_4_ADDR_SRC_LRN); 5257 if (ret && ret != -EOPNOTSUPP) { 5258 ath10k_warn(ar, "failed to disable 4addr src lrn vdev %i: %d\n", 5259 arvif->vdev_id, ret); 5260 } 5261 } 5262 5263 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 5264 spin_lock_bh(&ar->data_lock); 5265 list_add(&arvif->list, &ar->arvifs); 5266 spin_unlock_bh(&ar->data_lock); 5267 5268 /* It makes no sense to have firmware do keepalives. mac80211 already 5269 * takes care of this with idle connection polling. 5270 */ 5271 ret = ath10k_mac_vif_disable_keepalive(arvif); 5272 if (ret) { 5273 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 5274 arvif->vdev_id, ret); 5275 goto err_vdev_delete; 5276 } 5277 5278 arvif->def_wep_key_idx = -1; 5279 5280 vdev_param = ar->wmi.vdev_param->tx_encap_type; 5281 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5282 ATH10K_HW_TXRX_NATIVE_WIFI); 5283 /* 10.X firmware does not support this VDEV parameter. 
Do not warn */ 5284 if (ret && ret != -EOPNOTSUPP) { 5285 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 5286 arvif->vdev_id, ret); 5287 goto err_vdev_delete; 5288 } 5289 5290 /* Configuring number of spatial stream for monitor interface is causing 5291 * target assert in qca9888 and qca6174. 5292 */ 5293 if (ar->cfg_tx_chainmask && (vif->type != NL80211_IFTYPE_MONITOR)) { 5294 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 5295 5296 vdev_param = ar->wmi.vdev_param->nss; 5297 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5298 nss); 5299 if (ret) { 5300 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 5301 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 5302 ret); 5303 goto err_vdev_delete; 5304 } 5305 } 5306 5307 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5308 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5309 ret = ath10k_peer_create(ar, vif, NULL, arvif->vdev_id, 5310 vif->addr, WMI_PEER_TYPE_DEFAULT); 5311 if (ret) { 5312 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 5313 arvif->vdev_id, ret); 5314 goto err_vdev_delete; 5315 } 5316 5317 spin_lock_bh(&ar->data_lock); 5318 5319 peer = ath10k_peer_find(ar, arvif->vdev_id, vif->addr); 5320 if (!peer) { 5321 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n", 5322 vif->addr, arvif->vdev_id); 5323 spin_unlock_bh(&ar->data_lock); 5324 ret = -ENOENT; 5325 goto err_peer_delete; 5326 } 5327 5328 arvif->peer_id = find_first_bit(peer->peer_ids, 5329 ATH10K_MAX_NUM_PEER_IDS); 5330 5331 spin_unlock_bh(&ar->data_lock); 5332 } else { 5333 arvif->peer_id = HTT_INVALID_PEERID; 5334 } 5335 5336 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 5337 ret = ath10k_mac_set_kickout(arvif); 5338 if (ret) { 5339 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 5340 arvif->vdev_id, ret); 5341 goto err_peer_delete; 5342 } 5343 } 5344 5345 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 5346 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 5347 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5348 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5349 param, value); 5350 if (ret) { 5351 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 5352 arvif->vdev_id, ret); 5353 goto err_peer_delete; 5354 } 5355 5356 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5357 if (ret) { 5358 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5359 arvif->vdev_id, ret); 5360 goto err_peer_delete; 5361 } 5362 5363 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5364 if (ret) { 5365 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5366 arvif->vdev_id, ret); 5367 goto err_peer_delete; 5368 } 5369 } 5370 5371 ret = ath10k_mac_set_txbf_conf(arvif); 5372 if (ret) { 5373 ath10k_warn(ar, "failed to set txbf for vdev %d: %d\n", 5374 arvif->vdev_id, ret); 5375 goto err_peer_delete; 5376 } 5377 5378 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 5379 if (ret) { 5380 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5381 arvif->vdev_id, ret); 5382 goto err_peer_delete; 5383 } 5384 5385 arvif->txpower = vif->bss_conf.txpower; 5386 ret = ath10k_mac_txpower_recalc(ar); 5387 if (ret) { 5388 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5389 goto err_peer_delete; 5390 } 5391 5392 if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5393 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5394 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5395 arvif->ftm_responder); 5396 5397 /* It is harmless to 
not set FTM role. Do not warn */ 5398 if (ret && ret != -EOPNOTSUPP) 5399 ath10k_warn(ar, "failed to set vdev %i FTM Responder: %d\n", 5400 arvif->vdev_id, ret); 5401 } 5402 5403 if (vif->type == NL80211_IFTYPE_MONITOR) { 5404 ar->monitor_arvif = arvif; 5405 ret = ath10k_monitor_recalc(ar); 5406 if (ret) { 5407 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5408 goto err_peer_delete; 5409 } 5410 } 5411 5412 spin_lock_bh(&ar->htt.tx_lock); 5413 if (!ar->tx_paused) 5414 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 5415 spin_unlock_bh(&ar->htt.tx_lock); 5416 5417 mutex_unlock(&ar->conf_mutex); 5418 return 0; 5419 5420 err_peer_delete: 5421 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5422 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) 5423 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 5424 5425 err_vdev_delete: 5426 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5427 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5428 spin_lock_bh(&ar->data_lock); 5429 list_del(&arvif->list); 5430 spin_unlock_bh(&ar->data_lock); 5431 5432 err: 5433 if (arvif->beacon_buf) { 5434 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 5435 arvif->beacon_buf, arvif->beacon_paddr); 5436 arvif->beacon_buf = NULL; 5437 } 5438 5439 mutex_unlock(&ar->conf_mutex); 5440 5441 return ret; 5442 } 5443 5444 static void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 5445 { 5446 int i; 5447 5448 for (i = 0; i < BITS_PER_LONG; i++) 5449 ath10k_mac_vif_tx_unlock(arvif, i); 5450 } 5451 5452 static void ath10k_remove_interface(struct ieee80211_hw *hw, 5453 struct ieee80211_vif *vif) 5454 { 5455 struct ath10k *ar = hw->priv; 5456 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5457 struct ath10k_peer *peer; 5458 int ret; 5459 int i; 5460 5461 cancel_work_sync(&arvif->ap_csa_work); 5462 cancel_delayed_work_sync(&arvif->connection_loss_work); 5463 5464 mutex_lock(&ar->conf_mutex); 5465 5466 spin_lock_bh(&ar->data_lock); 5467 ath10k_mac_vif_beacon_cleanup(arvif); 5468 spin_unlock_bh(&ar->data_lock); 5469 5470 ret = ath10k_spectral_vif_stop(arvif); 5471 if (ret) 5472 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 5473 arvif->vdev_id, ret); 5474 5475 ar->free_vdev_map |= 1LL << arvif->vdev_id; 5476 spin_lock_bh(&ar->data_lock); 5477 list_del(&arvif->list); 5478 spin_unlock_bh(&ar->data_lock); 5479 5480 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5481 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5482 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 5483 vif->addr); 5484 if (ret) 5485 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 5486 arvif->vdev_id, ret); 5487 5488 kfree(arvif->u.ap.noa_data); 5489 } 5490 5491 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 5492 arvif->vdev_id); 5493 5494 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 5495 if (ret) 5496 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 5497 arvif->vdev_id, ret); 5498 5499 /* Some firmware revisions don't notify host about self-peer removal 5500 * until after associated vdev is deleted. 
5501 */ 5502 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 5503 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 5504 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 5505 vif->addr); 5506 if (ret) 5507 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 5508 arvif->vdev_id, ret); 5509 5510 spin_lock_bh(&ar->data_lock); 5511 ar->num_peers--; 5512 spin_unlock_bh(&ar->data_lock); 5513 } 5514 5515 spin_lock_bh(&ar->data_lock); 5516 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 5517 peer = ar->peer_map[i]; 5518 if (!peer) 5519 continue; 5520 5521 if (peer->vif == vif) { 5522 ath10k_warn(ar, "found vif peer %pM entry on vdev %i after it was supposedly removed\n", 5523 vif->addr, arvif->vdev_id); 5524 peer->vif = NULL; 5525 } 5526 } 5527 spin_unlock_bh(&ar->data_lock); 5528 5529 ath10k_peer_cleanup(ar, arvif->vdev_id); 5530 ath10k_mac_txq_unref(ar, vif->txq); 5531 5532 if (vif->type == NL80211_IFTYPE_MONITOR) { 5533 ar->monitor_arvif = NULL; 5534 ret = ath10k_monitor_recalc(ar); 5535 if (ret) 5536 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5537 } 5538 5539 ret = ath10k_mac_txpower_recalc(ar); 5540 if (ret) 5541 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5542 5543 spin_lock_bh(&ar->htt.tx_lock); 5544 ath10k_mac_vif_tx_unlock_all(arvif); 5545 spin_unlock_bh(&ar->htt.tx_lock); 5546 5547 ath10k_mac_txq_unref(ar, vif->txq); 5548 5549 mutex_unlock(&ar->conf_mutex); 5550 } 5551 5552 /* 5553 * FIXME: Has to be verified. 5554 */ 5555 #define SUPPORTED_FILTERS \ 5556 (FIF_ALLMULTI | \ 5557 FIF_CONTROL | \ 5558 FIF_PSPOLL | \ 5559 FIF_OTHER_BSS | \ 5560 FIF_BCN_PRBRESP_PROMISC | \ 5561 FIF_PROBE_REQ | \ 5562 FIF_FCSFAIL) 5563 5564 static void ath10k_configure_filter(struct ieee80211_hw *hw, 5565 unsigned int changed_flags, 5566 unsigned int *total_flags, 5567 u64 multicast) 5568 { 5569 struct ath10k *ar = hw->priv; 5570 int ret; 5571 5572 mutex_lock(&ar->conf_mutex); 5573 5574 changed_flags &= SUPPORTED_FILTERS; 5575 *total_flags &= SUPPORTED_FILTERS; 5576 ar->filter_flags = *total_flags; 5577 5578 ret = ath10k_monitor_recalc(ar); 5579 if (ret) 5580 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 5581 5582 mutex_unlock(&ar->conf_mutex); 5583 } 5584 5585 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 5586 struct ieee80211_vif *vif, 5587 struct ieee80211_bss_conf *info, 5588 u32 changed) 5589 { 5590 struct ath10k *ar = hw->priv; 5591 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5592 struct cfg80211_chan_def def; 5593 u32 vdev_param, pdev_param, slottime, preamble; 5594 u16 bitrate, hw_value; 5595 u8 rate, basic_rate_idx, rateidx; 5596 int ret = 0, hw_rate_code, mcast_rate; 5597 enum nl80211_band band; 5598 const struct ieee80211_supported_band *sband; 5599 5600 mutex_lock(&ar->conf_mutex); 5601 5602 if (changed & BSS_CHANGED_IBSS) 5603 ath10k_control_ibss(arvif, info, vif->addr); 5604 5605 if (changed & BSS_CHANGED_BEACON_INT) { 5606 arvif->beacon_interval = info->beacon_int; 5607 vdev_param = ar->wmi.vdev_param->beacon_interval; 5608 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5609 arvif->beacon_interval); 5610 ath10k_dbg(ar, ATH10K_DBG_MAC, 5611 "mac vdev %d beacon_interval %d\n", 5612 arvif->vdev_id, arvif->beacon_interval); 5613 5614 if (ret) 5615 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 5616 arvif->vdev_id, ret); 5617 } 5618 5619 if (changed & BSS_CHANGED_BEACON) { 5620 ath10k_dbg(ar, ATH10K_DBG_MAC, 5621 "vdev %d set beacon tx mode to staggered\n", 5622 arvif->vdev_id); 5623 5624 pdev_param 
= ar->wmi.pdev_param->beacon_tx_mode; 5625 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 5626 WMI_BEACON_STAGGERED_MODE); 5627 if (ret) 5628 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 5629 arvif->vdev_id, ret); 5630 5631 ret = ath10k_mac_setup_bcn_tmpl(arvif); 5632 if (ret) 5633 ath10k_warn(ar, "failed to update beacon template: %d\n", 5634 ret); 5635 5636 if (ieee80211_vif_is_mesh(vif)) { 5637 /* mesh doesn't use SSID but firmware needs it */ 5638 strncpy(arvif->u.ap.ssid, "mesh", 5639 sizeof(arvif->u.ap.ssid)); 5640 arvif->u.ap.ssid_len = 4; 5641 } 5642 } 5643 5644 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 5645 ret = ath10k_mac_setup_prb_tmpl(arvif); 5646 if (ret) 5647 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 5648 arvif->vdev_id, ret); 5649 } 5650 5651 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 5652 arvif->dtim_period = info->dtim_period; 5653 5654 ath10k_dbg(ar, ATH10K_DBG_MAC, 5655 "mac vdev %d dtim_period %d\n", 5656 arvif->vdev_id, arvif->dtim_period); 5657 5658 vdev_param = ar->wmi.vdev_param->dtim_period; 5659 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5660 arvif->dtim_period); 5661 if (ret) 5662 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 5663 arvif->vdev_id, ret); 5664 } 5665 5666 if (changed & BSS_CHANGED_SSID && 5667 vif->type == NL80211_IFTYPE_AP) { 5668 arvif->u.ap.ssid_len = info->ssid_len; 5669 if (info->ssid_len) 5670 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 5671 arvif->u.ap.hidden_ssid = info->hidden_ssid; 5672 } 5673 5674 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 5675 ether_addr_copy(arvif->bssid, info->bssid); 5676 5677 if (changed & BSS_CHANGED_FTM_RESPONDER && 5678 arvif->ftm_responder != info->ftm_responder && 5679 test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map)) { 5680 arvif->ftm_responder = info->ftm_responder; 5681 5682 vdev_param = ar->wmi.vdev_param->rtt_responder_role; 5683 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5684 arvif->ftm_responder); 5685 5686 ath10k_dbg(ar, ATH10K_DBG_MAC, 5687 "mac vdev %d ftm_responder %d:ret %d\n", 5688 arvif->vdev_id, arvif->ftm_responder, ret); 5689 } 5690 5691 if (changed & BSS_CHANGED_BEACON_ENABLED) 5692 ath10k_control_beaconing(arvif, info); 5693 5694 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 5695 arvif->use_cts_prot = info->use_cts_prot; 5696 5697 ret = ath10k_recalc_rtscts_prot(arvif); 5698 if (ret) 5699 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 5700 arvif->vdev_id, ret); 5701 5702 if (ath10k_mac_can_set_cts_prot(arvif)) { 5703 ret = ath10k_mac_set_cts_prot(arvif); 5704 if (ret) 5705 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 5706 arvif->vdev_id, ret); 5707 } 5708 } 5709 5710 if (changed & BSS_CHANGED_ERP_SLOT) { 5711 if (info->use_short_slot) 5712 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 5713 5714 else 5715 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 5716 5717 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 5718 arvif->vdev_id, slottime); 5719 5720 vdev_param = ar->wmi.vdev_param->slot_time; 5721 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5722 slottime); 5723 if (ret) 5724 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 5725 arvif->vdev_id, ret); 5726 } 5727 5728 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 5729 if (info->use_short_preamble) 5730 preamble = WMI_VDEV_PREAMBLE_SHORT; 5731 else 5732 preamble = WMI_VDEV_PREAMBLE_LONG; 5733 
5734 ath10k_dbg(ar, ATH10K_DBG_MAC, 5735 "mac vdev %d preamble %dn", 5736 arvif->vdev_id, preamble); 5737 5738 vdev_param = ar->wmi.vdev_param->preamble; 5739 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5740 preamble); 5741 if (ret) 5742 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 5743 arvif->vdev_id, ret); 5744 } 5745 5746 if (changed & BSS_CHANGED_ASSOC) { 5747 if (info->assoc) { 5748 /* Workaround: Make sure monitor vdev is not running 5749 * when associating to prevent some firmware revisions 5750 * (e.g. 10.1 and 10.2) from crashing. 5751 */ 5752 if (ar->monitor_started) 5753 ath10k_monitor_stop(ar); 5754 ath10k_bss_assoc(hw, vif, info); 5755 ath10k_monitor_recalc(ar); 5756 } else { 5757 ath10k_bss_disassoc(hw, vif); 5758 } 5759 } 5760 5761 if (changed & BSS_CHANGED_TXPOWER) { 5762 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 5763 arvif->vdev_id, info->txpower); 5764 5765 arvif->txpower = info->txpower; 5766 ret = ath10k_mac_txpower_recalc(ar); 5767 if (ret) 5768 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 5769 } 5770 5771 if (changed & BSS_CHANGED_PS) { 5772 arvif->ps = vif->bss_conf.ps; 5773 5774 ret = ath10k_config_ps(ar); 5775 if (ret) 5776 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 5777 arvif->vdev_id, ret); 5778 } 5779 5780 if (changed & BSS_CHANGED_MCAST_RATE && 5781 !ath10k_mac_vif_chan(arvif->vif, &def)) { 5782 band = def.chan->band; 5783 mcast_rate = vif->bss_conf.mcast_rate[band]; 5784 if (mcast_rate > 0) 5785 rateidx = mcast_rate - 1; 5786 else 5787 rateidx = ffs(vif->bss_conf.basic_rates) - 1; 5788 5789 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 5790 rateidx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 5791 5792 bitrate = ath10k_wmi_legacy_rates[rateidx].bitrate; 5793 hw_value = ath10k_wmi_legacy_rates[rateidx].hw_value; 5794 if (ath10k_mac_bitrate_is_cck(bitrate)) 5795 preamble = WMI_RATE_PREAMBLE_CCK; 5796 else 5797 preamble = WMI_RATE_PREAMBLE_OFDM; 5798 5799 rate = ATH10K_HW_RATECODE(hw_value, 0, preamble); 5800 5801 ath10k_dbg(ar, ATH10K_DBG_MAC, 5802 "mac vdev %d mcast_rate %x\n", 5803 arvif->vdev_id, rate); 5804 5805 vdev_param = ar->wmi.vdev_param->mcast_data_rate; 5806 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5807 vdev_param, rate); 5808 if (ret) 5809 ath10k_warn(ar, 5810 "failed to set mcast rate on vdev %i: %d\n", 5811 arvif->vdev_id, ret); 5812 5813 vdev_param = ar->wmi.vdev_param->bcast_data_rate; 5814 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 5815 vdev_param, rate); 5816 if (ret) 5817 ath10k_warn(ar, 5818 "failed to set bcast rate on vdev %i: %d\n", 5819 arvif->vdev_id, ret); 5820 } 5821 5822 if (changed & BSS_CHANGED_BASIC_RATES) { 5823 if (ath10k_mac_vif_chan(vif, &def)) { 5824 mutex_unlock(&ar->conf_mutex); 5825 return; 5826 } 5827 5828 sband = ar->hw->wiphy->bands[def.chan->band]; 5829 basic_rate_idx = ffs(vif->bss_conf.basic_rates) - 1; 5830 bitrate = sband->bitrates[basic_rate_idx].bitrate; 5831 5832 hw_rate_code = ath10k_mac_get_rate_hw_value(bitrate); 5833 if (hw_rate_code < 0) { 5834 ath10k_warn(ar, "bitrate not supported %d\n", bitrate); 5835 mutex_unlock(&ar->conf_mutex); 5836 return; 5837 } 5838 5839 vdev_param = ar->wmi.vdev_param->mgmt_rate; 5840 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 5841 hw_rate_code); 5842 if (ret) 5843 ath10k_warn(ar, "failed to set mgmt tx rate %d\n", ret); 5844 } 5845 5846 mutex_unlock(&ar->conf_mutex); 5847 } 5848 5849 static void ath10k_mac_op_set_coverage_class(struct ieee80211_hw *hw, s16 value) 
5850 { 5851 struct ath10k *ar = hw->priv; 5852 5853 /* This function should never be called if setting the coverage class 5854 * is not supported on this hardware. 5855 */ 5856 if (!ar->hw_params.hw_ops->set_coverage_class) { 5857 WARN_ON_ONCE(1); 5858 return; 5859 } 5860 ar->hw_params.hw_ops->set_coverage_class(ar, value); 5861 } 5862 5863 struct ath10k_mac_tdls_iter_data { 5864 u32 num_tdls_stations; 5865 struct ieee80211_vif *curr_vif; 5866 }; 5867 5868 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5869 struct ieee80211_sta *sta) 5870 { 5871 struct ath10k_mac_tdls_iter_data *iter_data = data; 5872 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5873 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5874 5875 if (sta->tdls && sta_vif == iter_data->curr_vif) 5876 iter_data->num_tdls_stations++; 5877 } 5878 5879 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5880 struct ieee80211_vif *vif) 5881 { 5882 struct ath10k_mac_tdls_iter_data data = {}; 5883 5884 data.curr_vif = vif; 5885 5886 ieee80211_iterate_stations_atomic(hw, 5887 ath10k_mac_tdls_vif_stations_count_iter, 5888 &data); 5889 return data.num_tdls_stations; 5890 } 5891 5892 static int ath10k_hw_scan(struct ieee80211_hw *hw, 5893 struct ieee80211_vif *vif, 5894 struct ieee80211_scan_request *hw_req) 5895 { 5896 struct ath10k *ar = hw->priv; 5897 struct ath10k_vif *arvif = (void *)vif->drv_priv; 5898 struct cfg80211_scan_request *req = &hw_req->req; 5899 struct wmi_start_scan_arg arg; 5900 int ret = 0; 5901 int i; 5902 u32 scan_timeout; 5903 5904 mutex_lock(&ar->conf_mutex); 5905 5906 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 5907 ret = -EBUSY; 5908 goto exit; 5909 } 5910 5911 spin_lock_bh(&ar->data_lock); 5912 switch (ar->scan.state) { 5913 case ATH10K_SCAN_IDLE: 5914 reinit_completion(&ar->scan.started); 5915 reinit_completion(&ar->scan.completed); 5916 ar->scan.state = ATH10K_SCAN_STARTING; 5917 ar->scan.is_roc = false; 5918 ar->scan.vdev_id = arvif->vdev_id; 5919 ret = 0; 5920 break; 5921 case ATH10K_SCAN_STARTING: 5922 case ATH10K_SCAN_RUNNING: 5923 case ATH10K_SCAN_ABORTING: 5924 ret = -EBUSY; 5925 break; 5926 } 5927 spin_unlock_bh(&ar->data_lock); 5928 5929 if (ret) 5930 goto exit; 5931 5932 memset(&arg, 0, sizeof(arg)); 5933 ath10k_wmi_start_scan_init(ar, &arg); 5934 arg.vdev_id = arvif->vdev_id; 5935 arg.scan_id = ATH10K_SCAN_ID; 5936 5937 if (req->ie_len) { 5938 arg.ie_len = req->ie_len; 5939 memcpy(arg.ie, req->ie, arg.ie_len); 5940 } 5941 5942 if (req->n_ssids) { 5943 arg.n_ssids = req->n_ssids; 5944 for (i = 0; i < arg.n_ssids; i++) { 5945 arg.ssids[i].len = req->ssids[i].ssid_len; 5946 arg.ssids[i].ssid = req->ssids[i].ssid; 5947 } 5948 } else { 5949 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5950 } 5951 5952 if (req->flags & NL80211_SCAN_FLAG_RANDOM_ADDR) { 5953 arg.scan_ctrl_flags |= WMI_SCAN_ADD_SPOOFED_MAC_IN_PROBE_REQ; 5954 ether_addr_copy(arg.mac_addr.addr, req->mac_addr); 5955 ether_addr_copy(arg.mac_mask.addr, req->mac_addr_mask); 5956 } 5957 5958 if (req->n_channels) { 5959 arg.n_channels = req->n_channels; 5960 for (i = 0; i < arg.n_channels; i++) 5961 arg.channels[i] = req->channels[i]->center_freq; 5962 } 5963 5964 /* if duration is set, default dwell times will be overwritten */ 5965 if (req->duration) { 5966 arg.dwell_time_active = req->duration; 5967 arg.dwell_time_passive = req->duration; 5968 arg.burst_duration_ms = req->duration; 5969 5970 scan_timeout = min_t(u32, arg.max_rest_time * 5971 (arg.n_channels - 1) + (req->duration + 
5972 ATH10K_SCAN_CHANNEL_SWITCH_WMI_EVT_OVERHEAD) * 5973 arg.n_channels, arg.max_scan_time + 200); 5974 5975 } else { 5976 /* Add a 200ms margin to account for event/command processing */ 5977 scan_timeout = arg.max_scan_time + 200; 5978 } 5979 5980 ret = ath10k_start_scan(ar, &arg); 5981 if (ret) { 5982 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 5983 spin_lock_bh(&ar->data_lock); 5984 ar->scan.state = ATH10K_SCAN_IDLE; 5985 spin_unlock_bh(&ar->data_lock); 5986 } 5987 5988 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5989 msecs_to_jiffies(scan_timeout)); 5990 5991 exit: 5992 mutex_unlock(&ar->conf_mutex); 5993 return ret; 5994 } 5995 5996 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 5997 struct ieee80211_vif *vif) 5998 { 5999 struct ath10k *ar = hw->priv; 6000 6001 mutex_lock(&ar->conf_mutex); 6002 ath10k_scan_abort(ar); 6003 mutex_unlock(&ar->conf_mutex); 6004 6005 cancel_delayed_work_sync(&ar->scan.timeout); 6006 } 6007 6008 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 6009 struct ath10k_vif *arvif, 6010 enum set_key_cmd cmd, 6011 struct ieee80211_key_conf *key) 6012 { 6013 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 6014 int ret; 6015 6016 /* 10.1 firmware branch requires default key index to be set to group 6017 * key index after installing it. Otherwise FW/HW Txes corrupted 6018 * frames with multi-vif APs. This is not required for main firmware 6019 * branch (e.g. 636). 6020 * 6021 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 6022 * 6023 * FIXME: It remains unknown if this is required for multi-vif STA 6024 * interfaces on 10.1. 6025 */ 6026 6027 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 6028 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 6029 return; 6030 6031 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 6032 return; 6033 6034 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 6035 return; 6036 6037 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 6038 return; 6039 6040 if (cmd != SET_KEY) 6041 return; 6042 6043 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 6044 key->keyidx); 6045 if (ret) 6046 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 6047 arvif->vdev_id, ret); 6048 } 6049 6050 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 6051 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 6052 struct ieee80211_key_conf *key) 6053 { 6054 struct ath10k *ar = hw->priv; 6055 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6056 struct ath10k_peer *peer; 6057 const u8 *peer_addr; 6058 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 6059 key->cipher == WLAN_CIPHER_SUITE_WEP104; 6060 int ret = 0; 6061 int ret2; 6062 u32 flags = 0; 6063 u32 flags2; 6064 6065 /* this one needs to be done in software */ 6066 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 6067 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 6068 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256 || 6069 key->cipher == WLAN_CIPHER_SUITE_BIP_CMAC_256) 6070 return 1; 6071 6072 if (arvif->nohwcrypt) 6073 return 1; 6074 6075 if (key->keyidx > WMI_MAX_KEY_INDEX) 6076 return -ENOSPC; 6077 6078 mutex_lock(&ar->conf_mutex); 6079 6080 if (sta) 6081 peer_addr = sta->addr; 6082 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 6083 peer_addr = vif->bss_conf.bssid; 6084 else 6085 peer_addr = vif->addr; 6086 6087 key->hw_key_idx = key->keyidx; 6088 6089 if (is_wep) { 6090 if (cmd == SET_KEY) 6091 arvif->wep_keys[key->keyidx] = key; 6092 else 6093 arvif->wep_keys[key->keyidx] = NULL; 6094 } 6095 6096 
/* the peer should not disappear midway (unless FW goes awry) since
6097 * we already hold conf_mutex; we just make sure it's there now.
6098 */
6099 spin_lock_bh(&ar->data_lock);
6100 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr);
6101 spin_unlock_bh(&ar->data_lock);
6102
6103 if (!peer) {
6104 if (cmd == SET_KEY) {
6105 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n",
6106 peer_addr);
6107 ret = -EOPNOTSUPP;
6108 goto exit;
6109 } else {
6110 /* if the peer doesn't exist there is no key to disable anymore */
6111 goto exit;
6112 }
6113 }
6114
6115 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE)
6116 flags |= WMI_KEY_PAIRWISE;
6117 else
6118 flags |= WMI_KEY_GROUP;
6119
6120 if (is_wep) {
6121 if (cmd == DISABLE_KEY)
6122 ath10k_clear_vdev_key(arvif, key);
6123
6124 /* When WEP keys are uploaded it's possible that there are
6125 * stations associated already (e.g. when merging) without any
6126 * keys. Static WEP needs an explicit per-peer key upload.
6127 */
6128 if (vif->type == NL80211_IFTYPE_ADHOC &&
6129 cmd == SET_KEY)
6130 ath10k_mac_vif_update_wep_key(arvif, key);
6131
6132 /* 802.1x never sets the def_wep_key_idx so each set_key()
6133 * call changes default tx key.
6134 *
6135 * Static WEP sets def_wep_key_idx via .set_default_unicast_key
6136 * after first set_key().
6137 */
6138 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1)
6139 flags |= WMI_KEY_TX_USAGE;
6140 }
6141
6142 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags);
6143 if (ret) {
6144 WARN_ON(ret > 0);
6145 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n",
6146 arvif->vdev_id, peer_addr, ret);
6147 goto exit;
6148 }
6149
6150 /* mac80211 sets static WEP keys as groupwise while firmware requires
6151 * them to be installed twice as both pairwise and groupwise.
6152 */ 6153 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 6154 flags2 = flags; 6155 flags2 &= ~WMI_KEY_GROUP; 6156 flags2 |= WMI_KEY_PAIRWISE; 6157 6158 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 6159 if (ret) { 6160 WARN_ON(ret > 0); 6161 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 6162 arvif->vdev_id, peer_addr, ret); 6163 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 6164 peer_addr, flags); 6165 if (ret2) { 6166 WARN_ON(ret2 > 0); 6167 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 6168 arvif->vdev_id, peer_addr, ret2); 6169 } 6170 goto exit; 6171 } 6172 } 6173 6174 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 6175 6176 spin_lock_bh(&ar->data_lock); 6177 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 6178 if (peer && cmd == SET_KEY) 6179 peer->keys[key->keyidx] = key; 6180 else if (peer && cmd == DISABLE_KEY) 6181 peer->keys[key->keyidx] = NULL; 6182 else if (peer == NULL) 6183 /* impossible unless FW goes crazy */ 6184 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 6185 spin_unlock_bh(&ar->data_lock); 6186 6187 if (sta && sta->tdls) 6188 ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6189 WMI_PEER_AUTHORIZE, 1); 6190 6191 exit: 6192 mutex_unlock(&ar->conf_mutex); 6193 return ret; 6194 } 6195 6196 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 6197 struct ieee80211_vif *vif, 6198 int keyidx) 6199 { 6200 struct ath10k *ar = hw->priv; 6201 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6202 int ret; 6203 6204 mutex_lock(&arvif->ar->conf_mutex); 6205 6206 if (arvif->ar->state != ATH10K_STATE_ON) 6207 goto unlock; 6208 6209 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 6210 arvif->vdev_id, keyidx); 6211 6212 ret = ath10k_wmi_vdev_set_param(arvif->ar, 6213 arvif->vdev_id, 6214 arvif->ar->wmi.vdev_param->def_keyid, 6215 keyidx); 6216 6217 if (ret) { 6218 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 6219 arvif->vdev_id, 6220 ret); 6221 goto unlock; 6222 } 6223 6224 arvif->def_wep_key_idx = keyidx; 6225 6226 unlock: 6227 mutex_unlock(&arvif->ar->conf_mutex); 6228 } 6229 6230 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 6231 { 6232 struct ath10k *ar; 6233 struct ath10k_vif *arvif; 6234 struct ath10k_sta *arsta; 6235 struct ieee80211_sta *sta; 6236 struct cfg80211_chan_def def; 6237 enum nl80211_band band; 6238 const u8 *ht_mcs_mask; 6239 const u16 *vht_mcs_mask; 6240 u32 changed, bw, nss, smps; 6241 int err; 6242 6243 arsta = container_of(wk, struct ath10k_sta, update_wk); 6244 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 6245 arvif = arsta->arvif; 6246 ar = arvif->ar; 6247 6248 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 6249 return; 6250 6251 band = def.chan->band; 6252 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 6253 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 6254 6255 spin_lock_bh(&ar->data_lock); 6256 6257 changed = arsta->changed; 6258 arsta->changed = 0; 6259 6260 bw = arsta->bw; 6261 nss = arsta->nss; 6262 smps = arsta->smps; 6263 6264 spin_unlock_bh(&ar->data_lock); 6265 6266 mutex_lock(&ar->conf_mutex); 6267 6268 nss = max_t(u32, 1, nss); 6269 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 6270 ath10k_mac_max_vht_nss(vht_mcs_mask))); 6271 6272 if (changed & IEEE80211_RC_BW_CHANGED) { 6273 enum wmi_phy_mode mode; 6274 6275 mode = chan_to_phymode(&def); 6276 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw 
%d phymode %d\n", 6277 sta->addr, bw, mode); 6278 6279 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6280 WMI_PEER_PHYMODE, mode); 6281 if (err) { 6282 ath10k_warn(ar, "failed to update STA %pM peer phymode %d: %d\n", 6283 sta->addr, mode, err); 6284 goto exit; 6285 } 6286 6287 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6288 WMI_PEER_CHAN_WIDTH, bw); 6289 if (err) 6290 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 6291 sta->addr, bw, err); 6292 } 6293 6294 if (changed & IEEE80211_RC_NSS_CHANGED) { 6295 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 6296 sta->addr, nss); 6297 6298 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6299 WMI_PEER_NSS, nss); 6300 if (err) 6301 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 6302 sta->addr, nss, err); 6303 } 6304 6305 if (changed & IEEE80211_RC_SMPS_CHANGED) { 6306 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 6307 sta->addr, smps); 6308 6309 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 6310 WMI_PEER_SMPS_STATE, smps); 6311 if (err) 6312 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 6313 sta->addr, smps, err); 6314 } 6315 6316 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED) { 6317 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates\n", 6318 sta->addr); 6319 6320 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 6321 if (err) 6322 ath10k_warn(ar, "failed to reassociate station: %pM\n", 6323 sta->addr); 6324 } 6325 6326 exit: 6327 mutex_unlock(&ar->conf_mutex); 6328 } 6329 6330 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 6331 struct ieee80211_sta *sta) 6332 { 6333 struct ath10k *ar = arvif->ar; 6334 6335 lockdep_assert_held(&ar->conf_mutex); 6336 6337 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6338 return 0; 6339 6340 if (ar->num_stations >= ar->max_num_stations) 6341 return -ENOBUFS; 6342 6343 ar->num_stations++; 6344 6345 return 0; 6346 } 6347 6348 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 6349 struct ieee80211_sta *sta) 6350 { 6351 struct ath10k *ar = arvif->ar; 6352 6353 lockdep_assert_held(&ar->conf_mutex); 6354 6355 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 6356 return; 6357 6358 ar->num_stations--; 6359 } 6360 6361 static int ath10k_sta_state(struct ieee80211_hw *hw, 6362 struct ieee80211_vif *vif, 6363 struct ieee80211_sta *sta, 6364 enum ieee80211_sta_state old_state, 6365 enum ieee80211_sta_state new_state) 6366 { 6367 struct ath10k *ar = hw->priv; 6368 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6369 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 6370 struct ath10k_peer *peer; 6371 int ret = 0; 6372 int i; 6373 6374 if (old_state == IEEE80211_STA_NOTEXIST && 6375 new_state == IEEE80211_STA_NONE) { 6376 memset(arsta, 0, sizeof(*arsta)); 6377 arsta->arvif = arvif; 6378 arsta->peer_ps_state = WMI_PEER_PS_STATE_DISABLED; 6379 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 6380 6381 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6382 ath10k_mac_txq_init(sta->txq[i]); 6383 } 6384 6385 /* cancel must be done outside the mutex to avoid deadlock */ 6386 if ((old_state == IEEE80211_STA_NONE && 6387 new_state == IEEE80211_STA_NOTEXIST)) 6388 cancel_work_sync(&arsta->update_wk); 6389 6390 mutex_lock(&ar->conf_mutex); 6391 6392 if (old_state == IEEE80211_STA_NOTEXIST && 6393 new_state == IEEE80211_STA_NONE) { 6394 /* 6395 * New station addition. 
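 * A firmware peer entry is created for the new station below; the host
 * station counter is bumped first and, for TDLS stations, the firmware
 * TDLS state machine is enabled on this vdev.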
6396 */
6397 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT;
6398 u32 num_tdls_stations;
6399
6400 ath10k_dbg(ar, ATH10K_DBG_MAC,
6401 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n",
6402 arvif->vdev_id, sta->addr,
6403 ar->num_stations + 1, ar->max_num_stations,
6404 ar->num_peers + 1, ar->max_num_peers);
6405
6406 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif);
6407
6408 if (sta->tdls) {
6409 if (num_tdls_stations >= ar->max_num_tdls_vdevs) {
6410 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n",
6411 arvif->vdev_id,
6412 ar->max_num_tdls_vdevs);
6413 ret = -ELNRNG;
6414 goto exit;
6415 }
6416 peer_type = WMI_PEER_TYPE_TDLS;
6417 }
6418
6419 ret = ath10k_mac_inc_num_stations(arvif, sta);
6420 if (ret) {
6421 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n",
6422 ar->max_num_stations);
6423 goto exit;
6424 }
6425
6426 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) {
6427 arsta->tx_stats = kzalloc(sizeof(*arsta->tx_stats),
6428 GFP_KERNEL);
6429 if (!arsta->tx_stats) {
ath10k_mac_dec_num_stations(arvif, sta);
6430 ret = -ENOMEM;
6431 goto exit;
6432 }
6433 }
6434
6435 ret = ath10k_peer_create(ar, vif, sta, arvif->vdev_id,
6436 sta->addr, peer_type);
6437 if (ret) {
6438 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n",
6439 sta->addr, arvif->vdev_id, ret);
6440 ath10k_mac_dec_num_stations(arvif, sta);
6441 kfree(arsta->tx_stats);
6442 goto exit;
6443 }
6444
6445 spin_lock_bh(&ar->data_lock);
6446
6447 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr);
6448 if (!peer) {
6449 ath10k_warn(ar, "failed to lookup peer %pM on vdev %i\n",
6450 vif->addr, arvif->vdev_id);
6451 spin_unlock_bh(&ar->data_lock);
6452 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6453 ath10k_mac_dec_num_stations(arvif, sta);
6454 kfree(arsta->tx_stats);
6455 ret = -ENOENT;
6456 goto exit;
6457 }
6458
6459 arsta->peer_id = find_first_bit(peer->peer_ids,
6460 ATH10K_MAX_NUM_PEER_IDS);
6461
6462 spin_unlock_bh(&ar->data_lock);
6463
6464 if (!sta->tdls)
6465 goto exit;
6466
6467 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6468 WMI_TDLS_ENABLE_ACTIVE);
6469 if (ret) {
6470 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n",
6471 arvif->vdev_id, ret);
6472 ath10k_peer_delete(ar, arvif->vdev_id,
6473 sta->addr);
6474 ath10k_mac_dec_num_stations(arvif, sta);
6475 kfree(arsta->tx_stats);
6476 goto exit;
6477 }
6478
6479 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta,
6480 WMI_TDLS_PEER_STATE_PEERING);
6481 if (ret) {
6482 ath10k_warn(ar,
6483 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n",
6484 sta->addr, arvif->vdev_id, ret);
6485 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr);
6486 ath10k_mac_dec_num_stations(arvif, sta);
6487 kfree(arsta->tx_stats);
6488
6489 if (num_tdls_stations != 0)
6490 goto exit;
6491 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id,
6492 WMI_TDLS_DISABLE);
6493 }
6494 } else if ((old_state == IEEE80211_STA_NONE &&
6495 new_state == IEEE80211_STA_NOTEXIST)) {
6496 /*
6497 * Existing station deletion.
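 * Tear down the TDLS link state if needed, delete the firmware peer,
 * drop the host station counter and clean up any stale ar->peer_map
 * references left behind by a failed delete.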
6498 */ 6499 ath10k_dbg(ar, ATH10K_DBG_MAC, 6500 "mac vdev %d peer delete %pM sta %pK (sta gone)\n", 6501 arvif->vdev_id, sta->addr, sta); 6502 6503 if (sta->tdls) { 6504 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, 6505 sta, 6506 WMI_TDLS_PEER_STATE_TEARDOWN); 6507 if (ret) 6508 ath10k_warn(ar, "failed to update tdls peer state for %pM state %d: %i\n", 6509 sta->addr, 6510 WMI_TDLS_PEER_STATE_TEARDOWN, ret); 6511 } 6512 6513 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 6514 if (ret) 6515 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 6516 sta->addr, arvif->vdev_id, ret); 6517 6518 ath10k_mac_dec_num_stations(arvif, sta); 6519 6520 spin_lock_bh(&ar->data_lock); 6521 for (i = 0; i < ARRAY_SIZE(ar->peer_map); i++) { 6522 peer = ar->peer_map[i]; 6523 if (!peer) 6524 continue; 6525 6526 if (peer->sta == sta) { 6527 ath10k_warn(ar, "found sta peer %pM (ptr %pK id %d) entry on vdev %i after it was supposedly removed\n", 6528 sta->addr, peer, i, arvif->vdev_id); 6529 peer->sta = NULL; 6530 6531 /* Clean up the peer object as well since we 6532 * must have failed to do this above. 6533 */ 6534 list_del(&peer->list); 6535 ar->peer_map[i] = NULL; 6536 kfree(peer); 6537 ar->num_peers--; 6538 } 6539 } 6540 spin_unlock_bh(&ar->data_lock); 6541 6542 if (ath10k_debug_is_extd_tx_stats_enabled(ar)) { 6543 kfree(arsta->tx_stats); 6544 arsta->tx_stats = NULL; 6545 } 6546 6547 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) 6548 ath10k_mac_txq_unref(ar, sta->txq[i]); 6549 6550 if (!sta->tdls) 6551 goto exit; 6552 6553 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 6554 goto exit; 6555 6556 /* This was the last tdls peer in current vif */ 6557 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 6558 WMI_TDLS_DISABLE); 6559 if (ret) { 6560 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 6561 arvif->vdev_id, ret); 6562 } 6563 } else if (old_state == IEEE80211_STA_AUTH && 6564 new_state == IEEE80211_STA_ASSOC && 6565 (vif->type == NL80211_IFTYPE_AP || 6566 vif->type == NL80211_IFTYPE_MESH_POINT || 6567 vif->type == NL80211_IFTYPE_ADHOC)) { 6568 /* 6569 * New association. 6570 */ 6571 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 6572 sta->addr); 6573 6574 ret = ath10k_station_assoc(ar, vif, sta, false); 6575 if (ret) 6576 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 6577 sta->addr, arvif->vdev_id, ret); 6578 } else if (old_state == IEEE80211_STA_ASSOC && 6579 new_state == IEEE80211_STA_AUTHORIZED && 6580 sta->tdls) { 6581 /* 6582 * Tdls station authorized. 6583 */ 6584 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 6585 sta->addr); 6586 6587 ret = ath10k_station_assoc(ar, vif, sta, false); 6588 if (ret) { 6589 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 6590 sta->addr, arvif->vdev_id, ret); 6591 goto exit; 6592 } 6593 6594 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 6595 WMI_TDLS_PEER_STATE_CONNECTED); 6596 if (ret) 6597 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 6598 sta->addr, arvif->vdev_id, ret); 6599 } else if (old_state == IEEE80211_STA_ASSOC && 6600 new_state == IEEE80211_STA_AUTH && 6601 (vif->type == NL80211_IFTYPE_AP || 6602 vif->type == NL80211_IFTYPE_MESH_POINT || 6603 vif->type == NL80211_IFTYPE_ADHOC)) { 6604 /* 6605 * Disassociation. 
6606 */ 6607 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 6608 sta->addr); 6609 6610 ret = ath10k_station_disassoc(ar, vif, sta); 6611 if (ret) 6612 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 6613 sta->addr, arvif->vdev_id, ret); 6614 } 6615 exit: 6616 mutex_unlock(&ar->conf_mutex); 6617 return ret; 6618 } 6619 6620 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 6621 u16 ac, bool enable) 6622 { 6623 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6624 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 6625 u32 prio = 0, acc = 0; 6626 u32 value = 0; 6627 int ret = 0; 6628 6629 lockdep_assert_held(&ar->conf_mutex); 6630 6631 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 6632 return 0; 6633 6634 switch (ac) { 6635 case IEEE80211_AC_VO: 6636 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 6637 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 6638 prio = 7; 6639 acc = 3; 6640 break; 6641 case IEEE80211_AC_VI: 6642 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 6643 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 6644 prio = 5; 6645 acc = 2; 6646 break; 6647 case IEEE80211_AC_BE: 6648 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 6649 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 6650 prio = 2; 6651 acc = 1; 6652 break; 6653 case IEEE80211_AC_BK: 6654 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 6655 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 6656 prio = 0; 6657 acc = 0; 6658 break; 6659 } 6660 6661 if (enable) 6662 arvif->u.sta.uapsd |= value; 6663 else 6664 arvif->u.sta.uapsd &= ~value; 6665 6666 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6667 WMI_STA_PS_PARAM_UAPSD, 6668 arvif->u.sta.uapsd); 6669 if (ret) { 6670 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 6671 goto exit; 6672 } 6673 6674 if (arvif->u.sta.uapsd) 6675 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 6676 else 6677 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 6678 6679 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 6680 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 6681 value); 6682 if (ret) 6683 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 6684 6685 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 6686 if (ret) { 6687 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 6688 arvif->vdev_id, ret); 6689 return ret; 6690 } 6691 6692 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 6693 if (ret) { 6694 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 6695 arvif->vdev_id, ret); 6696 return ret; 6697 } 6698 6699 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 6700 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 6701 /* Only userspace can make an educated decision when to send 6702 * trigger frame. The following effectively disables u-UAPSD 6703 * autotrigger in firmware (which is enabled by default 6704 * provided the autotrigger service is available). 
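 * This is achieved below by requesting a zero service interval together
 * with the maximum suspend/delay intervals.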
6705 */ 6706 6707 arg.wmm_ac = acc; 6708 arg.user_priority = prio; 6709 arg.service_interval = 0; 6710 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6711 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 6712 6713 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 6714 arvif->bssid, &arg, 1); 6715 if (ret) { 6716 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 6717 ret); 6718 return ret; 6719 } 6720 } 6721 6722 exit: 6723 return ret; 6724 } 6725 6726 static int ath10k_conf_tx(struct ieee80211_hw *hw, 6727 struct ieee80211_vif *vif, u16 ac, 6728 const struct ieee80211_tx_queue_params *params) 6729 { 6730 struct ath10k *ar = hw->priv; 6731 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6732 struct wmi_wmm_params_arg *p = NULL; 6733 int ret; 6734 6735 mutex_lock(&ar->conf_mutex); 6736 6737 switch (ac) { 6738 case IEEE80211_AC_VO: 6739 p = &arvif->wmm_params.ac_vo; 6740 break; 6741 case IEEE80211_AC_VI: 6742 p = &arvif->wmm_params.ac_vi; 6743 break; 6744 case IEEE80211_AC_BE: 6745 p = &arvif->wmm_params.ac_be; 6746 break; 6747 case IEEE80211_AC_BK: 6748 p = &arvif->wmm_params.ac_bk; 6749 break; 6750 } 6751 6752 if (WARN_ON(!p)) { 6753 ret = -EINVAL; 6754 goto exit; 6755 } 6756 6757 p->cwmin = params->cw_min; 6758 p->cwmax = params->cw_max; 6759 p->aifs = params->aifs; 6760 6761 /* 6762 * The channel time duration programmed in the HW is in absolute 6763 * microseconds, while mac80211 gives the txop in units of 6764 * 32 microseconds. 6765 */ 6766 p->txop = params->txop * 32; 6767 6768 if (ar->wmi.ops->gen_vdev_wmm_conf) { 6769 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 6770 &arvif->wmm_params); 6771 if (ret) { 6772 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 6773 arvif->vdev_id, ret); 6774 goto exit; 6775 } 6776 } else { 6777 /* This won't work well with multi-interface cases but it's 6778 * better than nothing. 
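 * (The fallback programs pdev-wide WMM parameters, so with several vifs
 * the most recent conf_tx() call effectively overrides the others.)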
6779 */ 6780 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 6781 if (ret) { 6782 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 6783 goto exit; 6784 } 6785 } 6786 6787 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 6788 if (ret) 6789 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 6790 6791 exit: 6792 mutex_unlock(&ar->conf_mutex); 6793 return ret; 6794 } 6795 6796 #define ATH10K_ROC_TIMEOUT_HZ (2 * HZ) 6797 6798 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 6799 struct ieee80211_vif *vif, 6800 struct ieee80211_channel *chan, 6801 int duration, 6802 enum ieee80211_roc_type type) 6803 { 6804 struct ath10k *ar = hw->priv; 6805 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6806 struct wmi_start_scan_arg arg; 6807 int ret = 0; 6808 u32 scan_time_msec; 6809 6810 mutex_lock(&ar->conf_mutex); 6811 6812 if (ath10k_mac_tdls_vif_stations_count(hw, vif) > 0) { 6813 ret = -EBUSY; 6814 goto exit; 6815 } 6816 6817 spin_lock_bh(&ar->data_lock); 6818 switch (ar->scan.state) { 6819 case ATH10K_SCAN_IDLE: 6820 reinit_completion(&ar->scan.started); 6821 reinit_completion(&ar->scan.completed); 6822 reinit_completion(&ar->scan.on_channel); 6823 ar->scan.state = ATH10K_SCAN_STARTING; 6824 ar->scan.is_roc = true; 6825 ar->scan.vdev_id = arvif->vdev_id; 6826 ar->scan.roc_freq = chan->center_freq; 6827 ar->scan.roc_notify = true; 6828 ret = 0; 6829 break; 6830 case ATH10K_SCAN_STARTING: 6831 case ATH10K_SCAN_RUNNING: 6832 case ATH10K_SCAN_ABORTING: 6833 ret = -EBUSY; 6834 break; 6835 } 6836 spin_unlock_bh(&ar->data_lock); 6837 6838 if (ret) 6839 goto exit; 6840 6841 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 6842 6843 memset(&arg, 0, sizeof(arg)); 6844 ath10k_wmi_start_scan_init(ar, &arg); 6845 arg.vdev_id = arvif->vdev_id; 6846 arg.scan_id = ATH10K_SCAN_ID; 6847 arg.n_channels = 1; 6848 arg.channels[0] = chan->center_freq; 6849 arg.dwell_time_active = scan_time_msec; 6850 arg.dwell_time_passive = scan_time_msec; 6851 arg.max_scan_time = scan_time_msec; 6852 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 6853 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 6854 arg.burst_duration_ms = duration; 6855 6856 ret = ath10k_start_scan(ar, &arg); 6857 if (ret) { 6858 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 6859 spin_lock_bh(&ar->data_lock); 6860 ar->scan.state = ATH10K_SCAN_IDLE; 6861 spin_unlock_bh(&ar->data_lock); 6862 goto exit; 6863 } 6864 6865 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3 * HZ); 6866 if (ret == 0) { 6867 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 6868 6869 ret = ath10k_scan_stop(ar); 6870 if (ret) 6871 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 6872 6873 ret = -ETIMEDOUT; 6874 goto exit; 6875 } 6876 6877 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 6878 msecs_to_jiffies(duration)); 6879 6880 ret = 0; 6881 exit: 6882 mutex_unlock(&ar->conf_mutex); 6883 return ret; 6884 } 6885 6886 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 6887 { 6888 struct ath10k *ar = hw->priv; 6889 6890 mutex_lock(&ar->conf_mutex); 6891 6892 spin_lock_bh(&ar->data_lock); 6893 ar->scan.roc_notify = false; 6894 spin_unlock_bh(&ar->data_lock); 6895 6896 ath10k_scan_abort(ar); 6897 6898 mutex_unlock(&ar->conf_mutex); 6899 6900 cancel_delayed_work_sync(&ar->scan.timeout); 6901 6902 return 0; 6903 } 6904 6905 /* 6906 * Both RTS and Fragmentation threshold are interface-specific 6907 * in ath10k, but device-specific in mac80211. 
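 * The RTS threshold is therefore propagated to every vdev below, while
 * fragmentation is simply reported as unsupported.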
6908 */
6909
6910 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value)
6911 {
6912 struct ath10k *ar = hw->priv;
6913 struct ath10k_vif *arvif;
6914 int ret = 0;
6915
6916 mutex_lock(&ar->conf_mutex);
6917 list_for_each_entry(arvif, &ar->arvifs, list) {
6918 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n",
6919 arvif->vdev_id, value);
6920
6921 ret = ath10k_mac_set_rts(arvif, value);
6922 if (ret) {
6923 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n",
6924 arvif->vdev_id, ret);
6925 break;
6926 }
6927 }
6928 mutex_unlock(&ar->conf_mutex);
6929
6930 return ret;
6931 }
6932
6933 static int ath10k_mac_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value)
6934 {
6935 /* Even though there's a WMI enum for fragmentation threshold no known
6936 * firmware actually implements it. Moreover it is not possible to leave
6937 * frame fragmentation to mac80211 because firmware clears the "more
6938 * fragments" bit in frame control making it impossible for remote
6939 * devices to reassemble frames.
6940 *
6941 * Hence implement a dummy callback just to say fragmentation isn't
6942 * supported. This effectively prevents mac80211 from doing frame
6943 * fragmentation in software.
6944 */
6945 return -EOPNOTSUPP;
6946 }
6947
6948 void ath10k_mac_wait_tx_complete(struct ath10k *ar)
6949 {
6950 bool skip;
6951 long time_left;
6952
6953 /* mac80211 doesn't care if we really xmit queued frames or not;
6954 * we'll collect those frames either way if we stop/delete vdevs
6955 */
6956
6957 if (ar->state == ATH10K_STATE_WEDGED)
6958 return;
6959
6960 time_left = wait_event_timeout(ar->htt.empty_tx_wq, ({
6961 bool empty;
6962
6963 spin_lock_bh(&ar->htt.tx_lock);
6964 empty = (ar->htt.num_pending_tx == 0);
6965 spin_unlock_bh(&ar->htt.tx_lock);
6966
6967 skip = (ar->state == ATH10K_STATE_WEDGED) ||
6968 test_bit(ATH10K_FLAG_CRASH_FLUSH,
6969 &ar->dev_flags);
6970
6971 (empty || skip);
6972 }), ATH10K_FLUSH_TIMEOUT_HZ);
6973
6974 if (time_left == 0 || skip)
6975 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %ld\n",
6976 skip, ar->state, time_left);
6977 }
6978
6979 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
6980 u32 queues, bool drop)
6981 {
6982 struct ath10k *ar = hw->priv;
6983 struct ath10k_vif *arvif;
6984 u32 bitmap;
6985
6986 if (drop) {
6987 if (vif && vif->type == NL80211_IFTYPE_STATION) {
6988 bitmap = ~(1 << WMI_MGMT_TID);
6989 list_for_each_entry(arvif, &ar->arvifs, list) {
6990 if (arvif->vdev_type == WMI_VDEV_TYPE_STA)
6991 ath10k_wmi_peer_flush(ar, arvif->vdev_id,
6992 arvif->bssid, bitmap);
6993 }
6994 }
6995 return;
6996 }
6997
6998 mutex_lock(&ar->conf_mutex);
6999 ath10k_mac_wait_tx_complete(ar);
7000 mutex_unlock(&ar->conf_mutex);
7001 }
7002
7003 /* TODO: Implement this function properly.
7004 * For now it is needed to reply to Probe Requests in IBSS mode.
7005 * Probably we need this information from FW.
7006 */
7007 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw)
7008 {
7009 return 1;
7010 }
7011
7012 static void ath10k_reconfig_complete(struct ieee80211_hw *hw,
7013 enum ieee80211_reconfig_type reconfig_type)
7014 {
7015 struct ath10k *ar = hw->priv;
7016
7017 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART)
7018 return;
7019
7020 mutex_lock(&ar->conf_mutex);
7021
7022 /* If device failed to restart it will be in a different state, e.g.
7023 * ATH10K_STATE_WEDGED 7024 */ 7025 if (ar->state == ATH10K_STATE_RESTARTED) { 7026 ath10k_info(ar, "device successfully recovered\n"); 7027 ar->state = ATH10K_STATE_ON; 7028 ieee80211_wake_queues(ar->hw); 7029 } 7030 7031 mutex_unlock(&ar->conf_mutex); 7032 } 7033 7034 static void 7035 ath10k_mac_update_bss_chan_survey(struct ath10k *ar, 7036 struct ieee80211_channel *channel) 7037 { 7038 int ret; 7039 enum wmi_bss_survey_req_type type = WMI_BSS_SURVEY_REQ_TYPE_READ_CLEAR; 7040 7041 lockdep_assert_held(&ar->conf_mutex); 7042 7043 if (!test_bit(WMI_SERVICE_BSS_CHANNEL_INFO_64, ar->wmi.svc_map) || 7044 (ar->rx_channel != channel)) 7045 return; 7046 7047 if (ar->scan.state != ATH10K_SCAN_IDLE) { 7048 ath10k_dbg(ar, ATH10K_DBG_MAC, "ignoring bss chan info request while scanning..\n"); 7049 return; 7050 } 7051 7052 reinit_completion(&ar->bss_survey_done); 7053 7054 ret = ath10k_wmi_pdev_bss_chan_info_request(ar, type); 7055 if (ret) { 7056 ath10k_warn(ar, "failed to send pdev bss chan info request\n"); 7057 return; 7058 } 7059 7060 ret = wait_for_completion_timeout(&ar->bss_survey_done, 3 * HZ); 7061 if (!ret) { 7062 ath10k_warn(ar, "bss channel survey timed out\n"); 7063 return; 7064 } 7065 } 7066 7067 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 7068 struct survey_info *survey) 7069 { 7070 struct ath10k *ar = hw->priv; 7071 struct ieee80211_supported_band *sband; 7072 struct survey_info *ar_survey = &ar->survey[idx]; 7073 int ret = 0; 7074 7075 mutex_lock(&ar->conf_mutex); 7076 7077 sband = hw->wiphy->bands[NL80211_BAND_2GHZ]; 7078 if (sband && idx >= sband->n_channels) { 7079 idx -= sband->n_channels; 7080 sband = NULL; 7081 } 7082 7083 if (!sband) 7084 sband = hw->wiphy->bands[NL80211_BAND_5GHZ]; 7085 7086 if (!sband || idx >= sband->n_channels) { 7087 ret = -ENOENT; 7088 goto exit; 7089 } 7090 7091 ath10k_mac_update_bss_chan_survey(ar, &sband->channels[idx]); 7092 7093 spin_lock_bh(&ar->data_lock); 7094 memcpy(survey, ar_survey, sizeof(*survey)); 7095 spin_unlock_bh(&ar->data_lock); 7096 7097 survey->channel = &sband->channels[idx]; 7098 7099 if (ar->rx_channel == survey->channel) 7100 survey->filled |= SURVEY_INFO_IN_USE; 7101 7102 exit: 7103 mutex_unlock(&ar->conf_mutex); 7104 return ret; 7105 } 7106 7107 static bool 7108 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 7109 enum nl80211_band band, 7110 const struct cfg80211_bitrate_mask *mask) 7111 { 7112 int num_rates = 0; 7113 int i; 7114 7115 num_rates += hweight32(mask->control[band].legacy); 7116 7117 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 7118 num_rates += hweight8(mask->control[band].ht_mcs[i]); 7119 7120 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 7121 num_rates += hweight16(mask->control[band].vht_mcs[i]); 7122 7123 return num_rates == 1; 7124 } 7125 7126 static bool 7127 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 7128 enum nl80211_band band, 7129 const struct cfg80211_bitrate_mask *mask, 7130 int *nss) 7131 { 7132 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 7133 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 7134 u8 ht_nss_mask = 0; 7135 u8 vht_nss_mask = 0; 7136 int i; 7137 7138 if (mask->control[band].legacy) 7139 return false; 7140 7141 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7142 if (mask->control[band].ht_mcs[i] == 0) 7143 continue; 7144 else if (mask->control[band].ht_mcs[i] == 7145 sband->ht_cap.mcs.rx_mask[i]) 7146 ht_nss_mask |= BIT(i); 7147 else 7148 return false; 7149 
} 7150 7151 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7152 if (mask->control[band].vht_mcs[i] == 0) 7153 continue; 7154 else if (mask->control[band].vht_mcs[i] == 7155 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 7156 vht_nss_mask |= BIT(i); 7157 else 7158 return false; 7159 } 7160 7161 if (ht_nss_mask != vht_nss_mask) 7162 return false; 7163 7164 if (ht_nss_mask == 0) 7165 return false; 7166 7167 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 7168 return false; 7169 7170 *nss = fls(ht_nss_mask); 7171 7172 return true; 7173 } 7174 7175 static int 7176 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 7177 enum nl80211_band band, 7178 const struct cfg80211_bitrate_mask *mask, 7179 u8 *rate, u8 *nss) 7180 { 7181 int rate_idx; 7182 int i; 7183 u16 bitrate; 7184 u8 preamble; 7185 u8 hw_rate; 7186 7187 if (hweight32(mask->control[band].legacy) == 1) { 7188 rate_idx = ffs(mask->control[band].legacy) - 1; 7189 7190 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) 7191 rate_idx += ATH10K_MAC_FIRST_OFDM_RATE_IDX; 7192 7193 hw_rate = ath10k_wmi_legacy_rates[rate_idx].hw_value; 7194 bitrate = ath10k_wmi_legacy_rates[rate_idx].bitrate; 7195 7196 if (ath10k_mac_bitrate_is_cck(bitrate)) 7197 preamble = WMI_RATE_PREAMBLE_CCK; 7198 else 7199 preamble = WMI_RATE_PREAMBLE_OFDM; 7200 7201 *nss = 1; 7202 *rate = preamble << 6 | 7203 (*nss - 1) << 4 | 7204 hw_rate << 0; 7205 7206 return 0; 7207 } 7208 7209 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 7210 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 7211 *nss = i + 1; 7212 *rate = WMI_RATE_PREAMBLE_HT << 6 | 7213 (*nss - 1) << 4 | 7214 (ffs(mask->control[band].ht_mcs[i]) - 1); 7215 7216 return 0; 7217 } 7218 } 7219 7220 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 7221 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 7222 *nss = i + 1; 7223 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 7224 (*nss - 1) << 4 | 7225 (ffs(mask->control[band].vht_mcs[i]) - 1); 7226 7227 return 0; 7228 } 7229 } 7230 7231 return -EINVAL; 7232 } 7233 7234 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 7235 u8 rate, u8 nss, u8 sgi, u8 ldpc) 7236 { 7237 struct ath10k *ar = arvif->ar; 7238 u32 vdev_param; 7239 int ret; 7240 7241 lockdep_assert_held(&ar->conf_mutex); 7242 7243 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 7244 arvif->vdev_id, rate, nss, sgi); 7245 7246 vdev_param = ar->wmi.vdev_param->fixed_rate; 7247 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 7248 if (ret) { 7249 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 7250 rate, ret); 7251 return ret; 7252 } 7253 7254 vdev_param = ar->wmi.vdev_param->nss; 7255 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 7256 if (ret) { 7257 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 7258 return ret; 7259 } 7260 7261 vdev_param = ar->wmi.vdev_param->sgi; 7262 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 7263 if (ret) { 7264 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 7265 return ret; 7266 } 7267 7268 vdev_param = ar->wmi.vdev_param->ldpc; 7269 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, ldpc); 7270 if (ret) { 7271 ath10k_warn(ar, "failed to set ldpc param %d: %d\n", ldpc, ret); 7272 return ret; 7273 } 7274 7275 return 0; 7276 } 7277 7278 static bool 7279 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 7280 enum nl80211_band band, 7281 const 
struct cfg80211_bitrate_mask *mask) 7282 { 7283 int i; 7284 u16 vht_mcs; 7285 7286 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 7287 * to express all VHT MCS rate masks. Effectively only the following 7288 * ranges can be used: none, 0-7, 0-8 and 0-9. 7289 */ 7290 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 7291 vht_mcs = mask->control[band].vht_mcs[i]; 7292 7293 switch (vht_mcs) { 7294 case 0: 7295 case BIT(8) - 1: 7296 case BIT(9) - 1: 7297 case BIT(10) - 1: 7298 break; 7299 default: 7300 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 7301 return false; 7302 } 7303 } 7304 7305 return true; 7306 } 7307 7308 static void ath10k_mac_set_bitrate_mask_iter(void *data, 7309 struct ieee80211_sta *sta) 7310 { 7311 struct ath10k_vif *arvif = data; 7312 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7313 struct ath10k *ar = arvif->ar; 7314 7315 if (arsta->arvif != arvif) 7316 return; 7317 7318 spin_lock_bh(&ar->data_lock); 7319 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 7320 spin_unlock_bh(&ar->data_lock); 7321 7322 ieee80211_queue_work(ar->hw, &arsta->update_wk); 7323 } 7324 7325 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 7326 struct ieee80211_vif *vif, 7327 const struct cfg80211_bitrate_mask *mask) 7328 { 7329 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7330 struct cfg80211_chan_def def; 7331 struct ath10k *ar = arvif->ar; 7332 enum nl80211_band band; 7333 const u8 *ht_mcs_mask; 7334 const u16 *vht_mcs_mask; 7335 u8 rate; 7336 u8 nss; 7337 u8 sgi; 7338 u8 ldpc; 7339 int single_nss; 7340 int ret; 7341 7342 if (ath10k_mac_vif_chan(vif, &def)) 7343 return -EPERM; 7344 7345 band = def.chan->band; 7346 ht_mcs_mask = mask->control[band].ht_mcs; 7347 vht_mcs_mask = mask->control[band].vht_mcs; 7348 ldpc = !!(ar->ht_cap_info & WMI_HT_CAP_LDPC); 7349 7350 sgi = mask->control[band].gi; 7351 if (sgi == NL80211_TXRATE_FORCE_LGI) 7352 return -EINVAL; 7353 7354 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 7355 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 7356 &rate, &nss); 7357 if (ret) { 7358 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 7359 arvif->vdev_id, ret); 7360 return ret; 7361 } 7362 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 7363 &single_nss)) { 7364 rate = WMI_FIXED_RATE_NONE; 7365 nss = single_nss; 7366 } else { 7367 rate = WMI_FIXED_RATE_NONE; 7368 nss = min(ar->num_rf_chains, 7369 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 7370 ath10k_mac_max_vht_nss(vht_mcs_mask))); 7371 7372 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 7373 return -EINVAL; 7374 7375 mutex_lock(&ar->conf_mutex); 7376 7377 arvif->bitrate_mask = *mask; 7378 ieee80211_iterate_stations_atomic(ar->hw, 7379 ath10k_mac_set_bitrate_mask_iter, 7380 arvif); 7381 7382 mutex_unlock(&ar->conf_mutex); 7383 } 7384 7385 mutex_lock(&ar->conf_mutex); 7386 7387 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi, ldpc); 7388 if (ret) { 7389 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 7390 arvif->vdev_id, ret); 7391 goto exit; 7392 } 7393 7394 exit: 7395 mutex_unlock(&ar->conf_mutex); 7396 7397 return ret; 7398 } 7399 7400 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 7401 struct ieee80211_vif *vif, 7402 struct ieee80211_sta *sta, 7403 u32 changed) 7404 { 7405 struct ath10k *ar = hw->priv; 7406 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7407 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7408 
struct ath10k_peer *peer; 7409 u32 bw, smps; 7410 7411 spin_lock_bh(&ar->data_lock); 7412 7413 peer = ath10k_peer_find(ar, arvif->vdev_id, sta->addr); 7414 if (!peer) { 7415 spin_unlock_bh(&ar->data_lock); 7416 ath10k_warn(ar, "mac sta rc update failed to find peer %pM on vdev %i\n", 7417 sta->addr, arvif->vdev_id); 7418 return; 7419 } 7420 7421 ath10k_dbg(ar, ATH10K_DBG_MAC, 7422 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 7423 sta->addr, changed, sta->bandwidth, sta->rx_nss, 7424 sta->smps_mode); 7425 7426 if (changed & IEEE80211_RC_BW_CHANGED) { 7427 bw = WMI_PEER_CHWIDTH_20MHZ; 7428 7429 switch (sta->bandwidth) { 7430 case IEEE80211_STA_RX_BW_20: 7431 bw = WMI_PEER_CHWIDTH_20MHZ; 7432 break; 7433 case IEEE80211_STA_RX_BW_40: 7434 bw = WMI_PEER_CHWIDTH_40MHZ; 7435 break; 7436 case IEEE80211_STA_RX_BW_80: 7437 bw = WMI_PEER_CHWIDTH_80MHZ; 7438 break; 7439 case IEEE80211_STA_RX_BW_160: 7440 bw = WMI_PEER_CHWIDTH_160MHZ; 7441 break; 7442 default: 7443 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 7444 sta->bandwidth, sta->addr); 7445 bw = WMI_PEER_CHWIDTH_20MHZ; 7446 break; 7447 } 7448 7449 arsta->bw = bw; 7450 } 7451 7452 if (changed & IEEE80211_RC_NSS_CHANGED) 7453 arsta->nss = sta->rx_nss; 7454 7455 if (changed & IEEE80211_RC_SMPS_CHANGED) { 7456 smps = WMI_PEER_SMPS_PS_NONE; 7457 7458 switch (sta->smps_mode) { 7459 case IEEE80211_SMPS_AUTOMATIC: 7460 case IEEE80211_SMPS_OFF: 7461 smps = WMI_PEER_SMPS_PS_NONE; 7462 break; 7463 case IEEE80211_SMPS_STATIC: 7464 smps = WMI_PEER_SMPS_STATIC; 7465 break; 7466 case IEEE80211_SMPS_DYNAMIC: 7467 smps = WMI_PEER_SMPS_DYNAMIC; 7468 break; 7469 case IEEE80211_SMPS_NUM_MODES: 7470 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 7471 sta->smps_mode, sta->addr); 7472 smps = WMI_PEER_SMPS_PS_NONE; 7473 break; 7474 } 7475 7476 arsta->smps = smps; 7477 } 7478 7479 arsta->changed |= changed; 7480 7481 spin_unlock_bh(&ar->data_lock); 7482 7483 ieee80211_queue_work(hw, &arsta->update_wk); 7484 } 7485 7486 static void ath10k_offset_tsf(struct ieee80211_hw *hw, 7487 struct ieee80211_vif *vif, s64 tsf_offset) 7488 { 7489 struct ath10k *ar = hw->priv; 7490 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7491 u32 offset, vdev_param; 7492 int ret; 7493 7494 if (tsf_offset < 0) { 7495 vdev_param = ar->wmi.vdev_param->dec_tsf; 7496 offset = -tsf_offset; 7497 } else { 7498 vdev_param = ar->wmi.vdev_param->inc_tsf; 7499 offset = tsf_offset; 7500 } 7501 7502 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, 7503 vdev_param, offset); 7504 7505 if (ret && ret != -EOPNOTSUPP) 7506 ath10k_warn(ar, "failed to set tsf offset %d cmd %d: %d\n", 7507 offset, vdev_param, ret); 7508 } 7509 7510 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 7511 struct ieee80211_vif *vif, 7512 struct ieee80211_ampdu_params *params) 7513 { 7514 struct ath10k *ar = hw->priv; 7515 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7516 struct ieee80211_sta *sta = params->sta; 7517 enum ieee80211_ampdu_mlme_action action = params->action; 7518 u16 tid = params->tid; 7519 7520 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 7521 arvif->vdev_id, sta->addr, tid, action); 7522 7523 switch (action) { 7524 case IEEE80211_AMPDU_RX_START: 7525 case IEEE80211_AMPDU_RX_STOP: 7526 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 7527 * creation/removal. Do we need to verify this? 
7528 */
7529 return 0;
7530 case IEEE80211_AMPDU_TX_START:
7531 case IEEE80211_AMPDU_TX_STOP_CONT:
7532 case IEEE80211_AMPDU_TX_STOP_FLUSH:
7533 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
7534 case IEEE80211_AMPDU_TX_OPERATIONAL:
7535 /* Firmware offloads Tx aggregation entirely so deny mac80211
7536 * Tx aggregation requests.
7537 */
7538 return -EOPNOTSUPP;
7539 }
7540
7541 return -EINVAL;
7542 }
7543
7544 static void
7545 ath10k_mac_update_rx_channel(struct ath10k *ar,
7546 struct ieee80211_chanctx_conf *ctx,
7547 struct ieee80211_vif_chanctx_switch *vifs,
7548 int n_vifs)
7549 {
7550 struct cfg80211_chan_def *def = NULL;
7551
7552 /* Both locks are required because ar->rx_channel is modified. This
7553 * allows readers to hold either lock.
7554 */
7555 lockdep_assert_held(&ar->conf_mutex);
7556 lockdep_assert_held(&ar->data_lock);
7557
7558 WARN_ON(ctx && vifs);
7559 WARN_ON(vifs && !n_vifs);
7560
7561 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are
7562 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each
7563 * ppdu on Rx may reduce performance on low-end systems. It should be
7564 * possible to make tables/hashmaps to speed the lookup up (be wary of
7565 * CPU data cache line sizes though), but to keep the initial
7566 * implementation simple and less intrusive fall back to the slow lookup
7567 * only for multi-channel cases. Single-channel cases will continue to
7568 * use the old channel derivation and thus performance should not be
7569 * affected much.
7570 */
7571 rcu_read_lock();
7572 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) {
7573 ieee80211_iter_chan_contexts_atomic(ar->hw,
7574 ath10k_mac_get_any_chandef_iter,
7575 &def);
7576
7577 if (vifs)
7578 def = &vifs[0].new_ctx->def;
7579
7580 ar->rx_channel = def->chan;
7581 } else if ((ctx && ath10k_mac_num_chanctxs(ar) == 0) ||
7582 (ctx && (ar->state == ATH10K_STATE_RESTARTED))) {
7583 /* During a driver restart due to a firmware assert, mac80211
7584 * already has a valid channel context for the given radio, so
7585 * channel context iteration returns num_chanctx > 0. Fix up
7586 * rx_channel while the restart is in progress.
7587 */
7588 ar->rx_channel = ctx->def.chan;
7589 } else {
7590 ar->rx_channel = NULL;
7591 }
7592 rcu_read_unlock();
7593 }
7594
7595 static void
7596 ath10k_mac_update_vif_chan(struct ath10k *ar,
7597 struct ieee80211_vif_chanctx_switch *vifs,
7598 int n_vifs)
7599 {
7600 struct ath10k_vif *arvif;
7601 int ret;
7602 int i;
7603
7604 lockdep_assert_held(&ar->conf_mutex);
7605
7606 /* First stop monitor interface. Some FW versions crash if there's a
7607 * lone monitor interface.
7608 */
7609 if (ar->monitor_started)
7610 ath10k_monitor_stop(ar);
7611
7612 for (i = 0; i < n_vifs; i++) {
7613 arvif = (void *)vifs[i].vif->drv_priv;
7614
7615 ath10k_dbg(ar, ATH10K_DBG_MAC,
7616 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n",
7617 arvif->vdev_id,
7618 vifs[i].old_ctx->def.chan->center_freq,
7619 vifs[i].new_ctx->def.chan->center_freq,
7620 vifs[i].old_ctx->def.width,
7621 vifs[i].new_ctx->def.width);
7622
7623 if (WARN_ON(!arvif->is_started))
7624 continue;
7625
7626 if (WARN_ON(!arvif->is_up))
7627 continue;
7628
7629 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id);
7630 if (ret) {
7631 ath10k_warn(ar, "failed to down vdev %d: %d\n",
7632 arvif->vdev_id, ret);
7633 continue;
7634 }
7635 }
7636
7637 /* All relevant vdevs are downed and associated channel resources
7638 * should be available for the channel switch now.
7639 */ 7640 7641 spin_lock_bh(&ar->data_lock); 7642 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 7643 spin_unlock_bh(&ar->data_lock); 7644 7645 for (i = 0; i < n_vifs; i++) { 7646 arvif = (void *)vifs[i].vif->drv_priv; 7647 7648 if (WARN_ON(!arvif->is_started)) 7649 continue; 7650 7651 if (WARN_ON(!arvif->is_up)) 7652 continue; 7653 7654 ret = ath10k_mac_setup_bcn_tmpl(arvif); 7655 if (ret) 7656 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 7657 ret); 7658 7659 ret = ath10k_mac_setup_prb_tmpl(arvif); 7660 if (ret) 7661 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 7662 ret); 7663 7664 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 7665 if (ret) { 7666 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 7667 arvif->vdev_id, ret); 7668 continue; 7669 } 7670 7671 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 7672 arvif->bssid); 7673 if (ret) { 7674 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 7675 arvif->vdev_id, ret); 7676 continue; 7677 } 7678 } 7679 7680 ath10k_monitor_recalc(ar); 7681 } 7682 7683 static int 7684 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 7685 struct ieee80211_chanctx_conf *ctx) 7686 { 7687 struct ath10k *ar = hw->priv; 7688 7689 ath10k_dbg(ar, ATH10K_DBG_MAC, 7690 "mac chanctx add freq %hu width %d ptr %pK\n", 7691 ctx->def.chan->center_freq, ctx->def.width, ctx); 7692 7693 mutex_lock(&ar->conf_mutex); 7694 7695 spin_lock_bh(&ar->data_lock); 7696 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 7697 spin_unlock_bh(&ar->data_lock); 7698 7699 ath10k_recalc_radar_detection(ar); 7700 ath10k_monitor_recalc(ar); 7701 7702 mutex_unlock(&ar->conf_mutex); 7703 7704 return 0; 7705 } 7706 7707 static void 7708 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 7709 struct ieee80211_chanctx_conf *ctx) 7710 { 7711 struct ath10k *ar = hw->priv; 7712 7713 ath10k_dbg(ar, ATH10K_DBG_MAC, 7714 "mac chanctx remove freq %hu width %d ptr %pK\n", 7715 ctx->def.chan->center_freq, ctx->def.width, ctx); 7716 7717 mutex_lock(&ar->conf_mutex); 7718 7719 spin_lock_bh(&ar->data_lock); 7720 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 7721 spin_unlock_bh(&ar->data_lock); 7722 7723 ath10k_recalc_radar_detection(ar); 7724 ath10k_monitor_recalc(ar); 7725 7726 mutex_unlock(&ar->conf_mutex); 7727 } 7728 7729 struct ath10k_mac_change_chanctx_arg { 7730 struct ieee80211_chanctx_conf *ctx; 7731 struct ieee80211_vif_chanctx_switch *vifs; 7732 int n_vifs; 7733 int next_vif; 7734 }; 7735 7736 static void 7737 ath10k_mac_change_chanctx_cnt_iter(void *data, u8 *mac, 7738 struct ieee80211_vif *vif) 7739 { 7740 struct ath10k_mac_change_chanctx_arg *arg = data; 7741 7742 if (rcu_access_pointer(vif->chanctx_conf) != arg->ctx) 7743 return; 7744 7745 arg->n_vifs++; 7746 } 7747 7748 static void 7749 ath10k_mac_change_chanctx_fill_iter(void *data, u8 *mac, 7750 struct ieee80211_vif *vif) 7751 { 7752 struct ath10k_mac_change_chanctx_arg *arg = data; 7753 struct ieee80211_chanctx_conf *ctx; 7754 7755 ctx = rcu_access_pointer(vif->chanctx_conf); 7756 if (ctx != arg->ctx) 7757 return; 7758 7759 if (WARN_ON(arg->next_vif == arg->n_vifs)) 7760 return; 7761 7762 arg->vifs[arg->next_vif].vif = vif; 7763 arg->vifs[arg->next_vif].old_ctx = ctx; 7764 arg->vifs[arg->next_vif].new_ctx = ctx; 7765 arg->next_vif++; 7766 } 7767 7768 static void 7769 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 7770 struct ieee80211_chanctx_conf *ctx, 7771 u32 changed) 7772 { 7773 struct ath10k *ar = hw->priv; 7774 struct ath10k_mac_change_chanctx_arg arg = 
{ .ctx = ctx }; 7775 7776 mutex_lock(&ar->conf_mutex); 7777 7778 ath10k_dbg(ar, ATH10K_DBG_MAC, 7779 "mac chanctx change freq %hu width %d ptr %pK changed %x\n", 7780 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 7781 7782 /* This shouldn't really happen because channel switching should use 7783 * switch_vif_chanctx(). 7784 */ 7785 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 7786 goto unlock; 7787 7788 if (changed & IEEE80211_CHANCTX_CHANGE_WIDTH) { 7789 ieee80211_iterate_active_interfaces_atomic( 7790 hw, 7791 IEEE80211_IFACE_ITER_NORMAL, 7792 ath10k_mac_change_chanctx_cnt_iter, 7793 &arg); 7794 if (arg.n_vifs == 0) 7795 goto radar; 7796 7797 arg.vifs = kcalloc(arg.n_vifs, sizeof(arg.vifs[0]), 7798 GFP_KERNEL); 7799 if (!arg.vifs) 7800 goto radar; 7801 7802 ieee80211_iterate_active_interfaces_atomic( 7803 hw, 7804 IEEE80211_IFACE_ITER_NORMAL, 7805 ath10k_mac_change_chanctx_fill_iter, 7806 &arg); 7807 ath10k_mac_update_vif_chan(ar, arg.vifs, arg.n_vifs); 7808 kfree(arg.vifs); 7809 } 7810 7811 radar: 7812 ath10k_recalc_radar_detection(ar); 7813 7814 /* FIXME: How to configure Rx chains properly? */ 7815 7816 /* No other actions are actually necessary. Firmware maintains channel 7817 * definitions per vdev internally and there's no host-side channel 7818 * context abstraction to configure, e.g. channel width. 7819 */ 7820 7821 unlock: 7822 mutex_unlock(&ar->conf_mutex); 7823 } 7824 7825 static int 7826 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 7827 struct ieee80211_vif *vif, 7828 struct ieee80211_chanctx_conf *ctx) 7829 { 7830 struct ath10k *ar = hw->priv; 7831 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7832 int ret; 7833 7834 mutex_lock(&ar->conf_mutex); 7835 7836 ath10k_dbg(ar, ATH10K_DBG_MAC, 7837 "mac chanctx assign ptr %pK vdev_id %i\n", 7838 ctx, arvif->vdev_id); 7839 7840 if (WARN_ON(arvif->is_started)) { 7841 mutex_unlock(&ar->conf_mutex); 7842 return -EBUSY; 7843 } 7844 7845 ret = ath10k_vdev_start(arvif, &ctx->def); 7846 if (ret) { 7847 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 7848 arvif->vdev_id, vif->addr, 7849 ctx->def.chan->center_freq, ret); 7850 goto err; 7851 } 7852 7853 arvif->is_started = true; 7854 7855 ret = ath10k_mac_vif_setup_ps(arvif); 7856 if (ret) { 7857 ath10k_warn(ar, "failed to update vdev %i ps: %d\n", 7858 arvif->vdev_id, ret); 7859 goto err_stop; 7860 } 7861 7862 if (vif->type == NL80211_IFTYPE_MONITOR) { 7863 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 7864 if (ret) { 7865 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 7866 arvif->vdev_id, ret); 7867 goto err_stop; 7868 } 7869 7870 arvif->is_up = true; 7871 } 7872 7873 if (ath10k_mac_can_set_cts_prot(arvif)) { 7874 ret = ath10k_mac_set_cts_prot(arvif); 7875 if (ret) 7876 ath10k_warn(ar, "failed to set cts protection for vdev %d: %d\n", 7877 arvif->vdev_id, ret); 7878 } 7879 7880 if (ath10k_peer_stats_enabled(ar)) { 7881 ar->pktlog_filter |= ATH10K_PKTLOG_PEER_STATS; 7882 ret = ath10k_wmi_pdev_pktlog_enable(ar, 7883 ar->pktlog_filter); 7884 if (ret) { 7885 ath10k_warn(ar, "failed to enable pktlog %d\n", ret); 7886 goto err_stop; 7887 } 7888 } 7889 7890 mutex_unlock(&ar->conf_mutex); 7891 return 0; 7892 7893 err_stop: 7894 ath10k_vdev_stop(arvif); 7895 arvif->is_started = false; 7896 ath10k_mac_vif_setup_ps(arvif); 7897 7898 err: 7899 mutex_unlock(&ar->conf_mutex); 7900 return ret; 7901 } 7902 7903 static void 7904 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 7905 struct ieee80211_vif *vif, 7906 
struct ieee80211_chanctx_conf *ctx) 7907 { 7908 struct ath10k *ar = hw->priv; 7909 struct ath10k_vif *arvif = (void *)vif->drv_priv; 7910 int ret; 7911 7912 mutex_lock(&ar->conf_mutex); 7913 7914 ath10k_dbg(ar, ATH10K_DBG_MAC, 7915 "mac chanctx unassign ptr %pK vdev_id %i\n", 7916 ctx, arvif->vdev_id); 7917 7918 WARN_ON(!arvif->is_started); 7919 7920 if (vif->type == NL80211_IFTYPE_MONITOR) { 7921 WARN_ON(!arvif->is_up); 7922 7923 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 7924 if (ret) 7925 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 7926 arvif->vdev_id, ret); 7927 7928 arvif->is_up = false; 7929 } 7930 7931 ret = ath10k_vdev_stop(arvif); 7932 if (ret) 7933 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 7934 arvif->vdev_id, ret); 7935 7936 arvif->is_started = false; 7937 7938 mutex_unlock(&ar->conf_mutex); 7939 } 7940 7941 static int 7942 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 7943 struct ieee80211_vif_chanctx_switch *vifs, 7944 int n_vifs, 7945 enum ieee80211_chanctx_switch_mode mode) 7946 { 7947 struct ath10k *ar = hw->priv; 7948 7949 mutex_lock(&ar->conf_mutex); 7950 7951 ath10k_dbg(ar, ATH10K_DBG_MAC, 7952 "mac chanctx switch n_vifs %d mode %d\n", 7953 n_vifs, mode); 7954 ath10k_mac_update_vif_chan(ar, vifs, n_vifs); 7955 7956 mutex_unlock(&ar->conf_mutex); 7957 return 0; 7958 } 7959 7960 static void ath10k_mac_op_sta_pre_rcu_remove(struct ieee80211_hw *hw, 7961 struct ieee80211_vif *vif, 7962 struct ieee80211_sta *sta) 7963 { 7964 struct ath10k *ar; 7965 struct ath10k_peer *peer; 7966 7967 ar = hw->priv; 7968 7969 list_for_each_entry(peer, &ar->peers, list) 7970 if (peer->sta == sta) 7971 peer->removed = true; 7972 } 7973 7974 static void ath10k_sta_statistics(struct ieee80211_hw *hw, 7975 struct ieee80211_vif *vif, 7976 struct ieee80211_sta *sta, 7977 struct station_info *sinfo) 7978 { 7979 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 7980 struct ath10k *ar = arsta->arvif->ar; 7981 7982 if (!ath10k_peer_stats_enabled(ar)) 7983 return; 7984 7985 sinfo->rx_duration = arsta->rx_duration; 7986 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_RX_DURATION); 7987 7988 if (!arsta->txrate.legacy && !arsta->txrate.nss) 7989 return; 7990 7991 if (arsta->txrate.legacy) { 7992 sinfo->txrate.legacy = arsta->txrate.legacy; 7993 } else { 7994 sinfo->txrate.mcs = arsta->txrate.mcs; 7995 sinfo->txrate.nss = arsta->txrate.nss; 7996 sinfo->txrate.bw = arsta->txrate.bw; 7997 } 7998 sinfo->txrate.flags = arsta->txrate.flags; 7999 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 8000 } 8001 8002 static const struct ieee80211_ops ath10k_ops = { 8003 .tx = ath10k_mac_op_tx, 8004 .wake_tx_queue = ath10k_mac_op_wake_tx_queue, 8005 .start = ath10k_start, 8006 .stop = ath10k_stop, 8007 .config = ath10k_config, 8008 .add_interface = ath10k_add_interface, 8009 .remove_interface = ath10k_remove_interface, 8010 .configure_filter = ath10k_configure_filter, 8011 .bss_info_changed = ath10k_bss_info_changed, 8012 .set_coverage_class = ath10k_mac_op_set_coverage_class, 8013 .hw_scan = ath10k_hw_scan, 8014 .cancel_hw_scan = ath10k_cancel_hw_scan, 8015 .set_key = ath10k_set_key, 8016 .set_default_unicast_key = ath10k_set_default_unicast_key, 8017 .sta_state = ath10k_sta_state, 8018 .conf_tx = ath10k_conf_tx, 8019 .remain_on_channel = ath10k_remain_on_channel, 8020 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 8021 .set_rts_threshold = ath10k_set_rts_threshold, 8022 .set_frag_threshold = ath10k_mac_op_set_frag_threshold, 8023 .flush = ath10k_flush, 8024 
.tx_last_beacon = ath10k_tx_last_beacon, 8025 .set_antenna = ath10k_set_antenna, 8026 .get_antenna = ath10k_get_antenna, 8027 .reconfig_complete = ath10k_reconfig_complete, 8028 .get_survey = ath10k_get_survey, 8029 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 8030 .sta_rc_update = ath10k_sta_rc_update, 8031 .offset_tsf = ath10k_offset_tsf, 8032 .ampdu_action = ath10k_ampdu_action, 8033 .get_et_sset_count = ath10k_debug_get_et_sset_count, 8034 .get_et_stats = ath10k_debug_get_et_stats, 8035 .get_et_strings = ath10k_debug_get_et_strings, 8036 .add_chanctx = ath10k_mac_op_add_chanctx, 8037 .remove_chanctx = ath10k_mac_op_remove_chanctx, 8038 .change_chanctx = ath10k_mac_op_change_chanctx, 8039 .assign_vif_chanctx = ath10k_mac_op_assign_vif_chanctx, 8040 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 8041 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 8042 .sta_pre_rcu_remove = ath10k_mac_op_sta_pre_rcu_remove, 8043 .sta_statistics = ath10k_sta_statistics, 8044 8045 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 8046 8047 #ifdef CONFIG_PM 8048 .suspend = ath10k_wow_op_suspend, 8049 .resume = ath10k_wow_op_resume, 8050 .set_wakeup = ath10k_wow_op_set_wakeup, 8051 #endif 8052 #ifdef CONFIG_MAC80211_DEBUGFS 8053 .sta_add_debugfs = ath10k_sta_add_debugfs, 8054 #endif 8055 }; 8056 8057 #define CHAN2G(_channel, _freq, _flags) { \ 8058 .band = NL80211_BAND_2GHZ, \ 8059 .hw_value = (_channel), \ 8060 .center_freq = (_freq), \ 8061 .flags = (_flags), \ 8062 .max_antenna_gain = 0, \ 8063 .max_power = 30, \ 8064 } 8065 8066 #define CHAN5G(_channel, _freq, _flags) { \ 8067 .band = NL80211_BAND_5GHZ, \ 8068 .hw_value = (_channel), \ 8069 .center_freq = (_freq), \ 8070 .flags = (_flags), \ 8071 .max_antenna_gain = 0, \ 8072 .max_power = 30, \ 8073 } 8074 8075 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 8076 CHAN2G(1, 2412, 0), 8077 CHAN2G(2, 2417, 0), 8078 CHAN2G(3, 2422, 0), 8079 CHAN2G(4, 2427, 0), 8080 CHAN2G(5, 2432, 0), 8081 CHAN2G(6, 2437, 0), 8082 CHAN2G(7, 2442, 0), 8083 CHAN2G(8, 2447, 0), 8084 CHAN2G(9, 2452, 0), 8085 CHAN2G(10, 2457, 0), 8086 CHAN2G(11, 2462, 0), 8087 CHAN2G(12, 2467, 0), 8088 CHAN2G(13, 2472, 0), 8089 CHAN2G(14, 2484, 0), 8090 }; 8091 8092 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 8093 CHAN5G(36, 5180, 0), 8094 CHAN5G(40, 5200, 0), 8095 CHAN5G(44, 5220, 0), 8096 CHAN5G(48, 5240, 0), 8097 CHAN5G(52, 5260, 0), 8098 CHAN5G(56, 5280, 0), 8099 CHAN5G(60, 5300, 0), 8100 CHAN5G(64, 5320, 0), 8101 CHAN5G(100, 5500, 0), 8102 CHAN5G(104, 5520, 0), 8103 CHAN5G(108, 5540, 0), 8104 CHAN5G(112, 5560, 0), 8105 CHAN5G(116, 5580, 0), 8106 CHAN5G(120, 5600, 0), 8107 CHAN5G(124, 5620, 0), 8108 CHAN5G(128, 5640, 0), 8109 CHAN5G(132, 5660, 0), 8110 CHAN5G(136, 5680, 0), 8111 CHAN5G(140, 5700, 0), 8112 CHAN5G(144, 5720, 0), 8113 CHAN5G(149, 5745, 0), 8114 CHAN5G(153, 5765, 0), 8115 CHAN5G(157, 5785, 0), 8116 CHAN5G(161, 5805, 0), 8117 CHAN5G(165, 5825, 0), 8118 CHAN5G(169, 5845, 0), 8119 CHAN5G(173, 5865, 0), 8120 /* If you add more, you may need to change ATH10K_MAX_5G_CHAN */ 8121 /* And you will definitely need to change ATH10K_NUM_CHANS in core.h */ 8122 }; 8123 8124 struct ath10k *ath10k_mac_create(size_t priv_size) 8125 { 8126 struct ieee80211_hw *hw; 8127 struct ieee80211_ops *ops; 8128 struct ath10k *ar; 8129 8130 ops = kmemdup(&ath10k_ops, sizeof(ath10k_ops), GFP_KERNEL); 8131 if (!ops) 8132 return NULL; 8133 8134 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, ops); 8135 if (!hw) { 8136 kfree(ops); 8137 return NULL; 
	}

	ar = hw->priv;
	ar->hw = hw;
	ar->ops = ops;

	return ar;
}

void ath10k_mac_destroy(struct ath10k *ar)
{
	struct ieee80211_ops *ops = ar->ops;

	ieee80211_free_hw(ar->hw);
	kfree(ops);
}

static const struct ieee80211_iface_limit ath10k_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_STATION)
			 | BIT(NL80211_IFTYPE_P2P_CLIENT)
	},
	{
		.max = 3,
		.types = BIT(NL80211_IFTYPE_P2P_GO)
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE)
	},
	{
		.max = 7,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			 | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = {
	{
		.max = 8,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			 | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION)
	},
};

static const struct ieee80211_iface_combination ath10k_if_comb[] = {
	{
		.limits = ath10k_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
	},
};

static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = {
	{
		.limits = ath10k_10x_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10x_if_limits),
		.max_interfaces = 8,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_qcs_if_limit[] = {
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 2,
		.types = BIT(NL80211_IFTYPE_P2P_CLIENT),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
#ifdef CONFIG_MAC80211_MESH
			 BIT(NL80211_IFTYPE_MESH_POINT) |
#endif
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_ADHOC),
	},
};

/* FIXME: This is not thoroughly tested. These combinations may over- or
 * underestimate hw/fw capabilities.
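 *
 * Each limit entry caps how many interfaces of the listed types may exist
 * at once; cfg80211/mac80211 enforce the advertised combinations when
 * interfaces are added or change type (see cfg80211_check_combinations()),
 * so over-advertising here could let userspace request setups the firmware
 * cannot actually serve.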
 */
static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = {
	{
		.limits = ath10k_tlv_if_limit,
		.num_different_channels = 1,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit),
	},
	{
		.limits = ath10k_tlv_qcs_if_limit,
		.num_different_channels = 2,
		.max_interfaces = 4,
		.n_limits = ARRAY_SIZE(ath10k_tlv_qcs_if_limit),
	},
	{
		.limits = ath10k_tlv_if_limit_ibss,
		.num_different_channels = 1,
		.max_interfaces = 2,
		.n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss),
	},
};

static const struct ieee80211_iface_limit ath10k_10_4_if_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 16,
		.types = BIT(NL80211_IFTYPE_AP)
#ifdef CONFIG_MAC80211_MESH
			 | BIT(NL80211_IFTYPE_MESH_POINT)
#endif
	},
};

static const struct ieee80211_iface_combination ath10k_10_4_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 1,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static const struct
ieee80211_iface_combination ath10k_10_4_bcn_int_if_comb[] = {
	{
		.limits = ath10k_10_4_if_limits,
		.n_limits = ARRAY_SIZE(ath10k_10_4_if_limits),
		.max_interfaces = 16,
		.num_different_channels = 1,
		.beacon_int_infra_match = true,
		.beacon_int_min_gcd = 100,
#ifdef CONFIG_ATH10K_DFS_CERTIFIED
		.radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) |
				       BIT(NL80211_CHAN_WIDTH_20) |
				       BIT(NL80211_CHAN_WIDTH_40) |
				       BIT(NL80211_CHAN_WIDTH_80),
#endif
	},
};

static void ath10k_get_arvif_iter(void *data, u8 *mac,
				  struct ieee80211_vif *vif)
{
	struct ath10k_vif_iter *arvif_iter = data;
	struct ath10k_vif *arvif = (void *)vif->drv_priv;

	if (arvif->vdev_id == arvif_iter->vdev_id)
		arvif_iter->arvif = arvif;
}

struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id)
{
	struct ath10k_vif_iter arvif_iter;
	u32 flags;

	memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter));
	arvif_iter.vdev_id = vdev_id;

	flags = IEEE80211_IFACE_ITER_RESUME_ALL;
	ieee80211_iterate_active_interfaces_atomic(ar->hw,
						   flags,
						   ath10k_get_arvif_iter,
						   &arvif_iter);
	if (!arvif_iter.arvif) {
		ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id);
		return NULL;
	}

	return arvif_iter.arvif;
}

#define WRD_METHOD "WRDD"
#define WRDD_WIFI  (0x07)

static u32 ath10k_mac_wrdd_get_mcc(struct ath10k *ar, union acpi_object *wrdd)
{
	union acpi_object *mcc_pkg;
	union acpi_object *domain_type;
	union acpi_object *mcc_value;
	u32 i;

	if (wrdd->type != ACPI_TYPE_PACKAGE ||
	    wrdd->package.count < 2 ||
	    wrdd->package.elements[0].type != ACPI_TYPE_INTEGER ||
	    wrdd->package.elements[0].integer.value != 0) {
		ath10k_warn(ar, "ignoring malformed/unsupported wrdd structure\n");
		return 0;
	}

	for (i = 1; i < wrdd->package.count; ++i) {
		mcc_pkg = &wrdd->package.elements[i];

		if (mcc_pkg->type != ACPI_TYPE_PACKAGE)
			continue;
		if (mcc_pkg->package.count < 2)
			continue;
		if (mcc_pkg->package.elements[0].type != ACPI_TYPE_INTEGER ||
		    mcc_pkg->package.elements[1].type != ACPI_TYPE_INTEGER)
			continue;

		domain_type = &mcc_pkg->package.elements[0];
		if (domain_type->integer.value != WRDD_WIFI)
			continue;

		mcc_value = &mcc_pkg->package.elements[1];
		return mcc_value->integer.value;
	}
	return 0;
}

static int ath10k_mac_get_wrdd_regulatory(struct ath10k *ar, u16 *rd)
{
	acpi_handle root_handle;
	acpi_handle handle;
	struct acpi_buffer wrdd = {ACPI_ALLOCATE_BUFFER, NULL};
	acpi_status status;
	u32 alpha2_code;
	char alpha2[3];

	root_handle = ACPI_HANDLE(ar->dev);
	if (!root_handle)
		return -EOPNOTSUPP;

	status = acpi_get_handle(root_handle, (acpi_string)WRD_METHOD, &handle);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to get wrdd method %d\n", status);
		return -EIO;
	}

	status = acpi_evaluate_object(handle, NULL, NULL, &wrdd);
	if (ACPI_FAILURE(status)) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "failed to call wrdd %d\n", status);
		return -EIO;
	}

	alpha2_code = ath10k_mac_wrdd_get_mcc(ar, wrdd.pointer);
	kfree(wrdd.pointer);
	if (!alpha2_code)
		return -EIO;

	alpha2[0] = (alpha2_code >> 8) & 0xff;
	alpha2[1] = (alpha2_code >> 0) & 0xff;
	alpha2[2] = '\0';

	ath10k_dbg(ar, ATH10K_DBG_BOOT,
		   "regulatory hint from WRDD (alpha2-code): %s\n", alpha2);

	*rd = ath_regd_find_country_by_name(alpha2);
	if (*rd == 0xffff)
		return -EIO;

	*rd |= COUNTRY_ERD_FLAG;
	return 0;
}

static int ath10k_mac_init_rd(struct ath10k *ar)
{
	int ret;
	u16 rd;

	ret = ath10k_mac_get_wrdd_regulatory(ar, &rd);
	if (ret) {
		ath10k_dbg(ar, ATH10K_DBG_BOOT,
			   "fallback to eeprom programmed regulatory settings\n");
		rd = ar->hw_eeprom_rd;
	}

	ar->ath_common.regulatory.current_rd = rd;
	return 0;
}

int ath10k_mac_register(struct ath10k *ar)
{
	static const u32 cipher_suites[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,

		/* Do not add hardware supported ciphers before this line.
		 * Allow software encryption for all chips. Don't forget to
		 * update n_cipher_suites below.
		 */
		WLAN_CIPHER_SUITE_AES_CMAC,
		WLAN_CIPHER_SUITE_BIP_CMAC_256,
		WLAN_CIPHER_SUITE_BIP_GMAC_128,
		WLAN_CIPHER_SUITE_BIP_GMAC_256,

		/* Only QCA99x0 and QCA4019 variants support GCMP-128, GCMP-256
		 * and CCMP-256 in hardware.
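		 * The trailing GCMP/CCMP-256 entries are only advertised when
		 * the chip specific hw_params.n_cipher_suites (applied just
		 * before ieee80211_register_hw() below) is large enough to
		 * cover them.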
		 */
		WLAN_CIPHER_SUITE_GCMP,
		WLAN_CIPHER_SUITE_GCMP_256,
		WLAN_CIPHER_SUITE_CCMP_256,
	};
	struct ieee80211_supported_band *band;
	void *channels;
	int ret;

	if (!is_valid_ether_addr(ar->mac_addr)) {
		ath10k_warn(ar, "invalid MAC address; choosing random\n");
		eth_random_addr(ar->mac_addr);
	}
	SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr);

	SET_IEEE80211_DEV(ar->hw, ar->dev);

	BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) +
		      ARRAY_SIZE(ath10k_5ghz_channels)) !=
		     ATH10K_NUM_CHANS);

	if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) {
		channels = kmemdup(ath10k_2ghz_channels,
				   sizeof(ath10k_2ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_2GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels);
		band->channels = channels;

		if (ar->hw_params.cck_rate_map_rev2) {
			band->n_bitrates = ath10k_g_rates_rev2_size;
			band->bitrates = ath10k_g_rates_rev2;
		} else {
			band->n_bitrates = ath10k_g_rates_size;
			band->bitrates = ath10k_g_rates;
		}

		ar->hw->wiphy->bands[NL80211_BAND_2GHZ] = band;
	}

	if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) {
		channels = kmemdup(ath10k_5ghz_channels,
				   sizeof(ath10k_5ghz_channels),
				   GFP_KERNEL);
		if (!channels) {
			ret = -ENOMEM;
			goto err_free;
		}

		band = &ar->mac.sbands[NL80211_BAND_5GHZ];
		band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels);
		band->channels = channels;
		band->n_bitrates = ath10k_a_rates_size;
		band->bitrates = ath10k_a_rates;
		ar->hw->wiphy->bands[NL80211_BAND_5GHZ] = band;
	}

	wiphy_read_of_freq_limits(ar->hw->wiphy);
	ath10k_mac_setup_ht_vht_cap(ar);

	ar->hw->wiphy->interface_modes =
		BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_MESH_POINT);

	ar->hw->wiphy->available_antennas_rx = ar->cfg_rx_chainmask;
	ar->hw->wiphy->available_antennas_tx = ar->cfg_tx_chainmask;

	if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->normal_mode_fw.fw_file.fw_features))
		ar->hw->wiphy->interface_modes |=
			BIT(NL80211_IFTYPE_P2P_DEVICE) |
			BIT(NL80211_IFTYPE_P2P_CLIENT) |
			BIT(NL80211_IFTYPE_P2P_GO);

	ieee80211_hw_set(ar->hw, SIGNAL_DBM);

	if (!test_bit(ATH10K_FW_FEATURE_NO_PS,
		      ar->running_fw->fw_file.fw_features)) {
		ieee80211_hw_set(ar->hw, SUPPORTS_PS);
		ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS);
	}

	ieee80211_hw_set(ar->hw, MFP_CAPABLE);
	ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL);
	ieee80211_hw_set(ar->hw, AP_LINK_PS);
	ieee80211_hw_set(ar->hw, SPECTRUM_MGMT);
	ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(ar->hw, CONNECTION_MONITOR);
	ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK);
	ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(ar->hw, QUEUE_CONTROL);
	ieee80211_hw_set(ar->hw, SUPPORTS_TX_FRAG);
	ieee80211_hw_set(ar->hw, REPORTS_LOW_ACK);

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL);

	ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS;
	ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;

	if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS)
		ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS;

	if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) {
		ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION);
		ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW);
	}

	ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID;
	ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN;

	if (test_bit(WMI_SERVICE_NLO, ar->wmi.svc_map)) {
		ar->hw->wiphy->max_sched_scan_reqs = 1;
		ar->hw->wiphy->max_sched_scan_ssids = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_match_sets = WMI_PNO_MAX_SUPP_NETWORKS;
		ar->hw->wiphy->max_sched_scan_ie_len = WMI_PNO_MAX_IE_LENGTH;
		ar->hw->wiphy->max_sched_scan_plans = WMI_PNO_MAX_SCHED_SCAN_PLANS;
		ar->hw->wiphy->max_sched_scan_plan_interval =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_INT;
		ar->hw->wiphy->max_sched_scan_plan_iterations =
			WMI_PNO_MAX_SCHED_SCAN_PLAN_ITRNS;
	}

	ar->hw->vif_data_size = sizeof(struct ath10k_vif);
	ar->hw->sta_data_size = sizeof(struct ath10k_sta);
	ar->hw->txq_data_size = sizeof(struct ath10k_txq);

	ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL;

	if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD;

		/* Firmware delivers WPS/P2P Probe Request frames to the driver
		 * so that userspace (e.g. wpa_supplicant/hostapd) can generate
		 * correct Probe Responses. Advertising the offload here is
		 * therefore more of a hack than a true offload.
		 */
		ar->hw->wiphy->probe_resp_offload |=
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 |
			NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P;
	}

	if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_TDLS_EXPLICIT_MODE_ONLY, ar->wmi.svc_map)) {
		ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		if (test_bit(WMI_SERVICE_TDLS_WIDER_BANDWIDTH, ar->wmi.svc_map))
			ieee80211_hw_set(ar->hw, TDLS_WIDER_BW);
	}

	if (test_bit(WMI_SERVICE_TDLS_UAPSD_BUFFER_STA, ar->wmi.svc_map))
		ieee80211_hw_set(ar->hw, SUPPORTS_TDLS_BUFFER_STA);

	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL;
	ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;
	ar->hw->wiphy->max_remain_on_channel_duration = 5000;

	ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
				   NL80211_FEATURE_AP_SCAN;

	ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations;

	ret = ath10k_wow_init(ar);
	if (ret) {
		ath10k_warn(ar, "failed to init wow: %d\n", ret);
		goto err_free;
	}

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS);
	wiphy_ext_feature_set(ar->hw->wiphy,
			      NL80211_EXT_FEATURE_SET_SCAN_DWELL);

	if (test_bit(WMI_SERVICE_TX_DATA_ACK_RSSI, ar->wmi.svc_map) ||
	    test_bit(WMI_SERVICE_HTT_MGMT_TX_COMP_VALID_FLAGS, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ACK_SIGNAL_SUPPORT);

	if (ath10k_peer_stats_enabled(ar) ||
	    test_bit(WMI_SERVICE_REPORT_AIRTIME, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_AIRTIME_FAIRNESS);

	if (test_bit(WMI_SERVICE_RTT_RESPONDER_ROLE, ar->wmi.svc_map))
		wiphy_ext_feature_set(ar->hw->wiphy,
				      NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER);

	/* On low-latency (LL) hardware the queues are managed entirely by the
	 * firmware, so all we do here is advertise to
	 * mac80211 how many queues we can handle.
	 */
	ar->hw->queues = IEEE80211_MAX_QUEUES;

	/* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is
	 * something that vdev_ids can't reach so that we don't stop the queue
	 * accidentally.
	 */
	ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1;

	switch (ar->running_fw->fw_file.wmi_op_version) {
	case ATH10K_FW_WMI_OP_VERSION_MAIN:
		ar->hw->wiphy->iface_combinations = ath10k_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_if_comb);
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_TLV:
		if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_tlv_qcs_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_qcs_if_comb);
		} else {
			ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_tlv_if_comb);
		}
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_1:
	case ATH10K_FW_WMI_OP_VERSION_10_2:
	case ATH10K_FW_WMI_OP_VERSION_10_2_4:
		ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10x_if_comb);
		break;
	case ATH10K_FW_WMI_OP_VERSION_10_4:
		ar->hw->wiphy->iface_combinations = ath10k_10_4_if_comb;
		ar->hw->wiphy->n_iface_combinations =
			ARRAY_SIZE(ath10k_10_4_if_comb);
		if (test_bit(WMI_SERVICE_VDEV_DIFFERENT_BEACON_INTERVAL_SUPPORT,
			     ar->wmi.svc_map)) {
			ar->hw->wiphy->iface_combinations =
				ath10k_10_4_bcn_int_if_comb;
			ar->hw->wiphy->n_iface_combinations =
				ARRAY_SIZE(ath10k_10_4_bcn_int_if_comb);
		}
		break;
	case ATH10K_FW_WMI_OP_VERSION_UNSET:
	case ATH10K_FW_WMI_OP_VERSION_MAX:
		WARN_ON(1);
		ret = -EINVAL;
		goto err_free;
	}

	if (!test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags))
		ar->hw->netdev_features = NETIF_F_HW_CSUM;

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED)) {
		/* Init ath dfs pattern detector */
		ar->ath_common.debug_mask = ATH_DBG_DFS;
		ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common,
							     NL80211_DFS_UNSET);

		if (!ar->dfs_detector)
			ath10k_warn(ar, "failed to initialise DFS pattern detector\n");
	}

	ret = ath10k_mac_init_rd(ar);
	if (ret) {
		ath10k_err(ar, "failed to derive regdom: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	/* Disable set_coverage_class for chipsets that do not support it. */
	if (!ar->hw_params.hw_ops->set_coverage_class)
		ar->ops->set_coverage_class = NULL;

	ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy,
			    ath10k_reg_notifier);
	if (ret) {
		ath10k_err(ar, "failed to initialise regulatory: %i\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_SPOOF_MAC_SUPPORT, ar->wmi.svc_map)) {
		ar->hw->wiphy->features |=
			NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR;
	}

	ar->hw->wiphy->cipher_suites = cipher_suites;

	/* QCA988x and QCA6174 family chips do not support CCMP-256, GCMP-128
	 * and GCMP-256 ciphers in hardware. Fetch number of ciphers supported
	 * from chip specific hw_param table.
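	 * An out of range value is clamped below to the first eight entries
	 * of cipher_suites[], which excludes the GCMP and CCMP-256 suites.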
	 */
	if (!ar->hw_params.n_cipher_suites ||
	    ar->hw_params.n_cipher_suites > ARRAY_SIZE(cipher_suites)) {
		ath10k_err(ar, "invalid hw_params.n_cipher_suites %d\n",
			   ar->hw_params.n_cipher_suites);
		ar->hw_params.n_cipher_suites = 8;
	}
	ar->hw->wiphy->n_cipher_suites = ar->hw_params.n_cipher_suites;

	wiphy_ext_feature_set(ar->hw->wiphy, NL80211_EXT_FEATURE_CQM_RSSI_LIST);

	ar->hw->weight_multiplier = ATH10K_AIRTIME_WEIGHT_MULTIPLIER;

	ret = ieee80211_register_hw(ar->hw);
	if (ret) {
		ath10k_err(ar, "failed to register ieee80211: %d\n", ret);
		goto err_dfs_detector_exit;
	}

	if (test_bit(WMI_SERVICE_PER_PACKET_SW_ENCRYPT, ar->wmi.svc_map)) {
		ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_AP_VLAN);
		ar->hw->wiphy->software_iftypes |= BIT(NL80211_IFTYPE_AP_VLAN);
	}

	if (!ath_is_world_regd(&ar->ath_common.regulatory)) {
		ret = regulatory_hint(ar->hw->wiphy,
				      ar->ath_common.regulatory.alpha2);
		if (ret)
			goto err_unregister;
	}

	return 0;

err_unregister:
	ieee80211_unregister_hw(ar->hw);

err_dfs_detector_exit:
	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

err_free:
	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
	return ret;
}

void ath10k_mac_unregister(struct ath10k *ar)
{
	ieee80211_unregister_hw(ar->hw);

	if (IS_ENABLED(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector)
		ar->dfs_detector->exit(ar->dfs_detector);

	kfree(ar->mac.sbands[NL80211_BAND_2GHZ].channels);
	kfree(ar->mac.sbands[NL80211_BAND_5GHZ].channels);

	SET_IEEE80211_DEV(ar->hw, NULL);
}
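
/* Rough lifecycle of the mac80211 glue above, kept here as a reference
 * sketch (the calls are driven from the ath10k core/bus code, not from
 * this file):
 *
 *	ar = ath10k_mac_create(priv_size);	// alloc ieee80211_hw + ops copy
 *	... firmware/WMI bring-up fills in phy_capability, svc_map, etc ...
 *	ath10k_mac_register(ar);	// advertise caps, ieee80211_register_hw()
 *	...
 *	ath10k_mac_unregister(ar);	// unwind the registration
 *	ath10k_mac_destroy(ar);		// ieee80211_free_hw() + free ops
 */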