/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include "mac.h"

#include <net/mac80211.h>
#include <linux/etherdevice.h>

#include "hif.h"
#include "core.h"
#include "debug.h"
#include "wmi.h"
#include "htt.h"
#include "txrx.h"
#include "testmode.h"
#include "wmi-tlv.h"
#include "wmi-ops.h"
#include "wow.h"

/*********/
/* Rates */
/*********/

static struct ieee80211_rate ath10k_rates[] = {
	{ .bitrate = 10,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_1M },
	{ .bitrate = 20,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_2M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_2M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 55,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_5_5M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_5_5M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },
	{ .bitrate = 110,
	  .hw_value = ATH10K_HW_RATE_CCK_LP_11M,
	  .hw_value_short = ATH10K_HW_RATE_CCK_SP_11M,
	  .flags = IEEE80211_RATE_SHORT_PREAMBLE },

	{ .bitrate = 60, .hw_value = ATH10K_HW_RATE_OFDM_6M },
	{ .bitrate = 90, .hw_value = ATH10K_HW_RATE_OFDM_9M },
	{ .bitrate = 120, .hw_value = ATH10K_HW_RATE_OFDM_12M },
	{ .bitrate = 180, .hw_value = ATH10K_HW_RATE_OFDM_18M },
	{ .bitrate = 240, .hw_value = ATH10K_HW_RATE_OFDM_24M },
	{ .bitrate = 360, .hw_value = ATH10K_HW_RATE_OFDM_36M },
	{ .bitrate = 480, .hw_value = ATH10K_HW_RATE_OFDM_48M },
	{ .bitrate = 540, .hw_value = ATH10K_HW_RATE_OFDM_54M },
};

#define ATH10K_MAC_FIRST_OFDM_RATE_IDX 4

#define ath10k_a_rates (ath10k_rates + ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_a_rates_size (ARRAY_SIZE(ath10k_rates) - \
			     ATH10K_MAC_FIRST_OFDM_RATE_IDX)
#define ath10k_g_rates (ath10k_rates + 0)
#define ath10k_g_rates_size (ARRAY_SIZE(ath10k_rates))

static bool ath10k_mac_bitrate_is_cck(int bitrate)
{
	switch (bitrate) {
	case 10:
	case 20:
	case 55:
	case 110:
		return true;
	}

	return false;
}

/* Convert a bitrate in 100 kbps units to the firmware rate code: units of
 * 500 kbps, with BIT(7) marking CCK rates.
 */
static u8 ath10k_mac_bitrate_to_rate(int bitrate)
{
	return DIV_ROUND_UP(bitrate, 5) |
	       (ath10k_mac_bitrate_is_cck(bitrate) ?
BIT(7) : 0); 90 } 91 92 u8 ath10k_mac_hw_rate_to_idx(const struct ieee80211_supported_band *sband, 93 u8 hw_rate) 94 { 95 const struct ieee80211_rate *rate; 96 int i; 97 98 for (i = 0; i < sband->n_bitrates; i++) { 99 rate = &sband->bitrates[i]; 100 101 if (rate->hw_value == hw_rate) 102 return i; 103 else if (rate->flags & IEEE80211_RATE_SHORT_PREAMBLE && 104 rate->hw_value_short == hw_rate) 105 return i; 106 } 107 108 return 0; 109 } 110 111 u8 ath10k_mac_bitrate_to_idx(const struct ieee80211_supported_band *sband, 112 u32 bitrate) 113 { 114 int i; 115 116 for (i = 0; i < sband->n_bitrates; i++) 117 if (sband->bitrates[i].bitrate == bitrate) 118 return i; 119 120 return 0; 121 } 122 123 static int ath10k_mac_get_max_vht_mcs_map(u16 mcs_map, int nss) 124 { 125 switch ((mcs_map >> (2 * nss)) & 0x3) { 126 case IEEE80211_VHT_MCS_SUPPORT_0_7: return BIT(8) - 1; 127 case IEEE80211_VHT_MCS_SUPPORT_0_8: return BIT(9) - 1; 128 case IEEE80211_VHT_MCS_SUPPORT_0_9: return BIT(10) - 1; 129 } 130 return 0; 131 } 132 133 static u32 134 ath10k_mac_max_ht_nss(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 135 { 136 int nss; 137 138 for (nss = IEEE80211_HT_MCS_MASK_LEN - 1; nss >= 0; nss--) 139 if (ht_mcs_mask[nss]) 140 return nss + 1; 141 142 return 1; 143 } 144 145 static u32 146 ath10k_mac_max_vht_nss(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 147 { 148 int nss; 149 150 for (nss = NL80211_VHT_NSS_MAX - 1; nss >= 0; nss--) 151 if (vht_mcs_mask[nss]) 152 return nss + 1; 153 154 return 1; 155 } 156 157 /**********/ 158 /* Crypto */ 159 /**********/ 160 161 static int ath10k_send_key(struct ath10k_vif *arvif, 162 struct ieee80211_key_conf *key, 163 enum set_key_cmd cmd, 164 const u8 *macaddr, u32 flags) 165 { 166 struct ath10k *ar = arvif->ar; 167 struct wmi_vdev_install_key_arg arg = { 168 .vdev_id = arvif->vdev_id, 169 .key_idx = key->keyidx, 170 .key_len = key->keylen, 171 .key_data = key->key, 172 .key_flags = flags, 173 .macaddr = macaddr, 174 }; 175 176 lockdep_assert_held(&arvif->ar->conf_mutex); 177 178 switch (key->cipher) { 179 case WLAN_CIPHER_SUITE_CCMP: 180 arg.key_cipher = WMI_CIPHER_AES_CCM; 181 key->flags |= IEEE80211_KEY_FLAG_GENERATE_IV_MGMT; 182 break; 183 case WLAN_CIPHER_SUITE_TKIP: 184 arg.key_cipher = WMI_CIPHER_TKIP; 185 arg.key_txmic_len = 8; 186 arg.key_rxmic_len = 8; 187 break; 188 case WLAN_CIPHER_SUITE_WEP40: 189 case WLAN_CIPHER_SUITE_WEP104: 190 arg.key_cipher = WMI_CIPHER_WEP; 191 break; 192 case WLAN_CIPHER_SUITE_AES_CMAC: 193 WARN_ON(1); 194 return -EINVAL; 195 default: 196 ath10k_warn(ar, "cipher %d is not supported\n", key->cipher); 197 return -EOPNOTSUPP; 198 } 199 200 if (cmd == DISABLE_KEY) { 201 arg.key_cipher = WMI_CIPHER_NONE; 202 arg.key_data = NULL; 203 } 204 205 return ath10k_wmi_vdev_install_key(arvif->ar, &arg); 206 } 207 208 static int ath10k_install_key(struct ath10k_vif *arvif, 209 struct ieee80211_key_conf *key, 210 enum set_key_cmd cmd, 211 const u8 *macaddr, u32 flags) 212 { 213 struct ath10k *ar = arvif->ar; 214 int ret; 215 unsigned long time_left; 216 217 lockdep_assert_held(&ar->conf_mutex); 218 219 reinit_completion(&ar->install_key_done); 220 221 ret = ath10k_send_key(arvif, key, cmd, macaddr, flags); 222 if (ret) 223 return ret; 224 225 time_left = wait_for_completion_timeout(&ar->install_key_done, 3 * HZ); 226 if (time_left == 0) 227 return -ETIMEDOUT; 228 229 return 0; 230 } 231 232 static int ath10k_install_peer_wep_keys(struct ath10k_vif *arvif, 233 const u8 *addr) 234 { 235 struct ath10k *ar = arvif->ar; 236 struct ath10k_peer *peer; 237 
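	/* Each configured WEP key is installed twice below, once flagged as a
	 * pairwise key and once as a group key, so that static WEP covers
	 * both unicast and multicast traffic for this peer.
	 */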
	int ret;
	int i;
	u32 flags;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(arvif->wep_keys); i++) {
		if (arvif->wep_keys[i] == NULL)
			continue;

		flags = 0;
		flags |= WMI_KEY_PAIRWISE;

		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
					 addr, flags);
		if (ret)
			return ret;

		flags = 0;
		flags |= WMI_KEY_GROUP;

		ret = ath10k_install_key(arvif, arvif->wep_keys[i], SET_KEY,
					 addr, flags);
		if (ret)
			return ret;

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = arvif->wep_keys[i];
		spin_unlock_bh(&ar->data_lock);
	}

	/* In some cases (notably with static WEP IBSS with multiple keys)
	 * multicast Tx becomes broken. Both pairwise and groupwise keys are
	 * installed already. Using WMI_KEY_TX_USAGE in different combinations
	 * didn't seem to help. Using the def_keyid vdev parameter seems to be
	 * effective so use that.
	 *
	 * FIXME: Revisit. Perhaps this can be done in a less hacky way.
	 */
	if (arvif->def_wep_key_idx == -1)
		return 0;

	ret = ath10k_wmi_vdev_set_param(arvif->ar,
					arvif->vdev_id,
					arvif->ar->wmi.vdev_param->def_keyid,
					arvif->def_wep_key_idx);
	if (ret) {
		ath10k_warn(ar, "failed to re-set def wpa key idx on vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	return 0;
}

static int ath10k_clear_peer_keys(struct ath10k_vif *arvif,
				  const u8 *addr)
{
	struct ath10k *ar = arvif->ar;
	struct ath10k_peer *peer;
	int first_errno = 0;
	int ret;
	int i;
	u32 flags = 0;

	lockdep_assert_held(&ar->conf_mutex);

	spin_lock_bh(&ar->data_lock);
	peer = ath10k_peer_find(ar, arvif->vdev_id, addr);
	spin_unlock_bh(&ar->data_lock);

	if (!peer)
		return -ENOENT;

	for (i = 0; i < ARRAY_SIZE(peer->keys); i++) {
		if (peer->keys[i] == NULL)
			continue;

		/* key flags are not required to delete the key */
		ret = ath10k_install_key(arvif, peer->keys[i],
					 DISABLE_KEY, addr, flags);
		if (ret && first_errno == 0)
			first_errno = ret;

		if (ret)
			ath10k_warn(ar, "failed to remove peer wep key %d: %d\n",
				    i, ret);

		spin_lock_bh(&ar->data_lock);
		peer->keys[i] = NULL;
		spin_unlock_bh(&ar->data_lock);
	}

	return first_errno;
}

bool ath10k_mac_is_peer_wep_key_set(struct ath10k *ar, const u8 *addr,
				    u8 keyidx)
{
	struct ath10k_peer *peer;
	int i;

	lockdep_assert_held(&ar->data_lock);

	/* We don't know which vdev this peer belongs to,
	 * since WMI doesn't give us that information.
	 *
	 * FIXME: multi-bss needs to be handled.
352 */ 353 peer = ath10k_peer_find(ar, 0, addr); 354 if (!peer) 355 return false; 356 357 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 358 if (peer->keys[i] && peer->keys[i]->keyidx == keyidx) 359 return true; 360 } 361 362 return false; 363 } 364 365 static int ath10k_clear_vdev_key(struct ath10k_vif *arvif, 366 struct ieee80211_key_conf *key) 367 { 368 struct ath10k *ar = arvif->ar; 369 struct ath10k_peer *peer; 370 u8 addr[ETH_ALEN]; 371 int first_errno = 0; 372 int ret; 373 int i; 374 u32 flags = 0; 375 376 lockdep_assert_held(&ar->conf_mutex); 377 378 for (;;) { 379 /* since ath10k_install_key we can't hold data_lock all the 380 * time, so we try to remove the keys incrementally */ 381 spin_lock_bh(&ar->data_lock); 382 i = 0; 383 list_for_each_entry(peer, &ar->peers, list) { 384 for (i = 0; i < ARRAY_SIZE(peer->keys); i++) { 385 if (peer->keys[i] == key) { 386 ether_addr_copy(addr, peer->addr); 387 peer->keys[i] = NULL; 388 break; 389 } 390 } 391 392 if (i < ARRAY_SIZE(peer->keys)) 393 break; 394 } 395 spin_unlock_bh(&ar->data_lock); 396 397 if (i == ARRAY_SIZE(peer->keys)) 398 break; 399 /* key flags are not required to delete the key */ 400 ret = ath10k_install_key(arvif, key, DISABLE_KEY, addr, flags); 401 if (ret && first_errno == 0) 402 first_errno = ret; 403 404 if (ret) 405 ath10k_warn(ar, "failed to remove key for %pM: %d\n", 406 addr, ret); 407 } 408 409 return first_errno; 410 } 411 412 static int ath10k_mac_vif_update_wep_key(struct ath10k_vif *arvif, 413 struct ieee80211_key_conf *key) 414 { 415 struct ath10k *ar = arvif->ar; 416 struct ath10k_peer *peer; 417 int ret; 418 419 lockdep_assert_held(&ar->conf_mutex); 420 421 list_for_each_entry(peer, &ar->peers, list) { 422 if (!memcmp(peer->addr, arvif->vif->addr, ETH_ALEN)) 423 continue; 424 425 if (!memcmp(peer->addr, arvif->bssid, ETH_ALEN)) 426 continue; 427 428 if (peer->keys[key->keyidx] == key) 429 continue; 430 431 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vif vdev %i update key %i needs update\n", 432 arvif->vdev_id, key->keyidx); 433 434 ret = ath10k_install_peer_wep_keys(arvif, peer->addr); 435 if (ret) { 436 ath10k_warn(ar, "failed to update wep keys on vdev %i for peer %pM: %d\n", 437 arvif->vdev_id, peer->addr, ret); 438 return ret; 439 } 440 } 441 442 return 0; 443 } 444 445 /*********************/ 446 /* General utilities */ 447 /*********************/ 448 449 static inline enum wmi_phy_mode 450 chan_to_phymode(const struct cfg80211_chan_def *chandef) 451 { 452 enum wmi_phy_mode phymode = MODE_UNKNOWN; 453 454 switch (chandef->chan->band) { 455 case IEEE80211_BAND_2GHZ: 456 switch (chandef->width) { 457 case NL80211_CHAN_WIDTH_20_NOHT: 458 if (chandef->chan->flags & IEEE80211_CHAN_NO_OFDM) 459 phymode = MODE_11B; 460 else 461 phymode = MODE_11G; 462 break; 463 case NL80211_CHAN_WIDTH_20: 464 phymode = MODE_11NG_HT20; 465 break; 466 case NL80211_CHAN_WIDTH_40: 467 phymode = MODE_11NG_HT40; 468 break; 469 case NL80211_CHAN_WIDTH_5: 470 case NL80211_CHAN_WIDTH_10: 471 case NL80211_CHAN_WIDTH_80: 472 case NL80211_CHAN_WIDTH_80P80: 473 case NL80211_CHAN_WIDTH_160: 474 phymode = MODE_UNKNOWN; 475 break; 476 } 477 break; 478 case IEEE80211_BAND_5GHZ: 479 switch (chandef->width) { 480 case NL80211_CHAN_WIDTH_20_NOHT: 481 phymode = MODE_11A; 482 break; 483 case NL80211_CHAN_WIDTH_20: 484 phymode = MODE_11NA_HT20; 485 break; 486 case NL80211_CHAN_WIDTH_40: 487 phymode = MODE_11NA_HT40; 488 break; 489 case NL80211_CHAN_WIDTH_80: 490 phymode = MODE_11AC_VHT80; 491 break; 492 case NL80211_CHAN_WIDTH_5: 493 case 
NL80211_CHAN_WIDTH_10: 494 case NL80211_CHAN_WIDTH_80P80: 495 case NL80211_CHAN_WIDTH_160: 496 phymode = MODE_UNKNOWN; 497 break; 498 } 499 break; 500 default: 501 break; 502 } 503 504 WARN_ON(phymode == MODE_UNKNOWN); 505 return phymode; 506 } 507 508 static u8 ath10k_parse_mpdudensity(u8 mpdudensity) 509 { 510 /* 511 * 802.11n D2.0 defined values for "Minimum MPDU Start Spacing": 512 * 0 for no restriction 513 * 1 for 1/4 us 514 * 2 for 1/2 us 515 * 3 for 1 us 516 * 4 for 2 us 517 * 5 for 4 us 518 * 6 for 8 us 519 * 7 for 16 us 520 */ 521 switch (mpdudensity) { 522 case 0: 523 return 0; 524 case 1: 525 case 2: 526 case 3: 527 /* Our lower layer calculations limit our precision to 528 1 microsecond */ 529 return 1; 530 case 4: 531 return 2; 532 case 5: 533 return 4; 534 case 6: 535 return 8; 536 case 7: 537 return 16; 538 default: 539 return 0; 540 } 541 } 542 543 int ath10k_mac_vif_chan(struct ieee80211_vif *vif, 544 struct cfg80211_chan_def *def) 545 { 546 struct ieee80211_chanctx_conf *conf; 547 548 rcu_read_lock(); 549 conf = rcu_dereference(vif->chanctx_conf); 550 if (!conf) { 551 rcu_read_unlock(); 552 return -ENOENT; 553 } 554 555 *def = conf->def; 556 rcu_read_unlock(); 557 558 return 0; 559 } 560 561 static void ath10k_mac_num_chanctxs_iter(struct ieee80211_hw *hw, 562 struct ieee80211_chanctx_conf *conf, 563 void *data) 564 { 565 int *num = data; 566 567 (*num)++; 568 } 569 570 static int ath10k_mac_num_chanctxs(struct ath10k *ar) 571 { 572 int num = 0; 573 574 ieee80211_iter_chan_contexts_atomic(ar->hw, 575 ath10k_mac_num_chanctxs_iter, 576 &num); 577 578 return num; 579 } 580 581 static void 582 ath10k_mac_get_any_chandef_iter(struct ieee80211_hw *hw, 583 struct ieee80211_chanctx_conf *conf, 584 void *data) 585 { 586 struct cfg80211_chan_def **def = data; 587 588 *def = &conf->def; 589 } 590 591 static int ath10k_peer_create(struct ath10k *ar, u32 vdev_id, const u8 *addr, 592 enum wmi_peer_type peer_type) 593 { 594 int ret; 595 596 lockdep_assert_held(&ar->conf_mutex); 597 598 if (ar->num_peers >= ar->max_num_peers) 599 return -ENOBUFS; 600 601 ret = ath10k_wmi_peer_create(ar, vdev_id, addr, peer_type); 602 if (ret) { 603 ath10k_warn(ar, "failed to create wmi peer %pM on vdev %i: %i\n", 604 addr, vdev_id, ret); 605 return ret; 606 } 607 608 ret = ath10k_wait_for_peer_created(ar, vdev_id, addr); 609 if (ret) { 610 ath10k_warn(ar, "failed to wait for created wmi peer %pM on vdev %i: %i\n", 611 addr, vdev_id, ret); 612 return ret; 613 } 614 615 ar->num_peers++; 616 617 return 0; 618 } 619 620 static int ath10k_mac_set_kickout(struct ath10k_vif *arvif) 621 { 622 struct ath10k *ar = arvif->ar; 623 u32 param; 624 int ret; 625 626 param = ar->wmi.pdev_param->sta_kickout_th; 627 ret = ath10k_wmi_pdev_set_param(ar, param, 628 ATH10K_KICKOUT_THRESHOLD); 629 if (ret) { 630 ath10k_warn(ar, "failed to set kickout threshold on vdev %i: %d\n", 631 arvif->vdev_id, ret); 632 return ret; 633 } 634 635 param = ar->wmi.vdev_param->ap_keepalive_min_idle_inactive_time_secs; 636 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 637 ATH10K_KEEPALIVE_MIN_IDLE); 638 if (ret) { 639 ath10k_warn(ar, "failed to set keepalive minimum idle time on vdev %i: %d\n", 640 arvif->vdev_id, ret); 641 return ret; 642 } 643 644 param = ar->wmi.vdev_param->ap_keepalive_max_idle_inactive_time_secs; 645 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 646 ATH10K_KEEPALIVE_MAX_IDLE); 647 if (ret) { 648 ath10k_warn(ar, "failed to set keepalive maximum idle time on vdev %i: %d\n", 649 arvif->vdev_id, 
ret); 650 return ret; 651 } 652 653 param = ar->wmi.vdev_param->ap_keepalive_max_unresponsive_time_secs; 654 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, 655 ATH10K_KEEPALIVE_MAX_UNRESPONSIVE); 656 if (ret) { 657 ath10k_warn(ar, "failed to set keepalive maximum unresponsive time on vdev %i: %d\n", 658 arvif->vdev_id, ret); 659 return ret; 660 } 661 662 return 0; 663 } 664 665 static int ath10k_mac_set_rts(struct ath10k_vif *arvif, u32 value) 666 { 667 struct ath10k *ar = arvif->ar; 668 u32 vdev_param; 669 670 vdev_param = ar->wmi.vdev_param->rts_threshold; 671 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 672 } 673 674 static int ath10k_mac_set_frag(struct ath10k_vif *arvif, u32 value) 675 { 676 struct ath10k *ar = arvif->ar; 677 u32 vdev_param; 678 679 if (value != 0xFFFFFFFF) 680 value = clamp_t(u32, arvif->ar->hw->wiphy->frag_threshold, 681 ATH10K_FRAGMT_THRESHOLD_MIN, 682 ATH10K_FRAGMT_THRESHOLD_MAX); 683 684 vdev_param = ar->wmi.vdev_param->fragmentation_threshold; 685 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, value); 686 } 687 688 static int ath10k_peer_delete(struct ath10k *ar, u32 vdev_id, const u8 *addr) 689 { 690 int ret; 691 692 lockdep_assert_held(&ar->conf_mutex); 693 694 ret = ath10k_wmi_peer_delete(ar, vdev_id, addr); 695 if (ret) 696 return ret; 697 698 ret = ath10k_wait_for_peer_deleted(ar, vdev_id, addr); 699 if (ret) 700 return ret; 701 702 ar->num_peers--; 703 704 return 0; 705 } 706 707 static void ath10k_peer_cleanup(struct ath10k *ar, u32 vdev_id) 708 { 709 struct ath10k_peer *peer, *tmp; 710 711 lockdep_assert_held(&ar->conf_mutex); 712 713 spin_lock_bh(&ar->data_lock); 714 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 715 if (peer->vdev_id != vdev_id) 716 continue; 717 718 ath10k_warn(ar, "removing stale peer %pM from vdev_id %d\n", 719 peer->addr, vdev_id); 720 721 list_del(&peer->list); 722 kfree(peer); 723 ar->num_peers--; 724 } 725 spin_unlock_bh(&ar->data_lock); 726 } 727 728 static void ath10k_peer_cleanup_all(struct ath10k *ar) 729 { 730 struct ath10k_peer *peer, *tmp; 731 732 lockdep_assert_held(&ar->conf_mutex); 733 734 spin_lock_bh(&ar->data_lock); 735 list_for_each_entry_safe(peer, tmp, &ar->peers, list) { 736 list_del(&peer->list); 737 kfree(peer); 738 } 739 spin_unlock_bh(&ar->data_lock); 740 741 ar->num_peers = 0; 742 ar->num_stations = 0; 743 } 744 745 static int ath10k_mac_tdls_peer_update(struct ath10k *ar, u32 vdev_id, 746 struct ieee80211_sta *sta, 747 enum wmi_tdls_peer_state state) 748 { 749 int ret; 750 struct wmi_tdls_peer_update_cmd_arg arg = {}; 751 struct wmi_tdls_peer_capab_arg cap = {}; 752 struct wmi_channel_arg chan_arg = {}; 753 754 lockdep_assert_held(&ar->conf_mutex); 755 756 arg.vdev_id = vdev_id; 757 arg.peer_state = state; 758 ether_addr_copy(arg.addr, sta->addr); 759 760 cap.peer_max_sp = sta->max_sp; 761 cap.peer_uapsd_queues = sta->uapsd_queues; 762 763 if (state == WMI_TDLS_PEER_STATE_CONNECTED && 764 !sta->tdls_initiator) 765 cap.is_peer_responder = 1; 766 767 ret = ath10k_wmi_tdls_peer_update(ar, &arg, &cap, &chan_arg); 768 if (ret) { 769 ath10k_warn(ar, "failed to update tdls peer %pM on vdev %i: %i\n", 770 arg.addr, vdev_id, ret); 771 return ret; 772 } 773 774 return 0; 775 } 776 777 /************************/ 778 /* Interface management */ 779 /************************/ 780 781 void ath10k_mac_vif_beacon_free(struct ath10k_vif *arvif) 782 { 783 struct ath10k *ar = arvif->ar; 784 785 lockdep_assert_held(&ar->data_lock); 786 787 if (!arvif->beacon) 
		return;

	if (!arvif->beacon_buf)
		dma_unmap_single(ar->dev, ATH10K_SKB_CB(arvif->beacon)->paddr,
				 arvif->beacon->len, DMA_TO_DEVICE);

	if (WARN_ON(arvif->beacon_state != ATH10K_BEACON_SCHEDULED &&
		    arvif->beacon_state != ATH10K_BEACON_SENT))
		return;

	dev_kfree_skb_any(arvif->beacon);

	arvif->beacon = NULL;
	arvif->beacon_state = ATH10K_BEACON_SCHEDULED;
}

static void ath10k_mac_vif_beacon_cleanup(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;

	lockdep_assert_held(&ar->data_lock);

	ath10k_mac_vif_beacon_free(arvif);

	if (arvif->beacon_buf) {
		dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN,
				  arvif->beacon_buf, arvif->beacon_paddr);
		arvif->beacon_buf = NULL;
	}
}

static inline int ath10k_vdev_setup_sync(struct ath10k *ar)
{
	unsigned long time_left;

	lockdep_assert_held(&ar->conf_mutex);

	if (test_bit(ATH10K_FLAG_CRASH_FLUSH, &ar->dev_flags))
		return -ESHUTDOWN;

	time_left = wait_for_completion_timeout(&ar->vdev_setup_done,
						ATH10K_VDEV_SETUP_TIMEOUT_HZ);
	if (time_left == 0)
		return -ETIMEDOUT;

	return 0;
}
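
/* Start the special monitor vdev on whatever channel an existing channel
 * context is using: pick any chandef via the atomic chanctx iterator, issue
 * a WMI vdev start on it, wait for the setup completion and finally bring
 * the vdev up with the device MAC address.
 */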
static int ath10k_monitor_vdev_start(struct ath10k *ar, int vdev_id)
{
	struct cfg80211_chan_def *chandef = NULL;
	struct ieee80211_channel *channel = NULL;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ieee80211_iter_chan_contexts_atomic(ar->hw,
					    ath10k_mac_get_any_chandef_iter,
					    &chandef);
	if (WARN_ON_ONCE(!chandef))
		return -ENOENT;

	channel = chandef->chan;

	arg.vdev_id = vdev_id;
	arg.channel.freq = channel->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;

	/* TODO setup this dynamically, what in case we
	 * don't have any vifs?
	 */
	arg.channel.mode = chan_to_phymode(chandef);
	arg.channel.chan_radar =
			!!(channel->flags & IEEE80211_CHAN_RADAR);

	arg.channel.min_power = 0;
	arg.channel.max_power = channel->max_power * 2;
	arg.channel.max_reg_power = channel->max_reg_power * 2;
	arg.channel.max_antenna_gain = channel->max_antenna_gain * 2;

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_start(ar, &arg);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for monitor vdev %i start: %d\n",
			    vdev_id, ret);
		return ret;
	}

	ret = ath10k_wmi_vdev_up(ar, vdev_id, 0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to put up monitor vdev %i: %d\n",
			    vdev_id, ret);
		goto vdev_stop;
	}

	ar->monitor_vdev_id = vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i started\n",
		   ar->monitor_vdev_id);
	return 0;

vdev_stop:
	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to stop monitor vdev %i after start failure: %d\n",
			    ar->monitor_vdev_id, ret);

	return ret;
}

static int ath10k_monitor_vdev_stop(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_down(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to put down monitor vdev %i: %d\n",
			    ar->monitor_vdev_id, ret);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, ar->monitor_vdev_id);
	if (ret)
		ath10k_warn(ar, "failed to request monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ret = ath10k_vdev_setup_sync(ar);
	if (ret)
		ath10k_warn(ar, "failed to synchronize monitor vdev %i stop: %d\n",
			    ar->monitor_vdev_id, ret);

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %i stopped\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_vdev_create(struct ath10k *ar)
{
	int bit, ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	if (ar->free_vdev_map == 0) {
		ath10k_warn(ar, "failed to find free vdev id for monitor vdev\n");
		return -ENOMEM;
	}

	bit = __ffs64(ar->free_vdev_map);

	ar->monitor_vdev_id = bit;

	ret = ath10k_wmi_vdev_create(ar, ar->monitor_vdev_id,
				     WMI_VDEV_TYPE_MONITOR,
				     0, ar->mac_addr);
	if (ret) {
		ath10k_warn(ar, "failed to request monitor vdev %i creation: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map &= ~(1LL << ar->monitor_vdev_id);
	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d created\n",
		   ar->monitor_vdev_id);

	return 0;
}

static int ath10k_monitor_vdev_delete(struct ath10k *ar)
{
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	ret = ath10k_wmi_vdev_delete(ar, ar->monitor_vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to request wmi monitor vdev %i removal: %d\n",
			    ar->monitor_vdev_id, ret);
		return ret;
	}

	ar->free_vdev_map |= 1LL << ar->monitor_vdev_id;

	ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor vdev %d deleted\n",
		   ar->monitor_vdev_id);
	return ret;
}

static int ath10k_monitor_start(struct ath10k *ar)
{
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	ret =
ath10k_monitor_vdev_create(ar); 992 if (ret) { 993 ath10k_warn(ar, "failed to create monitor vdev: %d\n", ret); 994 return ret; 995 } 996 997 ret = ath10k_monitor_vdev_start(ar, ar->monitor_vdev_id); 998 if (ret) { 999 ath10k_warn(ar, "failed to start monitor vdev: %d\n", ret); 1000 ath10k_monitor_vdev_delete(ar); 1001 return ret; 1002 } 1003 1004 ar->monitor_started = true; 1005 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor started\n"); 1006 1007 return 0; 1008 } 1009 1010 static int ath10k_monitor_stop(struct ath10k *ar) 1011 { 1012 int ret; 1013 1014 lockdep_assert_held(&ar->conf_mutex); 1015 1016 ret = ath10k_monitor_vdev_stop(ar); 1017 if (ret) { 1018 ath10k_warn(ar, "failed to stop monitor vdev: %d\n", ret); 1019 return ret; 1020 } 1021 1022 ret = ath10k_monitor_vdev_delete(ar); 1023 if (ret) { 1024 ath10k_warn(ar, "failed to delete monitor vdev: %d\n", ret); 1025 return ret; 1026 } 1027 1028 ar->monitor_started = false; 1029 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopped\n"); 1030 1031 return 0; 1032 } 1033 1034 static bool ath10k_mac_monitor_vdev_is_needed(struct ath10k *ar) 1035 { 1036 int num_ctx; 1037 1038 /* At least one chanctx is required to derive a channel to start 1039 * monitor vdev on. 1040 */ 1041 num_ctx = ath10k_mac_num_chanctxs(ar); 1042 if (num_ctx == 0) 1043 return false; 1044 1045 /* If there's already an existing special monitor interface then don't 1046 * bother creating another monitor vdev. 1047 */ 1048 if (ar->monitor_arvif) 1049 return false; 1050 1051 return ar->monitor || 1052 test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1053 } 1054 1055 static bool ath10k_mac_monitor_vdev_is_allowed(struct ath10k *ar) 1056 { 1057 int num_ctx; 1058 1059 num_ctx = ath10k_mac_num_chanctxs(ar); 1060 1061 /* FIXME: Current interface combinations and cfg80211/mac80211 code 1062 * shouldn't allow this but make sure to prevent handling the following 1063 * case anyway since multi-channel DFS hasn't been tested at all. 1064 */ 1065 if (test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags) && num_ctx > 1) 1066 return false; 1067 1068 return true; 1069 } 1070 1071 static int ath10k_monitor_recalc(struct ath10k *ar) 1072 { 1073 bool needed; 1074 bool allowed; 1075 int ret; 1076 1077 lockdep_assert_held(&ar->conf_mutex); 1078 1079 needed = ath10k_mac_monitor_vdev_is_needed(ar); 1080 allowed = ath10k_mac_monitor_vdev_is_allowed(ar); 1081 1082 ath10k_dbg(ar, ATH10K_DBG_MAC, 1083 "mac monitor recalc started? %d needed? %d allowed? 
%d\n", 1084 ar->monitor_started, needed, allowed); 1085 1086 if (WARN_ON(needed && !allowed)) { 1087 if (ar->monitor_started) { 1088 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac monitor stopping disallowed monitor\n"); 1089 1090 ret = ath10k_monitor_stop(ar); 1091 if (ret) 1092 ath10k_warn(ar, "failed to stop disallowed monitor: %d\n", ret); 1093 /* not serious */ 1094 } 1095 1096 return -EPERM; 1097 } 1098 1099 if (needed == ar->monitor_started) 1100 return 0; 1101 1102 if (needed) 1103 return ath10k_monitor_start(ar); 1104 else 1105 return ath10k_monitor_stop(ar); 1106 } 1107 1108 static int ath10k_recalc_rtscts_prot(struct ath10k_vif *arvif) 1109 { 1110 struct ath10k *ar = arvif->ar; 1111 u32 vdev_param, rts_cts = 0; 1112 1113 lockdep_assert_held(&ar->conf_mutex); 1114 1115 vdev_param = ar->wmi.vdev_param->enable_rtscts; 1116 1117 rts_cts |= SM(WMI_RTSCTS_ENABLED, WMI_RTSCTS_SET); 1118 1119 if (arvif->num_legacy_stations > 0) 1120 rts_cts |= SM(WMI_RTSCTS_ACROSS_SW_RETRIES, 1121 WMI_RTSCTS_PROFILE); 1122 else 1123 rts_cts |= SM(WMI_RTSCTS_FOR_SECOND_RATESERIES, 1124 WMI_RTSCTS_PROFILE); 1125 1126 return ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 1127 rts_cts); 1128 } 1129 1130 static int ath10k_start_cac(struct ath10k *ar) 1131 { 1132 int ret; 1133 1134 lockdep_assert_held(&ar->conf_mutex); 1135 1136 set_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1137 1138 ret = ath10k_monitor_recalc(ar); 1139 if (ret) { 1140 ath10k_warn(ar, "failed to start monitor (cac): %d\n", ret); 1141 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1142 return ret; 1143 } 1144 1145 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac start monitor vdev %d\n", 1146 ar->monitor_vdev_id); 1147 1148 return 0; 1149 } 1150 1151 static int ath10k_stop_cac(struct ath10k *ar) 1152 { 1153 lockdep_assert_held(&ar->conf_mutex); 1154 1155 /* CAC is not running - do nothing */ 1156 if (!test_bit(ATH10K_CAC_RUNNING, &ar->dev_flags)) 1157 return 0; 1158 1159 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 1160 ath10k_monitor_stop(ar); 1161 1162 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac cac finished\n"); 1163 1164 return 0; 1165 } 1166 1167 static void ath10k_mac_has_radar_iter(struct ieee80211_hw *hw, 1168 struct ieee80211_chanctx_conf *conf, 1169 void *data) 1170 { 1171 bool *ret = data; 1172 1173 if (!*ret && conf->radar_enabled) 1174 *ret = true; 1175 } 1176 1177 static bool ath10k_mac_has_radar_enabled(struct ath10k *ar) 1178 { 1179 bool has_radar = false; 1180 1181 ieee80211_iter_chan_contexts_atomic(ar->hw, 1182 ath10k_mac_has_radar_iter, 1183 &has_radar); 1184 1185 return has_radar; 1186 } 1187 1188 static void ath10k_recalc_radar_detection(struct ath10k *ar) 1189 { 1190 int ret; 1191 1192 lockdep_assert_held(&ar->conf_mutex); 1193 1194 ath10k_stop_cac(ar); 1195 1196 if (!ath10k_mac_has_radar_enabled(ar)) 1197 return; 1198 1199 if (ar->num_started_vdevs > 0) 1200 return; 1201 1202 ret = ath10k_start_cac(ar); 1203 if (ret) { 1204 /* 1205 * Not possible to start CAC on current channel so starting 1206 * radiation is not allowed, make this channel DFS_UNAVAILABLE 1207 * by indicating that radar was detected. 
		 */
		ath10k_warn(ar, "failed to start CAC: %d\n", ret);
		ieee80211_radar_detected(ar->hw);
	}
}

static int ath10k_vdev_stop(struct ath10k_vif *arvif)
{
	struct ath10k *ar = arvif->ar;
	int ret;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	ret = ath10k_wmi_vdev_stop(ar, arvif->vdev_id);
	if (ret) {
		ath10k_warn(ar, "failed to stop WMI vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar, "failed to synchronize setup for vdev %i: %d\n",
			    arvif->vdev_id, ret);
		return ret;
	}

	WARN_ON(ar->num_started_vdevs == 0);

	if (ar->num_started_vdevs != 0) {
		ar->num_started_vdevs--;
		ath10k_recalc_radar_detection(ar);
	}

	return ret;
}

static int ath10k_vdev_start_restart(struct ath10k_vif *arvif,
				     const struct cfg80211_chan_def *chandef,
				     bool restart)
{
	struct ath10k *ar = arvif->ar;
	struct wmi_vdev_start_request_arg arg = {};
	int ret = 0;

	lockdep_assert_held(&ar->conf_mutex);

	reinit_completion(&ar->vdev_setup_done);

	arg.vdev_id = arvif->vdev_id;
	arg.dtim_period = arvif->dtim_period;
	arg.bcn_intval = arvif->beacon_interval;

	arg.channel.freq = chandef->chan->center_freq;
	arg.channel.band_center_freq1 = chandef->center_freq1;
	arg.channel.mode = chan_to_phymode(chandef);

	arg.channel.min_power = 0;
	arg.channel.max_power = chandef->chan->max_power * 2;
	arg.channel.max_reg_power = chandef->chan->max_reg_power * 2;
	arg.channel.max_antenna_gain = chandef->chan->max_antenna_gain * 2;

	if (arvif->vdev_type == WMI_VDEV_TYPE_AP) {
		arg.ssid = arvif->u.ap.ssid;
		arg.ssid_len = arvif->u.ap.ssid_len;
		arg.hidden_ssid = arvif->u.ap.hidden_ssid;

		/* For now allow DFS for AP mode */
		arg.channel.chan_radar =
			!!(chandef->chan->flags & IEEE80211_CHAN_RADAR);
	} else if (arvif->vdev_type == WMI_VDEV_TYPE_IBSS) {
		arg.ssid = arvif->vif->bss_conf.ssid;
		arg.ssid_len = arvif->vif->bss_conf.ssid_len;
	}

	ath10k_dbg(ar, ATH10K_DBG_MAC,
		   "mac vdev %d start center_freq %d phymode %s\n",
		   arg.vdev_id, arg.channel.freq,
		   ath10k_wmi_phymode_str(arg.channel.mode));

	if (restart)
		ret = ath10k_wmi_vdev_restart(ar, &arg);
	else
		ret = ath10k_wmi_vdev_start(ar, &arg);

	if (ret) {
		ath10k_warn(ar, "failed to start WMI vdev %i: %d\n",
			    arg.vdev_id, ret);
		return ret;
	}

	ret = ath10k_vdev_setup_sync(ar);
	if (ret) {
		ath10k_warn(ar,
			    "failed to synchronize setup for vdev %i restart %d: %d\n",
			    arg.vdev_id, restart, ret);
		return ret;
	}

	ar->num_started_vdevs++;
	ath10k_recalc_radar_detection(ar);

	return ret;
}

static int ath10k_vdev_start(struct ath10k_vif *arvif,
			     const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, false);
}

static int ath10k_vdev_restart(struct ath10k_vif *arvif,
			       const struct cfg80211_chan_def *def)
{
	return ath10k_vdev_start_restart(arvif, def, true);
}

static int ath10k_mac_setup_bcn_p2p_ie(struct ath10k_vif *arvif,
				       struct sk_buff *bcn)
{
	struct ath10k *ar = arvif->ar;
	struct ieee80211_mgmt *mgmt;
	const u8 *p2p_ie;
	int ret;
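	/* Only P2P GO vdevs carry a P2P IE; locate it in the beacon template
	 * below and hand it to firmware via WMI so it can be appended to
	 * offloaded beacons.
	 */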
1334 1335 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1336 return 0; 1337 1338 if (arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 1339 return 0; 1340 1341 mgmt = (void *)bcn->data; 1342 p2p_ie = cfg80211_find_vendor_ie(WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1343 mgmt->u.beacon.variable, 1344 bcn->len - (mgmt->u.beacon.variable - 1345 bcn->data)); 1346 if (!p2p_ie) 1347 return -ENOENT; 1348 1349 ret = ath10k_wmi_p2p_go_bcn_ie(ar, arvif->vdev_id, p2p_ie); 1350 if (ret) { 1351 ath10k_warn(ar, "failed to submit p2p go bcn ie for vdev %i: %d\n", 1352 arvif->vdev_id, ret); 1353 return ret; 1354 } 1355 1356 return 0; 1357 } 1358 1359 static int ath10k_mac_remove_vendor_ie(struct sk_buff *skb, unsigned int oui, 1360 u8 oui_type, size_t ie_offset) 1361 { 1362 size_t len; 1363 const u8 *next; 1364 const u8 *end; 1365 u8 *ie; 1366 1367 if (WARN_ON(skb->len < ie_offset)) 1368 return -EINVAL; 1369 1370 ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, 1371 skb->data + ie_offset, 1372 skb->len - ie_offset); 1373 if (!ie) 1374 return -ENOENT; 1375 1376 len = ie[1] + 2; 1377 end = skb->data + skb->len; 1378 next = ie + len; 1379 1380 if (WARN_ON(next > end)) 1381 return -EINVAL; 1382 1383 memmove(ie, next, end - next); 1384 skb_trim(skb, skb->len - len); 1385 1386 return 0; 1387 } 1388 1389 static int ath10k_mac_setup_bcn_tmpl(struct ath10k_vif *arvif) 1390 { 1391 struct ath10k *ar = arvif->ar; 1392 struct ieee80211_hw *hw = ar->hw; 1393 struct ieee80211_vif *vif = arvif->vif; 1394 struct ieee80211_mutable_offsets offs = {}; 1395 struct sk_buff *bcn; 1396 int ret; 1397 1398 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1399 return 0; 1400 1401 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 1402 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 1403 return 0; 1404 1405 bcn = ieee80211_beacon_get_template(hw, vif, &offs); 1406 if (!bcn) { 1407 ath10k_warn(ar, "failed to get beacon template from mac80211\n"); 1408 return -EPERM; 1409 } 1410 1411 ret = ath10k_mac_setup_bcn_p2p_ie(arvif, bcn); 1412 if (ret) { 1413 ath10k_warn(ar, "failed to setup p2p go bcn ie: %d\n", ret); 1414 kfree_skb(bcn); 1415 return ret; 1416 } 1417 1418 /* P2P IE is inserted by firmware automatically (as configured above) 1419 * so remove it from the base beacon template to avoid duplicate P2P 1420 * IEs in beacon frames. 
1421 */ 1422 ath10k_mac_remove_vendor_ie(bcn, WLAN_OUI_WFA, WLAN_OUI_TYPE_WFA_P2P, 1423 offsetof(struct ieee80211_mgmt, 1424 u.beacon.variable)); 1425 1426 ret = ath10k_wmi_bcn_tmpl(ar, arvif->vdev_id, offs.tim_offset, bcn, 0, 1427 0, NULL, 0); 1428 kfree_skb(bcn); 1429 1430 if (ret) { 1431 ath10k_warn(ar, "failed to submit beacon template command: %d\n", 1432 ret); 1433 return ret; 1434 } 1435 1436 return 0; 1437 } 1438 1439 static int ath10k_mac_setup_prb_tmpl(struct ath10k_vif *arvif) 1440 { 1441 struct ath10k *ar = arvif->ar; 1442 struct ieee80211_hw *hw = ar->hw; 1443 struct ieee80211_vif *vif = arvif->vif; 1444 struct sk_buff *prb; 1445 int ret; 1446 1447 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1448 return 0; 1449 1450 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1451 return 0; 1452 1453 prb = ieee80211_proberesp_get(hw, vif); 1454 if (!prb) { 1455 ath10k_warn(ar, "failed to get probe resp template from mac80211\n"); 1456 return -EPERM; 1457 } 1458 1459 ret = ath10k_wmi_prb_tmpl(ar, arvif->vdev_id, prb); 1460 kfree_skb(prb); 1461 1462 if (ret) { 1463 ath10k_warn(ar, "failed to submit probe resp template command: %d\n", 1464 ret); 1465 return ret; 1466 } 1467 1468 return 0; 1469 } 1470 1471 static int ath10k_mac_vif_fix_hidden_ssid(struct ath10k_vif *arvif) 1472 { 1473 struct ath10k *ar = arvif->ar; 1474 struct cfg80211_chan_def def; 1475 int ret; 1476 1477 /* When originally vdev is started during assign_vif_chanctx() some 1478 * information is missing, notably SSID. Firmware revisions with beacon 1479 * offloading require the SSID to be provided during vdev (re)start to 1480 * handle hidden SSID properly. 1481 * 1482 * Vdev restart must be done after vdev has been both started and 1483 * upped. Otherwise some firmware revisions (at least 10.2) fail to 1484 * deliver vdev restart response event causing timeouts during vdev 1485 * syncing in ath10k. 1486 * 1487 * Note: The vdev down/up and template reinstallation could be skipped 1488 * since only wmi-tlv firmware are known to have beacon offload and 1489 * wmi-tlv doesn't seem to misbehave like 10.2 wrt vdev restart 1490 * response delivery. It's probably more robust to keep it as is. 1491 */ 1492 if (!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) 1493 return 0; 1494 1495 if (WARN_ON(!arvif->is_started)) 1496 return -EINVAL; 1497 1498 if (WARN_ON(!arvif->is_up)) 1499 return -EINVAL; 1500 1501 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 1502 return -EINVAL; 1503 1504 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1505 if (ret) { 1506 ath10k_warn(ar, "failed to bring down ap vdev %i: %d\n", 1507 arvif->vdev_id, ret); 1508 return ret; 1509 } 1510 1511 /* Vdev down reset beacon & presp templates. Reinstall them. Otherwise 1512 * firmware will crash upon vdev up. 
1513 */ 1514 1515 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1516 if (ret) { 1517 ath10k_warn(ar, "failed to update beacon template: %d\n", ret); 1518 return ret; 1519 } 1520 1521 ret = ath10k_mac_setup_prb_tmpl(arvif); 1522 if (ret) { 1523 ath10k_warn(ar, "failed to update presp template: %d\n", ret); 1524 return ret; 1525 } 1526 1527 ret = ath10k_vdev_restart(arvif, &def); 1528 if (ret) { 1529 ath10k_warn(ar, "failed to restart ap vdev %i: %d\n", 1530 arvif->vdev_id, ret); 1531 return ret; 1532 } 1533 1534 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1535 arvif->bssid); 1536 if (ret) { 1537 ath10k_warn(ar, "failed to bring up ap vdev %i: %d\n", 1538 arvif->vdev_id, ret); 1539 return ret; 1540 } 1541 1542 return 0; 1543 } 1544 1545 static void ath10k_control_beaconing(struct ath10k_vif *arvif, 1546 struct ieee80211_bss_conf *info) 1547 { 1548 struct ath10k *ar = arvif->ar; 1549 int ret = 0; 1550 1551 lockdep_assert_held(&arvif->ar->conf_mutex); 1552 1553 if (!info->enable_beacon) { 1554 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 1555 if (ret) 1556 ath10k_warn(ar, "failed to down vdev_id %i: %d\n", 1557 arvif->vdev_id, ret); 1558 1559 arvif->is_up = false; 1560 1561 spin_lock_bh(&arvif->ar->data_lock); 1562 ath10k_mac_vif_beacon_free(arvif); 1563 spin_unlock_bh(&arvif->ar->data_lock); 1564 1565 return; 1566 } 1567 1568 arvif->tx_seq_no = 0x1000; 1569 1570 arvif->aid = 0; 1571 ether_addr_copy(arvif->bssid, info->bssid); 1572 1573 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 1574 arvif->bssid); 1575 if (ret) { 1576 ath10k_warn(ar, "failed to bring up vdev %d: %i\n", 1577 arvif->vdev_id, ret); 1578 return; 1579 } 1580 1581 arvif->is_up = true; 1582 1583 ret = ath10k_mac_vif_fix_hidden_ssid(arvif); 1584 if (ret) { 1585 ath10k_warn(ar, "failed to fix hidden ssid for vdev %i, expect trouble: %d\n", 1586 arvif->vdev_id, ret); 1587 return; 1588 } 1589 1590 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d up\n", arvif->vdev_id); 1591 } 1592 1593 static void ath10k_control_ibss(struct ath10k_vif *arvif, 1594 struct ieee80211_bss_conf *info, 1595 const u8 self_peer[ETH_ALEN]) 1596 { 1597 struct ath10k *ar = arvif->ar; 1598 u32 vdev_param; 1599 int ret = 0; 1600 1601 lockdep_assert_held(&arvif->ar->conf_mutex); 1602 1603 if (!info->ibss_joined) { 1604 if (is_zero_ether_addr(arvif->bssid)) 1605 return; 1606 1607 eth_zero_addr(arvif->bssid); 1608 1609 return; 1610 } 1611 1612 vdev_param = arvif->ar->wmi.vdev_param->atim_window; 1613 ret = ath10k_wmi_vdev_set_param(arvif->ar, arvif->vdev_id, vdev_param, 1614 ATH10K_DEFAULT_ATIM); 1615 if (ret) 1616 ath10k_warn(ar, "failed to set IBSS ATIM for vdev %d: %d\n", 1617 arvif->vdev_id, ret); 1618 } 1619 1620 static int ath10k_mac_vif_recalc_ps_wake_threshold(struct ath10k_vif *arvif) 1621 { 1622 struct ath10k *ar = arvif->ar; 1623 u32 param; 1624 u32 value; 1625 int ret; 1626 1627 lockdep_assert_held(&arvif->ar->conf_mutex); 1628 1629 if (arvif->u.sta.uapsd) 1630 value = WMI_STA_PS_TX_WAKE_THRESHOLD_NEVER; 1631 else 1632 value = WMI_STA_PS_TX_WAKE_THRESHOLD_ALWAYS; 1633 1634 param = WMI_STA_PS_PARAM_TX_WAKE_THRESHOLD; 1635 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, value); 1636 if (ret) { 1637 ath10k_warn(ar, "failed to submit ps wake threshold %u on vdev %i: %d\n", 1638 value, arvif->vdev_id, ret); 1639 return ret; 1640 } 1641 1642 return 0; 1643 } 1644 1645 static int ath10k_mac_vif_recalc_ps_poll_count(struct ath10k_vif *arvif) 1646 { 1647 struct ath10k *ar = arvif->ar; 1648 u32 param; 1649 u32 value; 1650 
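	/* Pick the PS-Poll count setting below based on whether U-APSD is
	 * enabled on this vif: the U-APSD specific value when it is,
	 * otherwise no maximum.
	 */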
int ret; 1651 1652 lockdep_assert_held(&arvif->ar->conf_mutex); 1653 1654 if (arvif->u.sta.uapsd) 1655 value = WMI_STA_PS_PSPOLL_COUNT_UAPSD; 1656 else 1657 value = WMI_STA_PS_PSPOLL_COUNT_NO_MAX; 1658 1659 param = WMI_STA_PS_PARAM_PSPOLL_COUNT; 1660 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 1661 param, value); 1662 if (ret) { 1663 ath10k_warn(ar, "failed to submit ps poll count %u on vdev %i: %d\n", 1664 value, arvif->vdev_id, ret); 1665 return ret; 1666 } 1667 1668 return 0; 1669 } 1670 1671 static int ath10k_mac_ps_vif_count(struct ath10k *ar) 1672 { 1673 struct ath10k_vif *arvif; 1674 int num = 0; 1675 1676 lockdep_assert_held(&ar->conf_mutex); 1677 1678 list_for_each_entry(arvif, &ar->arvifs, list) 1679 if (arvif->ps) 1680 num++; 1681 1682 return num; 1683 } 1684 1685 static int ath10k_mac_vif_setup_ps(struct ath10k_vif *arvif) 1686 { 1687 struct ath10k *ar = arvif->ar; 1688 struct ieee80211_vif *vif = arvif->vif; 1689 struct ieee80211_conf *conf = &ar->hw->conf; 1690 enum wmi_sta_powersave_param param; 1691 enum wmi_sta_ps_mode psmode; 1692 int ret; 1693 int ps_timeout; 1694 bool enable_ps; 1695 1696 lockdep_assert_held(&arvif->ar->conf_mutex); 1697 1698 if (arvif->vif->type != NL80211_IFTYPE_STATION) 1699 return 0; 1700 1701 enable_ps = arvif->ps; 1702 1703 if (enable_ps && ath10k_mac_ps_vif_count(ar) > 1 && 1704 !test_bit(ATH10K_FW_FEATURE_MULTI_VIF_PS_SUPPORT, 1705 ar->fw_features)) { 1706 ath10k_warn(ar, "refusing to enable ps on vdev %i: not supported by fw\n", 1707 arvif->vdev_id); 1708 enable_ps = false; 1709 } 1710 1711 if (!arvif->is_started) { 1712 /* mac80211 can update vif powersave state while disconnected. 1713 * Firmware doesn't behave nicely and consumes more power than 1714 * necessary if PS is disabled on a non-started vdev. Hence 1715 * force-enable PS for non-running vdevs. 1716 */ 1717 psmode = WMI_STA_PS_MODE_ENABLED; 1718 } else if (enable_ps) { 1719 psmode = WMI_STA_PS_MODE_ENABLED; 1720 param = WMI_STA_PS_PARAM_INACTIVITY_TIME; 1721 1722 ps_timeout = conf->dynamic_ps_timeout; 1723 if (ps_timeout == 0) { 1724 /* Firmware doesn't like 0 */ 1725 ps_timeout = ieee80211_tu_to_usec( 1726 vif->bss_conf.beacon_int) / 1000; 1727 } 1728 1729 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, param, 1730 ps_timeout); 1731 if (ret) { 1732 ath10k_warn(ar, "failed to set inactivity time for vdev %d: %i\n", 1733 arvif->vdev_id, ret); 1734 return ret; 1735 } 1736 } else { 1737 psmode = WMI_STA_PS_MODE_DISABLED; 1738 } 1739 1740 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d psmode %s\n", 1741 arvif->vdev_id, psmode ? "enable" : "disable"); 1742 1743 ret = ath10k_wmi_set_psmode(ar, arvif->vdev_id, psmode); 1744 if (ret) { 1745 ath10k_warn(ar, "failed to set PS Mode %d for vdev %d: %d\n", 1746 psmode, arvif->vdev_id, ret); 1747 return ret; 1748 } 1749 1750 return 0; 1751 } 1752 1753 static int ath10k_mac_vif_disable_keepalive(struct ath10k_vif *arvif) 1754 { 1755 struct ath10k *ar = arvif->ar; 1756 struct wmi_sta_keepalive_arg arg = {}; 1757 int ret; 1758 1759 lockdep_assert_held(&arvif->ar->conf_mutex); 1760 1761 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 1762 return 0; 1763 1764 if (!test_bit(WMI_SERVICE_STA_KEEP_ALIVE, ar->wmi.svc_map)) 1765 return 0; 1766 1767 /* Some firmware revisions have a bug and ignore the `enabled` field. 1768 * Instead use the interval to disable the keepalive. 
1769 */ 1770 arg.vdev_id = arvif->vdev_id; 1771 arg.enabled = 1; 1772 arg.method = WMI_STA_KEEPALIVE_METHOD_NULL_FRAME; 1773 arg.interval = WMI_STA_KEEPALIVE_INTERVAL_DISABLE; 1774 1775 ret = ath10k_wmi_sta_keepalive(ar, &arg); 1776 if (ret) { 1777 ath10k_warn(ar, "failed to submit keepalive on vdev %i: %d\n", 1778 arvif->vdev_id, ret); 1779 return ret; 1780 } 1781 1782 return 0; 1783 } 1784 1785 static void ath10k_mac_vif_ap_csa_count_down(struct ath10k_vif *arvif) 1786 { 1787 struct ath10k *ar = arvif->ar; 1788 struct ieee80211_vif *vif = arvif->vif; 1789 int ret; 1790 1791 lockdep_assert_held(&arvif->ar->conf_mutex); 1792 1793 if (WARN_ON(!test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map))) 1794 return; 1795 1796 if (arvif->vdev_type != WMI_VDEV_TYPE_AP) 1797 return; 1798 1799 if (!vif->csa_active) 1800 return; 1801 1802 if (!arvif->is_up) 1803 return; 1804 1805 if (!ieee80211_csa_is_complete(vif)) { 1806 ieee80211_csa_update_counter(vif); 1807 1808 ret = ath10k_mac_setup_bcn_tmpl(arvif); 1809 if (ret) 1810 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 1811 ret); 1812 1813 ret = ath10k_mac_setup_prb_tmpl(arvif); 1814 if (ret) 1815 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 1816 ret); 1817 } else { 1818 ieee80211_csa_finish(vif); 1819 } 1820 } 1821 1822 static void ath10k_mac_vif_ap_csa_work(struct work_struct *work) 1823 { 1824 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1825 ap_csa_work); 1826 struct ath10k *ar = arvif->ar; 1827 1828 mutex_lock(&ar->conf_mutex); 1829 ath10k_mac_vif_ap_csa_count_down(arvif); 1830 mutex_unlock(&ar->conf_mutex); 1831 } 1832 1833 static void ath10k_mac_handle_beacon_iter(void *data, u8 *mac, 1834 struct ieee80211_vif *vif) 1835 { 1836 struct sk_buff *skb = data; 1837 struct ieee80211_mgmt *mgmt = (void *)skb->data; 1838 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1839 1840 if (vif->type != NL80211_IFTYPE_STATION) 1841 return; 1842 1843 if (!ether_addr_equal(mgmt->bssid, vif->bss_conf.bssid)) 1844 return; 1845 1846 cancel_delayed_work(&arvif->connection_loss_work); 1847 } 1848 1849 void ath10k_mac_handle_beacon(struct ath10k *ar, struct sk_buff *skb) 1850 { 1851 ieee80211_iterate_active_interfaces_atomic(ar->hw, 1852 IEEE80211_IFACE_ITER_NORMAL, 1853 ath10k_mac_handle_beacon_iter, 1854 skb); 1855 } 1856 1857 static void ath10k_mac_handle_beacon_miss_iter(void *data, u8 *mac, 1858 struct ieee80211_vif *vif) 1859 { 1860 u32 *vdev_id = data; 1861 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1862 struct ath10k *ar = arvif->ar; 1863 struct ieee80211_hw *hw = ar->hw; 1864 1865 if (arvif->vdev_id != *vdev_id) 1866 return; 1867 1868 if (!arvif->is_up) 1869 return; 1870 1871 ieee80211_beacon_loss(vif); 1872 1873 /* Firmware doesn't report beacon loss events repeatedly. If AP probe 1874 * (done by mac80211) succeeds but beacons do not resume then it 1875 * doesn't make sense to continue operation. Queue connection loss work 1876 * which can be cancelled when beacon is received. 
1877 */ 1878 ieee80211_queue_delayed_work(hw, &arvif->connection_loss_work, 1879 ATH10K_CONNECTION_LOSS_HZ); 1880 } 1881 1882 void ath10k_mac_handle_beacon_miss(struct ath10k *ar, u32 vdev_id) 1883 { 1884 ieee80211_iterate_active_interfaces_atomic(ar->hw, 1885 IEEE80211_IFACE_ITER_NORMAL, 1886 ath10k_mac_handle_beacon_miss_iter, 1887 &vdev_id); 1888 } 1889 1890 static void ath10k_mac_vif_sta_connection_loss_work(struct work_struct *work) 1891 { 1892 struct ath10k_vif *arvif = container_of(work, struct ath10k_vif, 1893 connection_loss_work.work); 1894 struct ieee80211_vif *vif = arvif->vif; 1895 1896 if (!arvif->is_up) 1897 return; 1898 1899 ieee80211_connection_loss(vif); 1900 } 1901 1902 /**********************/ 1903 /* Station management */ 1904 /**********************/ 1905 1906 static u32 ath10k_peer_assoc_h_listen_intval(struct ath10k *ar, 1907 struct ieee80211_vif *vif) 1908 { 1909 /* Some firmware revisions have unstable STA powersave when listen 1910 * interval is set too high (e.g. 5). The symptoms are firmware doesn't 1911 * generate NullFunc frames properly even if buffered frames have been 1912 * indicated in Beacon TIM. Firmware would seldom wake up to pull 1913 * buffered frames. Often pinging the device from AP would simply fail. 1914 * 1915 * As a workaround set it to 1. 1916 */ 1917 if (vif->type == NL80211_IFTYPE_STATION) 1918 return 1; 1919 1920 return ar->hw->conf.listen_interval; 1921 } 1922 1923 static void ath10k_peer_assoc_h_basic(struct ath10k *ar, 1924 struct ieee80211_vif *vif, 1925 struct ieee80211_sta *sta, 1926 struct wmi_peer_assoc_complete_arg *arg) 1927 { 1928 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1929 u32 aid; 1930 1931 lockdep_assert_held(&ar->conf_mutex); 1932 1933 if (vif->type == NL80211_IFTYPE_STATION) 1934 aid = vif->bss_conf.aid; 1935 else 1936 aid = sta->aid; 1937 1938 ether_addr_copy(arg->addr, sta->addr); 1939 arg->vdev_id = arvif->vdev_id; 1940 arg->peer_aid = aid; 1941 arg->peer_flags |= WMI_PEER_AUTH; 1942 arg->peer_listen_intval = ath10k_peer_assoc_h_listen_intval(ar, vif); 1943 arg->peer_num_spatial_streams = 1; 1944 arg->peer_caps = vif->bss_conf.assoc_capability; 1945 } 1946 1947 static void ath10k_peer_assoc_h_crypto(struct ath10k *ar, 1948 struct ieee80211_vif *vif, 1949 struct wmi_peer_assoc_complete_arg *arg) 1950 { 1951 struct ieee80211_bss_conf *info = &vif->bss_conf; 1952 struct cfg80211_chan_def def; 1953 struct cfg80211_bss *bss; 1954 const u8 *rsnie = NULL; 1955 const u8 *wpaie = NULL; 1956 1957 lockdep_assert_held(&ar->conf_mutex); 1958 1959 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 1960 return; 1961 1962 bss = cfg80211_get_bss(ar->hw->wiphy, def.chan, info->bssid, NULL, 0, 1963 IEEE80211_BSS_TYPE_ANY, IEEE80211_PRIVACY_ANY); 1964 if (bss) { 1965 const struct cfg80211_bss_ies *ies; 1966 1967 rcu_read_lock(); 1968 rsnie = ieee80211_bss_get_ie(bss, WLAN_EID_RSN); 1969 1970 ies = rcu_dereference(bss->ies); 1971 1972 wpaie = cfg80211_find_vendor_ie(WLAN_OUI_MICROSOFT, 1973 WLAN_OUI_TYPE_MICROSOFT_WPA, 1974 ies->data, 1975 ies->len); 1976 rcu_read_unlock(); 1977 cfg80211_put_bss(ar->hw->wiphy, bss); 1978 } 1979 1980 /* FIXME: base on RSN IE/WPA IE is a correct idea? 
*/ 1981 if (rsnie || wpaie) { 1982 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: rsn ie found\n", __func__); 1983 arg->peer_flags |= WMI_PEER_NEED_PTK_4_WAY; 1984 } 1985 1986 if (wpaie) { 1987 ath10k_dbg(ar, ATH10K_DBG_WMI, "%s: wpa ie found\n", __func__); 1988 arg->peer_flags |= WMI_PEER_NEED_GTK_2_WAY; 1989 } 1990 } 1991 1992 static void ath10k_peer_assoc_h_rates(struct ath10k *ar, 1993 struct ieee80211_vif *vif, 1994 struct ieee80211_sta *sta, 1995 struct wmi_peer_assoc_complete_arg *arg) 1996 { 1997 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 1998 struct wmi_rate_set_arg *rateset = &arg->peer_legacy_rates; 1999 struct cfg80211_chan_def def; 2000 const struct ieee80211_supported_band *sband; 2001 const struct ieee80211_rate *rates; 2002 enum ieee80211_band band; 2003 u32 ratemask; 2004 u8 rate; 2005 int i; 2006 2007 lockdep_assert_held(&ar->conf_mutex); 2008 2009 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2010 return; 2011 2012 band = def.chan->band; 2013 sband = ar->hw->wiphy->bands[band]; 2014 ratemask = sta->supp_rates[band]; 2015 ratemask &= arvif->bitrate_mask.control[band].legacy; 2016 rates = sband->bitrates; 2017 2018 rateset->num_rates = 0; 2019 2020 for (i = 0; i < 32; i++, ratemask >>= 1, rates++) { 2021 if (!(ratemask & 1)) 2022 continue; 2023 2024 rate = ath10k_mac_bitrate_to_rate(rates->bitrate); 2025 rateset->rates[rateset->num_rates] = rate; 2026 rateset->num_rates++; 2027 } 2028 } 2029 2030 static bool 2031 ath10k_peer_assoc_h_ht_masked(const u8 ht_mcs_mask[IEEE80211_HT_MCS_MASK_LEN]) 2032 { 2033 int nss; 2034 2035 for (nss = 0; nss < IEEE80211_HT_MCS_MASK_LEN; nss++) 2036 if (ht_mcs_mask[nss]) 2037 return false; 2038 2039 return true; 2040 } 2041 2042 static bool 2043 ath10k_peer_assoc_h_vht_masked(const u16 vht_mcs_mask[NL80211_VHT_NSS_MAX]) 2044 { 2045 int nss; 2046 2047 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) 2048 if (vht_mcs_mask[nss]) 2049 return false; 2050 2051 return true; 2052 } 2053 2054 static void ath10k_peer_assoc_h_ht(struct ath10k *ar, 2055 struct ieee80211_vif *vif, 2056 struct ieee80211_sta *sta, 2057 struct wmi_peer_assoc_complete_arg *arg) 2058 { 2059 const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; 2060 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2061 struct cfg80211_chan_def def; 2062 enum ieee80211_band band; 2063 const u8 *ht_mcs_mask; 2064 const u16 *vht_mcs_mask; 2065 int i, n, max_nss; 2066 u32 stbc; 2067 2068 lockdep_assert_held(&ar->conf_mutex); 2069 2070 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2071 return; 2072 2073 if (!ht_cap->ht_supported) 2074 return; 2075 2076 band = def.chan->band; 2077 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2078 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2079 2080 if (ath10k_peer_assoc_h_ht_masked(ht_mcs_mask) && 2081 ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2082 return; 2083 2084 arg->peer_flags |= WMI_PEER_HT; 2085 arg->peer_max_mpdu = (1 << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2086 ht_cap->ampdu_factor)) - 1; 2087 2088 arg->peer_mpdu_density = 2089 ath10k_parse_mpdudensity(ht_cap->ampdu_density); 2090 2091 arg->peer_ht_caps = ht_cap->cap; 2092 arg->peer_rate_caps |= WMI_RC_HT_FLAG; 2093 2094 if (ht_cap->cap & IEEE80211_HT_CAP_LDPC_CODING) 2095 arg->peer_flags |= WMI_PEER_LDPC; 2096 2097 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) { 2098 arg->peer_flags |= WMI_PEER_40MHZ; 2099 arg->peer_rate_caps |= WMI_RC_CW40_FLAG; 2100 } 2101 2102 if (arvif->bitrate_mask.control[band].gi != NL80211_TXRATE_FORCE_LGI) { 2103 if (ht_cap->cap & 
IEEE80211_HT_CAP_SGI_20) 2104 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2105 2106 if (ht_cap->cap & IEEE80211_HT_CAP_SGI_40) 2107 arg->peer_rate_caps |= WMI_RC_SGI_FLAG; 2108 } 2109 2110 if (ht_cap->cap & IEEE80211_HT_CAP_TX_STBC) { 2111 arg->peer_rate_caps |= WMI_RC_TX_STBC_FLAG; 2112 arg->peer_flags |= WMI_PEER_STBC; 2113 } 2114 2115 if (ht_cap->cap & IEEE80211_HT_CAP_RX_STBC) { 2116 stbc = ht_cap->cap & IEEE80211_HT_CAP_RX_STBC; 2117 stbc = stbc >> IEEE80211_HT_CAP_RX_STBC_SHIFT; 2118 stbc = stbc << WMI_RC_RX_STBC_FLAG_S; 2119 arg->peer_rate_caps |= stbc; 2120 arg->peer_flags |= WMI_PEER_STBC; 2121 } 2122 2123 if (ht_cap->mcs.rx_mask[1] && ht_cap->mcs.rx_mask[2]) 2124 arg->peer_rate_caps |= WMI_RC_TS_FLAG; 2125 else if (ht_cap->mcs.rx_mask[1]) 2126 arg->peer_rate_caps |= WMI_RC_DS_FLAG; 2127 2128 for (i = 0, n = 0, max_nss = 0; i < IEEE80211_HT_MCS_MASK_LEN * 8; i++) 2129 if ((ht_cap->mcs.rx_mask[i / 8] & BIT(i % 8)) && 2130 (ht_mcs_mask[i / 8] & BIT(i % 8))) { 2131 max_nss = (i / 8) + 1; 2132 arg->peer_ht_rates.rates[n++] = i; 2133 } 2134 2135 /* 2136 * This is a workaround for HT-enabled STAs which break the spec 2137 * and have no HT capabilities RX mask (no HT RX MCS map). 2138 * 2139 * As per spec, in section 20.3.5 Modulation and coding scheme (MCS), 2140 * MCS 0 through 7 are mandatory in 20MHz with 800 ns GI at all STAs. 2141 * 2142 * Firmware asserts if such situation occurs. 2143 */ 2144 if (n == 0) { 2145 arg->peer_ht_rates.num_rates = 8; 2146 for (i = 0; i < arg->peer_ht_rates.num_rates; i++) 2147 arg->peer_ht_rates.rates[i] = i; 2148 } else { 2149 arg->peer_ht_rates.num_rates = n; 2150 arg->peer_num_spatial_streams = max_nss; 2151 } 2152 2153 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ht peer %pM mcs cnt %d nss %d\n", 2154 arg->addr, 2155 arg->peer_ht_rates.num_rates, 2156 arg->peer_num_spatial_streams); 2157 } 2158 2159 static int ath10k_peer_assoc_qos_ap(struct ath10k *ar, 2160 struct ath10k_vif *arvif, 2161 struct ieee80211_sta *sta) 2162 { 2163 u32 uapsd = 0; 2164 u32 max_sp = 0; 2165 int ret = 0; 2166 2167 lockdep_assert_held(&ar->conf_mutex); 2168 2169 if (sta->wme && sta->uapsd_queues) { 2170 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac uapsd_queues 0x%x max_sp %d\n", 2171 sta->uapsd_queues, sta->max_sp); 2172 2173 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO) 2174 uapsd |= WMI_AP_PS_UAPSD_AC3_DELIVERY_EN | 2175 WMI_AP_PS_UAPSD_AC3_TRIGGER_EN; 2176 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI) 2177 uapsd |= WMI_AP_PS_UAPSD_AC2_DELIVERY_EN | 2178 WMI_AP_PS_UAPSD_AC2_TRIGGER_EN; 2179 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK) 2180 uapsd |= WMI_AP_PS_UAPSD_AC1_DELIVERY_EN | 2181 WMI_AP_PS_UAPSD_AC1_TRIGGER_EN; 2182 if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE) 2183 uapsd |= WMI_AP_PS_UAPSD_AC0_DELIVERY_EN | 2184 WMI_AP_PS_UAPSD_AC0_TRIGGER_EN; 2185 2186 if (sta->max_sp < MAX_WMI_AP_PS_PEER_PARAM_MAX_SP) 2187 max_sp = sta->max_sp; 2188 2189 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2190 sta->addr, 2191 WMI_AP_PS_PEER_PARAM_UAPSD, 2192 uapsd); 2193 if (ret) { 2194 ath10k_warn(ar, "failed to set ap ps peer param uapsd for vdev %i: %d\n", 2195 arvif->vdev_id, ret); 2196 return ret; 2197 } 2198 2199 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, 2200 sta->addr, 2201 WMI_AP_PS_PEER_PARAM_MAX_SP, 2202 max_sp); 2203 if (ret) { 2204 ath10k_warn(ar, "failed to set ap ps peer param max sp for vdev %i: %d\n", 2205 arvif->vdev_id, ret); 2206 return ret; 2207 } 2208 2209 /* TODO setup this based on STA listen interval and 2210 
beacon interval. Currently we don't know 2211 sta->listen_interval - mac80211 patch required. 2212 Currently use 10 seconds */ 2213 ret = ath10k_wmi_set_ap_ps_param(ar, arvif->vdev_id, sta->addr, 2214 WMI_AP_PS_PEER_PARAM_AGEOUT_TIME, 2215 10); 2216 if (ret) { 2217 ath10k_warn(ar, "failed to set ap ps peer param ageout time for vdev %i: %d\n", 2218 arvif->vdev_id, ret); 2219 return ret; 2220 } 2221 } 2222 2223 return 0; 2224 } 2225 2226 static u16 2227 ath10k_peer_assoc_h_vht_limit(u16 tx_mcs_set, 2228 const u16 vht_mcs_limit[NL80211_VHT_NSS_MAX]) 2229 { 2230 int idx_limit; 2231 int nss; 2232 u16 mcs_map; 2233 u16 mcs; 2234 2235 for (nss = 0; nss < NL80211_VHT_NSS_MAX; nss++) { 2236 mcs_map = ath10k_mac_get_max_vht_mcs_map(tx_mcs_set, nss) & 2237 vht_mcs_limit[nss]; 2238 2239 if (mcs_map) 2240 idx_limit = fls(mcs_map) - 1; 2241 else 2242 idx_limit = -1; 2243 2244 switch (idx_limit) { 2245 case 0: /* fall through */ 2246 case 1: /* fall through */ 2247 case 2: /* fall through */ 2248 case 3: /* fall through */ 2249 case 4: /* fall through */ 2250 case 5: /* fall through */ 2251 case 6: /* fall through */ 2252 default: 2253 /* see ath10k_mac_can_set_bitrate_mask() */ 2254 WARN_ON(1); 2255 /* fall through */ 2256 case -1: 2257 mcs = IEEE80211_VHT_MCS_NOT_SUPPORTED; 2258 break; 2259 case 7: 2260 mcs = IEEE80211_VHT_MCS_SUPPORT_0_7; 2261 break; 2262 case 8: 2263 mcs = IEEE80211_VHT_MCS_SUPPORT_0_8; 2264 break; 2265 case 9: 2266 mcs = IEEE80211_VHT_MCS_SUPPORT_0_9; 2267 break; 2268 } 2269 2270 tx_mcs_set &= ~(0x3 << (nss * 2)); 2271 tx_mcs_set |= mcs << (nss * 2); 2272 } 2273 2274 return tx_mcs_set; 2275 } 2276 2277 static void ath10k_peer_assoc_h_vht(struct ath10k *ar, 2278 struct ieee80211_vif *vif, 2279 struct ieee80211_sta *sta, 2280 struct wmi_peer_assoc_complete_arg *arg) 2281 { 2282 const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; 2283 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2284 struct cfg80211_chan_def def; 2285 enum ieee80211_band band; 2286 const u16 *vht_mcs_mask; 2287 u8 ampdu_factor; 2288 2289 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2290 return; 2291 2292 if (!vht_cap->vht_supported) 2293 return; 2294 2295 band = def.chan->band; 2296 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2297 2298 if (ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) 2299 return; 2300 2301 arg->peer_flags |= WMI_PEER_VHT; 2302 2303 if (def.chan->band == IEEE80211_BAND_2GHZ) 2304 arg->peer_flags |= WMI_PEER_VHT_2G; 2305 2306 arg->peer_vht_caps = vht_cap->cap; 2307 2308 ampdu_factor = (vht_cap->cap & 2309 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK) >> 2310 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 2311 2312 /* Workaround: Some Netgear/Linksys 11ac APs set Rx A-MPDU factor to 2313 * zero in VHT IE. Using it would result in degraded throughput. 2314 * arg->peer_max_mpdu at this point contains HT max_mpdu so keep 2315 * it if VHT max_mpdu is smaller. 
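 * Note (added clarification): both values here are maximum A-MPDU lengths in
 * bytes (2^(IEEE80211_HT_MAX_AMPDU_FACTOR + factor) - 1), so the max() below
 * simply keeps the larger of the two limits.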
*/ 2316 arg->peer_max_mpdu = max(arg->peer_max_mpdu, 2317 (1U << (IEEE80211_HT_MAX_AMPDU_FACTOR + 2318 ampdu_factor)) - 1); 2319 2320 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2321 arg->peer_flags |= WMI_PEER_80MHZ; 2322 2323 arg->peer_vht_rates.rx_max_rate = 2324 __le16_to_cpu(vht_cap->vht_mcs.rx_highest); 2325 arg->peer_vht_rates.rx_mcs_set = 2326 __le16_to_cpu(vht_cap->vht_mcs.rx_mcs_map); 2327 arg->peer_vht_rates.tx_max_rate = 2328 __le16_to_cpu(vht_cap->vht_mcs.tx_highest); 2329 arg->peer_vht_rates.tx_mcs_set = ath10k_peer_assoc_h_vht_limit( 2330 __le16_to_cpu(vht_cap->vht_mcs.tx_mcs_map), vht_mcs_mask); 2331 2332 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vht peer %pM max_mpdu %d flags 0x%x\n", 2333 sta->addr, arg->peer_max_mpdu, arg->peer_flags); 2334 } 2335 2336 static void ath10k_peer_assoc_h_qos(struct ath10k *ar, 2337 struct ieee80211_vif *vif, 2338 struct ieee80211_sta *sta, 2339 struct wmi_peer_assoc_complete_arg *arg) 2340 { 2341 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2342 2343 switch (arvif->vdev_type) { 2344 case WMI_VDEV_TYPE_AP: 2345 if (sta->wme) 2346 arg->peer_flags |= WMI_PEER_QOS; 2347 2348 if (sta->wme && sta->uapsd_queues) { 2349 arg->peer_flags |= WMI_PEER_APSD; 2350 arg->peer_rate_caps |= WMI_RC_UAPSD_FLAG; 2351 } 2352 break; 2353 case WMI_VDEV_TYPE_STA: 2354 if (vif->bss_conf.qos) 2355 arg->peer_flags |= WMI_PEER_QOS; 2356 break; 2357 case WMI_VDEV_TYPE_IBSS: 2358 if (sta->wme) 2359 arg->peer_flags |= WMI_PEER_QOS; 2360 break; 2361 default: 2362 break; 2363 } 2364 2365 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM qos %d\n", 2366 sta->addr, !!(arg->peer_flags & WMI_PEER_QOS)); 2367 } 2368 2369 static bool ath10k_mac_sta_has_ofdm_only(struct ieee80211_sta *sta) 2370 { 2371 return sta->supp_rates[IEEE80211_BAND_2GHZ] >> 2372 ATH10K_MAC_FIRST_OFDM_RATE_IDX; 2373 } 2374 2375 static void ath10k_peer_assoc_h_phymode(struct ath10k *ar, 2376 struct ieee80211_vif *vif, 2377 struct ieee80211_sta *sta, 2378 struct wmi_peer_assoc_complete_arg *arg) 2379 { 2380 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2381 struct cfg80211_chan_def def; 2382 enum ieee80211_band band; 2383 const u8 *ht_mcs_mask; 2384 const u16 *vht_mcs_mask; 2385 enum wmi_phy_mode phymode = MODE_UNKNOWN; 2386 2387 if (WARN_ON(ath10k_mac_vif_chan(vif, &def))) 2388 return; 2389 2390 band = def.chan->band; 2391 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 2392 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 2393 2394 switch (band) { 2395 case IEEE80211_BAND_2GHZ: 2396 if (sta->vht_cap.vht_supported && 2397 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2398 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2399 phymode = MODE_11AC_VHT40; 2400 else 2401 phymode = MODE_11AC_VHT20; 2402 } else if (sta->ht_cap.ht_supported && 2403 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2404 if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2405 phymode = MODE_11NG_HT40; 2406 else 2407 phymode = MODE_11NG_HT20; 2408 } else if (ath10k_mac_sta_has_ofdm_only(sta)) { 2409 phymode = MODE_11G; 2410 } else { 2411 phymode = MODE_11B; 2412 } 2413 2414 break; 2415 case IEEE80211_BAND_5GHZ: 2416 /* 2417 * Check VHT first. 
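 * A VHT capable peer also advertises HT support, so checking VHT first
 * picks the highest phymode; HT and plain 11a act as fallbacks below.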
2418 */ 2419 if (sta->vht_cap.vht_supported && 2420 !ath10k_peer_assoc_h_vht_masked(vht_mcs_mask)) { 2421 if (sta->bandwidth == IEEE80211_STA_RX_BW_80) 2422 phymode = MODE_11AC_VHT80; 2423 else if (sta->bandwidth == IEEE80211_STA_RX_BW_40) 2424 phymode = MODE_11AC_VHT40; 2425 else if (sta->bandwidth == IEEE80211_STA_RX_BW_20) 2426 phymode = MODE_11AC_VHT20; 2427 } else if (sta->ht_cap.ht_supported && 2428 !ath10k_peer_assoc_h_ht_masked(ht_mcs_mask)) { 2429 if (sta->bandwidth >= IEEE80211_STA_RX_BW_40) 2430 phymode = MODE_11NA_HT40; 2431 else 2432 phymode = MODE_11NA_HT20; 2433 } else { 2434 phymode = MODE_11A; 2435 } 2436 2437 break; 2438 default: 2439 break; 2440 } 2441 2442 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac peer %pM phymode %s\n", 2443 sta->addr, ath10k_wmi_phymode_str(phymode)); 2444 2445 arg->peer_phymode = phymode; 2446 WARN_ON(phymode == MODE_UNKNOWN); 2447 } 2448 2449 static int ath10k_peer_assoc_prepare(struct ath10k *ar, 2450 struct ieee80211_vif *vif, 2451 struct ieee80211_sta *sta, 2452 struct wmi_peer_assoc_complete_arg *arg) 2453 { 2454 lockdep_assert_held(&ar->conf_mutex); 2455 2456 memset(arg, 0, sizeof(*arg)); 2457 2458 ath10k_peer_assoc_h_basic(ar, vif, sta, arg); 2459 ath10k_peer_assoc_h_crypto(ar, vif, arg); 2460 ath10k_peer_assoc_h_rates(ar, vif, sta, arg); 2461 ath10k_peer_assoc_h_ht(ar, vif, sta, arg); 2462 ath10k_peer_assoc_h_vht(ar, vif, sta, arg); 2463 ath10k_peer_assoc_h_qos(ar, vif, sta, arg); 2464 ath10k_peer_assoc_h_phymode(ar, vif, sta, arg); 2465 2466 return 0; 2467 } 2468 2469 static const u32 ath10k_smps_map[] = { 2470 [WLAN_HT_CAP_SM_PS_STATIC] = WMI_PEER_SMPS_STATIC, 2471 [WLAN_HT_CAP_SM_PS_DYNAMIC] = WMI_PEER_SMPS_DYNAMIC, 2472 [WLAN_HT_CAP_SM_PS_INVALID] = WMI_PEER_SMPS_PS_NONE, 2473 [WLAN_HT_CAP_SM_PS_DISABLED] = WMI_PEER_SMPS_PS_NONE, 2474 }; 2475 2476 static int ath10k_setup_peer_smps(struct ath10k *ar, struct ath10k_vif *arvif, 2477 const u8 *addr, 2478 const struct ieee80211_sta_ht_cap *ht_cap) 2479 { 2480 int smps; 2481 2482 if (!ht_cap->ht_supported) 2483 return 0; 2484 2485 smps = ht_cap->cap & IEEE80211_HT_CAP_SM_PS; 2486 smps >>= IEEE80211_HT_CAP_SM_PS_SHIFT; 2487 2488 if (smps >= ARRAY_SIZE(ath10k_smps_map)) 2489 return -EINVAL; 2490 2491 return ath10k_wmi_peer_set_param(ar, arvif->vdev_id, addr, 2492 WMI_PEER_SMPS_STATE, 2493 ath10k_smps_map[smps]); 2494 } 2495 2496 static int ath10k_mac_vif_recalc_txbf(struct ath10k *ar, 2497 struct ieee80211_vif *vif, 2498 struct ieee80211_sta_vht_cap vht_cap) 2499 { 2500 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2501 int ret; 2502 u32 param; 2503 u32 value; 2504 2505 if (!(ar->vht_cap_info & 2506 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2507 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE | 2508 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2509 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE))) 2510 return 0; 2511 2512 param = ar->wmi.vdev_param->txbf; 2513 value = 0; 2514 2515 if (WARN_ON(param == WMI_VDEV_PARAM_UNSUPPORTED)) 2516 return 0; 2517 2518 /* The following logic is correct. If a remote STA advertises support 2519 * for being a beamformer then we should enable us being a beamformee. 
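 * In other words: the peer's (SU/MU) beamformer capability enables our
 * beamformee bits and its beamformee capability enables our beamformer
 * bits, limited to the roles advertised in our own vht_cap_info.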
2520 */ 2521 2522 if (ar->vht_cap_info & 2523 (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 2524 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 2525 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE) 2526 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2527 2528 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE) 2529 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFEE; 2530 } 2531 2532 if (ar->vht_cap_info & 2533 (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 2534 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 2535 if (vht_cap.cap & IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE) 2536 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2537 2538 if (vht_cap.cap & IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE) 2539 value |= WMI_VDEV_PARAM_TXBF_MU_TX_BFER; 2540 } 2541 2542 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFEE) 2543 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFEE; 2544 2545 if (value & WMI_VDEV_PARAM_TXBF_MU_TX_BFER) 2546 value |= WMI_VDEV_PARAM_TXBF_SU_TX_BFER; 2547 2548 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, param, value); 2549 if (ret) { 2550 ath10k_warn(ar, "failed to submit vdev param txbf 0x%x: %d\n", 2551 value, ret); 2552 return ret; 2553 } 2554 2555 return 0; 2556 } 2557 2558 /* can be called only in mac80211 callbacks due to `key_count` usage */ 2559 static void ath10k_bss_assoc(struct ieee80211_hw *hw, 2560 struct ieee80211_vif *vif, 2561 struct ieee80211_bss_conf *bss_conf) 2562 { 2563 struct ath10k *ar = hw->priv; 2564 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2565 struct ieee80211_sta_ht_cap ht_cap; 2566 struct ieee80211_sta_vht_cap vht_cap; 2567 struct wmi_peer_assoc_complete_arg peer_arg; 2568 struct ieee80211_sta *ap_sta; 2569 int ret; 2570 2571 lockdep_assert_held(&ar->conf_mutex); 2572 2573 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i assoc bssid %pM aid %d\n", 2574 arvif->vdev_id, arvif->bssid, arvif->aid); 2575 2576 rcu_read_lock(); 2577 2578 ap_sta = ieee80211_find_sta(vif, bss_conf->bssid); 2579 if (!ap_sta) { 2580 ath10k_warn(ar, "failed to find station entry for bss %pM vdev %i\n", 2581 bss_conf->bssid, arvif->vdev_id); 2582 rcu_read_unlock(); 2583 return; 2584 } 2585 2586 /* ap_sta must be accessed only within rcu section which must be left 2587 * before calling ath10k_setup_peer_smps() which might sleep. 
*/ 2588 ht_cap = ap_sta->ht_cap; 2589 vht_cap = ap_sta->vht_cap; 2590 2591 ret = ath10k_peer_assoc_prepare(ar, vif, ap_sta, &peer_arg); 2592 if (ret) { 2593 ath10k_warn(ar, "failed to prepare peer assoc for %pM vdev %i: %d\n", 2594 bss_conf->bssid, arvif->vdev_id, ret); 2595 rcu_read_unlock(); 2596 return; 2597 } 2598 2599 rcu_read_unlock(); 2600 2601 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2602 if (ret) { 2603 ath10k_warn(ar, "failed to run peer assoc for %pM vdev %i: %d\n", 2604 bss_conf->bssid, arvif->vdev_id, ret); 2605 return; 2606 } 2607 2608 ret = ath10k_setup_peer_smps(ar, arvif, bss_conf->bssid, &ht_cap); 2609 if (ret) { 2610 ath10k_warn(ar, "failed to setup peer SMPS for vdev %i: %d\n", 2611 arvif->vdev_id, ret); 2612 return; 2613 } 2614 2615 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2616 if (ret) { 2617 ath10k_warn(ar, "failed to recalc txbf for vdev %i on bss %pM: %d\n", 2618 arvif->vdev_id, bss_conf->bssid, ret); 2619 return; 2620 } 2621 2622 ath10k_dbg(ar, ATH10K_DBG_MAC, 2623 "mac vdev %d up (associated) bssid %pM aid %d\n", 2624 arvif->vdev_id, bss_conf->bssid, bss_conf->aid); 2625 2626 WARN_ON(arvif->is_up); 2627 2628 arvif->aid = bss_conf->aid; 2629 ether_addr_copy(arvif->bssid, bss_conf->bssid); 2630 2631 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, arvif->aid, arvif->bssid); 2632 if (ret) { 2633 ath10k_warn(ar, "failed to set vdev %d up: %d\n", 2634 arvif->vdev_id, ret); 2635 return; 2636 } 2637 2638 arvif->is_up = true; 2639 2640 /* Workaround: Some firmware revisions (tested with qca6174 2641 * WLAN.RM.2.0-00073) have buggy powersave state machine and must be 2642 * poked with peer param command. 2643 */ 2644 ret = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, arvif->bssid, 2645 WMI_PEER_DUMMY_VAR, 1); 2646 if (ret) { 2647 ath10k_warn(ar, "failed to poke peer %pM param for ps workaround on vdev %i: %d\n", 2648 arvif->bssid, arvif->vdev_id, ret); 2649 return; 2650 } 2651 } 2652 2653 static void ath10k_bss_disassoc(struct ieee80211_hw *hw, 2654 struct ieee80211_vif *vif) 2655 { 2656 struct ath10k *ar = hw->priv; 2657 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2658 struct ieee80211_sta_vht_cap vht_cap = {}; 2659 int ret; 2660 2661 lockdep_assert_held(&ar->conf_mutex); 2662 2663 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i disassoc bssid %pM\n", 2664 arvif->vdev_id, arvif->bssid); 2665 2666 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 2667 if (ret) 2668 ath10k_warn(ar, "failed to down vdev %i: %d\n", 2669 arvif->vdev_id, ret); 2670 2671 arvif->def_wep_key_idx = -1; 2672 2673 ret = ath10k_mac_vif_recalc_txbf(ar, vif, vht_cap); 2674 if (ret) { 2675 ath10k_warn(ar, "failed to recalc txbf for vdev %i: %d\n", 2676 arvif->vdev_id, ret); 2677 return; 2678 } 2679 2680 arvif->is_up = false; 2681 2682 cancel_delayed_work_sync(&arvif->connection_loss_work); 2683 } 2684 2685 static int ath10k_station_assoc(struct ath10k *ar, 2686 struct ieee80211_vif *vif, 2687 struct ieee80211_sta *sta, 2688 bool reassoc) 2689 { 2690 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2691 struct wmi_peer_assoc_complete_arg peer_arg; 2692 int ret = 0; 2693 2694 lockdep_assert_held(&ar->conf_mutex); 2695 2696 ret = ath10k_peer_assoc_prepare(ar, vif, sta, &peer_arg); 2697 if (ret) { 2698 ath10k_warn(ar, "failed to prepare WMI peer assoc for %pM vdev %i: %i\n", 2699 sta->addr, arvif->vdev_id, ret); 2700 return ret; 2701 } 2702 2703 ret = ath10k_wmi_peer_assoc(ar, &peer_arg); 2704 if (ret) { 2705 ath10k_warn(ar, "failed to run peer assoc for STA %pM vdev %i: %d\n", 2706
sta->addr, arvif->vdev_id, ret); 2707 return ret; 2708 } 2709 2710 /* Re-assoc is run only to update supported rates for given station. It 2711 * doesn't make much sense to reconfigure the peer completely. 2712 */ 2713 if (!reassoc) { 2714 ret = ath10k_setup_peer_smps(ar, arvif, sta->addr, 2715 &sta->ht_cap); 2716 if (ret) { 2717 ath10k_warn(ar, "failed to setup peer SMPS for vdev %d: %d\n", 2718 arvif->vdev_id, ret); 2719 return ret; 2720 } 2721 2722 ret = ath10k_peer_assoc_qos_ap(ar, arvif, sta); 2723 if (ret) { 2724 ath10k_warn(ar, "failed to set qos params for STA %pM for vdev %i: %d\n", 2725 sta->addr, arvif->vdev_id, ret); 2726 return ret; 2727 } 2728 2729 if (!sta->wme) { 2730 arvif->num_legacy_stations++; 2731 ret = ath10k_recalc_rtscts_prot(arvif); 2732 if (ret) { 2733 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2734 arvif->vdev_id, ret); 2735 return ret; 2736 } 2737 } 2738 2739 /* Plumb cached keys only for static WEP */ 2740 if (arvif->def_wep_key_idx != -1) { 2741 ret = ath10k_install_peer_wep_keys(arvif, sta->addr); 2742 if (ret) { 2743 ath10k_warn(ar, "failed to install peer wep keys for vdev %i: %d\n", 2744 arvif->vdev_id, ret); 2745 return ret; 2746 } 2747 } 2748 } 2749 2750 return ret; 2751 } 2752 2753 static int ath10k_station_disassoc(struct ath10k *ar, 2754 struct ieee80211_vif *vif, 2755 struct ieee80211_sta *sta) 2756 { 2757 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2758 int ret = 0; 2759 2760 lockdep_assert_held(&ar->conf_mutex); 2761 2762 if (!sta->wme) { 2763 arvif->num_legacy_stations--; 2764 ret = ath10k_recalc_rtscts_prot(arvif); 2765 if (ret) { 2766 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 2767 arvif->vdev_id, ret); 2768 return ret; 2769 } 2770 } 2771 2772 ret = ath10k_clear_peer_keys(arvif, sta->addr); 2773 if (ret) { 2774 ath10k_warn(ar, "failed to clear all peer wep keys for vdev %i: %d\n", 2775 arvif->vdev_id, ret); 2776 return ret; 2777 } 2778 2779 return ret; 2780 } 2781 2782 /**************/ 2783 /* Regulatory */ 2784 /**************/ 2785 2786 static int ath10k_update_channel_list(struct ath10k *ar) 2787 { 2788 struct ieee80211_hw *hw = ar->hw; 2789 struct ieee80211_supported_band **bands; 2790 enum ieee80211_band band; 2791 struct ieee80211_channel *channel; 2792 struct wmi_scan_chan_list_arg arg = {0}; 2793 struct wmi_channel_arg *ch; 2794 bool passive; 2795 int len; 2796 int ret; 2797 int i; 2798 2799 lockdep_assert_held(&ar->conf_mutex); 2800 2801 bands = hw->wiphy->bands; 2802 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2803 if (!bands[band]) 2804 continue; 2805 2806 for (i = 0; i < bands[band]->n_channels; i++) { 2807 if (bands[band]->channels[i].flags & 2808 IEEE80211_CHAN_DISABLED) 2809 continue; 2810 2811 arg.n_channels++; 2812 } 2813 } 2814 2815 len = sizeof(struct wmi_channel_arg) * arg.n_channels; 2816 arg.channels = kzalloc(len, GFP_KERNEL); 2817 if (!arg.channels) 2818 return -ENOMEM; 2819 2820 ch = arg.channels; 2821 for (band = 0; band < IEEE80211_NUM_BANDS; band++) { 2822 if (!bands[band]) 2823 continue; 2824 2825 for (i = 0; i < bands[band]->n_channels; i++) { 2826 channel = &bands[band]->channels[i]; 2827 2828 if (channel->flags & IEEE80211_CHAN_DISABLED) 2829 continue; 2830 2831 ch->allow_ht = true; 2832 2833 /* FIXME: when should we really allow VHT? 
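 * For now it is simply allowed on every enabled channel.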
*/ 2834 ch->allow_vht = true; 2835 2836 ch->allow_ibss = 2837 !(channel->flags & IEEE80211_CHAN_NO_IR); 2838 2839 ch->ht40plus = 2840 !(channel->flags & IEEE80211_CHAN_NO_HT40PLUS); 2841 2842 ch->chan_radar = 2843 !!(channel->flags & IEEE80211_CHAN_RADAR); 2844 2845 passive = channel->flags & IEEE80211_CHAN_NO_IR; 2846 ch->passive = passive; 2847 2848 ch->freq = channel->center_freq; 2849 ch->band_center_freq1 = channel->center_freq; 2850 ch->min_power = 0; 2851 ch->max_power = channel->max_power * 2; 2852 ch->max_reg_power = channel->max_reg_power * 2; 2853 ch->max_antenna_gain = channel->max_antenna_gain * 2; 2854 ch->reg_class_id = 0; /* FIXME */ 2855 2856 /* FIXME: why use only legacy modes, why not any 2857 * HT/VHT modes? Would that even make any 2858 * difference? */ 2859 if (channel->band == IEEE80211_BAND_2GHZ) 2860 ch->mode = MODE_11G; 2861 else 2862 ch->mode = MODE_11A; 2863 2864 if (WARN_ON_ONCE(ch->mode == MODE_UNKNOWN)) 2865 continue; 2866 2867 ath10k_dbg(ar, ATH10K_DBG_WMI, 2868 "mac channel [%zd/%d] freq %d maxpower %d regpower %d antenna %d mode %d\n", 2869 ch - arg.channels, arg.n_channels, 2870 ch->freq, ch->max_power, ch->max_reg_power, 2871 ch->max_antenna_gain, ch->mode); 2872 2873 ch++; 2874 } 2875 } 2876 2877 ret = ath10k_wmi_scan_chan_list(ar, &arg); 2878 kfree(arg.channels); 2879 2880 return ret; 2881 } 2882 2883 static enum wmi_dfs_region 2884 ath10k_mac_get_dfs_region(enum nl80211_dfs_regions dfs_region) 2885 { 2886 switch (dfs_region) { 2887 case NL80211_DFS_UNSET: 2888 return WMI_UNINIT_DFS_DOMAIN; 2889 case NL80211_DFS_FCC: 2890 return WMI_FCC_DFS_DOMAIN; 2891 case NL80211_DFS_ETSI: 2892 return WMI_ETSI_DFS_DOMAIN; 2893 case NL80211_DFS_JP: 2894 return WMI_MKK4_DFS_DOMAIN; 2895 } 2896 return WMI_UNINIT_DFS_DOMAIN; 2897 } 2898 2899 static void ath10k_regd_update(struct ath10k *ar) 2900 { 2901 struct reg_dmn_pair_mapping *regpair; 2902 int ret; 2903 enum wmi_dfs_region wmi_dfs_reg; 2904 enum nl80211_dfs_regions nl_dfs_reg; 2905 2906 lockdep_assert_held(&ar->conf_mutex); 2907 2908 ret = ath10k_update_channel_list(ar); 2909 if (ret) 2910 ath10k_warn(ar, "failed to update channel list: %d\n", ret); 2911 2912 regpair = ar->ath_common.regulatory.regpair; 2913 2914 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 2915 nl_dfs_reg = ar->dfs_detector->region; 2916 wmi_dfs_reg = ath10k_mac_get_dfs_region(nl_dfs_reg); 2917 } else { 2918 wmi_dfs_reg = WMI_UNINIT_DFS_DOMAIN; 2919 } 2920 2921 /* Target allows setting up per-band regdomain but ath_common provides 2922 * a combined one only */ 2923 ret = ath10k_wmi_pdev_set_regdomain(ar, 2924 regpair->reg_domain, 2925 regpair->reg_domain, /* 2ghz */ 2926 regpair->reg_domain, /* 5ghz */ 2927 regpair->reg_2ghz_ctl, 2928 regpair->reg_5ghz_ctl, 2929 wmi_dfs_reg); 2930 if (ret) 2931 ath10k_warn(ar, "failed to set pdev regdomain: %d\n", ret); 2932 } 2933 2934 static void ath10k_reg_notifier(struct wiphy *wiphy, 2935 struct regulatory_request *request) 2936 { 2937 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 2938 struct ath10k *ar = hw->priv; 2939 bool result; 2940 2941 ath_reg_notifier_apply(wiphy, request, &ar->ath_common.regulatory); 2942 2943 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) { 2944 ath10k_dbg(ar, ATH10K_DBG_REGULATORY, "dfs region 0x%x\n", 2945 request->dfs_region); 2946 result = ar->dfs_detector->set_dfs_domain(ar->dfs_detector, 2947 request->dfs_region); 2948 if (!result) 2949 ath10k_warn(ar, "DFS region 0x%X not supported, will trigger radar for every 
pulse\n", 2950 request->dfs_region); 2951 } 2952 2953 mutex_lock(&ar->conf_mutex); 2954 if (ar->state == ATH10K_STATE_ON) 2955 ath10k_regd_update(ar); 2956 mutex_unlock(&ar->conf_mutex); 2957 } 2958 2959 /***************/ 2960 /* TX handlers */ 2961 /***************/ 2962 2963 void ath10k_mac_tx_lock(struct ath10k *ar, int reason) 2964 { 2965 lockdep_assert_held(&ar->htt.tx_lock); 2966 2967 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 2968 ar->tx_paused |= BIT(reason); 2969 ieee80211_stop_queues(ar->hw); 2970 } 2971 2972 static void ath10k_mac_tx_unlock_iter(void *data, u8 *mac, 2973 struct ieee80211_vif *vif) 2974 { 2975 struct ath10k *ar = data; 2976 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 2977 2978 if (arvif->tx_paused) 2979 return; 2980 2981 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 2982 } 2983 2984 void ath10k_mac_tx_unlock(struct ath10k *ar, int reason) 2985 { 2986 lockdep_assert_held(&ar->htt.tx_lock); 2987 2988 WARN_ON(reason >= ATH10K_TX_PAUSE_MAX); 2989 ar->tx_paused &= ~BIT(reason); 2990 2991 if (ar->tx_paused) 2992 return; 2993 2994 ieee80211_iterate_active_interfaces_atomic(ar->hw, 2995 IEEE80211_IFACE_ITER_RESUME_ALL, 2996 ath10k_mac_tx_unlock_iter, 2997 ar); 2998 } 2999 3000 void ath10k_mac_vif_tx_lock(struct ath10k_vif *arvif, int reason) 3001 { 3002 struct ath10k *ar = arvif->ar; 3003 3004 lockdep_assert_held(&ar->htt.tx_lock); 3005 3006 WARN_ON(reason >= BITS_PER_LONG); 3007 arvif->tx_paused |= BIT(reason); 3008 ieee80211_stop_queue(ar->hw, arvif->vdev_id); 3009 } 3010 3011 void ath10k_mac_vif_tx_unlock(struct ath10k_vif *arvif, int reason) 3012 { 3013 struct ath10k *ar = arvif->ar; 3014 3015 lockdep_assert_held(&ar->htt.tx_lock); 3016 3017 WARN_ON(reason >= BITS_PER_LONG); 3018 arvif->tx_paused &= ~BIT(reason); 3019 3020 if (ar->tx_paused) 3021 return; 3022 3023 if (arvif->tx_paused) 3024 return; 3025 3026 ieee80211_wake_queue(ar->hw, arvif->vdev_id); 3027 } 3028 3029 static void ath10k_mac_vif_handle_tx_pause(struct ath10k_vif *arvif, 3030 enum wmi_tlv_tx_pause_id pause_id, 3031 enum wmi_tlv_tx_pause_action action) 3032 { 3033 struct ath10k *ar = arvif->ar; 3034 3035 lockdep_assert_held(&ar->htt.tx_lock); 3036 3037 switch (pause_id) { 3038 case WMI_TLV_TX_PAUSE_ID_MCC: 3039 case WMI_TLV_TX_PAUSE_ID_P2P_CLI_NOA: 3040 case WMI_TLV_TX_PAUSE_ID_P2P_GO_PS: 3041 case WMI_TLV_TX_PAUSE_ID_AP_PS: 3042 case WMI_TLV_TX_PAUSE_ID_IBSS_PS: 3043 switch (action) { 3044 case WMI_TLV_TX_PAUSE_ACTION_STOP: 3045 ath10k_mac_vif_tx_lock(arvif, pause_id); 3046 break; 3047 case WMI_TLV_TX_PAUSE_ACTION_WAKE: 3048 ath10k_mac_vif_tx_unlock(arvif, pause_id); 3049 break; 3050 default: 3051 ath10k_warn(ar, "received unknown tx pause action %d on vdev %i, ignoring\n", 3052 action, arvif->vdev_id); 3053 break; 3054 } 3055 break; 3056 case WMI_TLV_TX_PAUSE_ID_AP_PEER_PS: 3057 case WMI_TLV_TX_PAUSE_ID_AP_PEER_UAPSD: 3058 case WMI_TLV_TX_PAUSE_ID_STA_ADD_BA: 3059 case WMI_TLV_TX_PAUSE_ID_HOST: 3060 default: 3061 /* FIXME: Some pause_ids aren't vdev specific. Instead they 3062 * target peer_id and tid. Implementing these could improve 3063 * traffic scheduling fairness across multiple connected 3064 * stations in AP/IBSS modes. 
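 * For now these pause ids are only logged below and otherwise ignored.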
3065 */ 3066 ath10k_dbg(ar, ATH10K_DBG_MAC, 3067 "mac ignoring unsupported tx pause vdev %i id %d\n", 3068 arvif->vdev_id, pause_id); 3069 break; 3070 } 3071 } 3072 3073 struct ath10k_mac_tx_pause { 3074 u32 vdev_id; 3075 enum wmi_tlv_tx_pause_id pause_id; 3076 enum wmi_tlv_tx_pause_action action; 3077 }; 3078 3079 static void ath10k_mac_handle_tx_pause_iter(void *data, u8 *mac, 3080 struct ieee80211_vif *vif) 3081 { 3082 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 3083 struct ath10k_mac_tx_pause *arg = data; 3084 3085 ath10k_mac_vif_handle_tx_pause(arvif, arg->pause_id, arg->action); 3086 } 3087 3088 void ath10k_mac_handle_tx_pause(struct ath10k *ar, u32 vdev_id, 3089 enum wmi_tlv_tx_pause_id pause_id, 3090 enum wmi_tlv_tx_pause_action action) 3091 { 3092 struct ath10k_mac_tx_pause arg = { 3093 .vdev_id = vdev_id, 3094 .pause_id = pause_id, 3095 .action = action, 3096 }; 3097 3098 spin_lock_bh(&ar->htt.tx_lock); 3099 ieee80211_iterate_active_interfaces_atomic(ar->hw, 3100 IEEE80211_IFACE_ITER_RESUME_ALL, 3101 ath10k_mac_handle_tx_pause_iter, 3102 &arg); 3103 spin_unlock_bh(&ar->htt.tx_lock); 3104 } 3105 3106 static u8 ath10k_tx_h_get_tid(struct ieee80211_hdr *hdr) 3107 { 3108 if (ieee80211_is_mgmt(hdr->frame_control)) 3109 return HTT_DATA_TX_EXT_TID_MGMT; 3110 3111 if (!ieee80211_is_data_qos(hdr->frame_control)) 3112 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 3113 3114 if (!is_unicast_ether_addr(ieee80211_get_DA(hdr))) 3115 return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 3116 3117 return ieee80211_get_qos_ctl(hdr)[0] & IEEE80211_QOS_CTL_TID_MASK; 3118 } 3119 3120 static u8 ath10k_tx_h_get_vdev_id(struct ath10k *ar, struct ieee80211_vif *vif) 3121 { 3122 if (vif) 3123 return ath10k_vif_to_arvif(vif)->vdev_id; 3124 3125 if (ar->monitor_started) 3126 return ar->monitor_vdev_id; 3127 3128 ath10k_warn(ar, "failed to resolve vdev id\n"); 3129 return 0; 3130 } 3131 3132 static enum ath10k_hw_txrx_mode 3133 ath10k_tx_h_get_txmode(struct ath10k *ar, struct ieee80211_vif *vif, 3134 struct ieee80211_sta *sta, struct sk_buff *skb) 3135 { 3136 const struct ieee80211_hdr *hdr = (void *)skb->data; 3137 __le16 fc = hdr->frame_control; 3138 3139 if (!vif || vif->type == NL80211_IFTYPE_MONITOR) 3140 return ATH10K_HW_TXRX_RAW; 3141 3142 if (ieee80211_is_mgmt(fc)) 3143 return ATH10K_HW_TXRX_MGMT; 3144 3145 /* Workaround: 3146 * 3147 * NullFunc frames are mostly used to ping if a client or AP is still 3148 * reachable and responsive. This implies tx status reports must be 3149 * accurate - otherwise either mac80211 or userspace (e.g. hostapd) can 3150 * come to a conclusion that the other end disappeared and tear down 3151 * BSS connection or it can never disconnect from BSS/client (which is 3152 * the case). 3153 * 3154 * Firmware with HTT older than 3.0 delivers incorrect tx status for 3155 * NullFunc frames to driver. However there's an HTT Mgmt Tx command 3156 * which seems to deliver correct tx reports for NullFunc frames. The 3157 * downside of using it is it ignores client powersave state so it can 3158 * end up disconnecting sleeping clients in AP mode. It should fix STA 3159 * mode though because APs don't sleep.
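 * The check below therefore routes NullFunc frames through the mgmt tx
 * path for such firmware.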
3160 */ 3161 if (ar->htt.target_version_major < 3 && 3162 (ieee80211_is_nullfunc(fc) || ieee80211_is_qos_nullfunc(fc)) && 3163 !test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, ar->fw_features)) 3164 return ATH10K_HW_TXRX_MGMT; 3165 3166 /* Workaround: 3167 * 3168 * Some wmi-tlv firmwares for qca6174 have broken Tx key selection for 3169 * NativeWifi txmode - it selects AP key instead of peer key. It seems 3170 * to work with Ethernet txmode so use it. 3171 */ 3172 if (ieee80211_is_data_present(fc) && sta && sta->tdls) 3173 return ATH10K_HW_TXRX_ETHERNET; 3174 3175 return ATH10K_HW_TXRX_NATIVE_WIFI; 3176 } 3177 3178 /* HTT Tx uses Native Wifi tx mode which expects 802.11 frames without QoS 3179 * Control in the header. 3180 */ 3181 static void ath10k_tx_h_nwifi(struct ieee80211_hw *hw, struct sk_buff *skb) 3182 { 3183 struct ieee80211_hdr *hdr = (void *)skb->data; 3184 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3185 u8 *qos_ctl; 3186 3187 if (!ieee80211_is_data_qos(hdr->frame_control)) 3188 return; 3189 3190 qos_ctl = ieee80211_get_qos_ctl(hdr); 3191 memmove(skb->data + IEEE80211_QOS_CTL_LEN, 3192 skb->data, (void *)qos_ctl - (void *)skb->data); 3193 skb_pull(skb, IEEE80211_QOS_CTL_LEN); 3194 3195 /* Some firmware revisions don't handle sending QoS NullFunc well. 3196 * These frames are mainly used for CQM purposes so it doesn't really 3197 * matter whether QoS NullFunc or NullFunc are sent. 3198 */ 3199 hdr = (void *)skb->data; 3200 if (ieee80211_is_qos_nullfunc(hdr->frame_control)) 3201 cb->htt.tid = HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST; 3202 3203 hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA); 3204 } 3205 3206 static void ath10k_tx_h_8023(struct sk_buff *skb) 3207 { 3208 struct ieee80211_hdr *hdr; 3209 struct rfc1042_hdr *rfc1042; 3210 struct ethhdr *eth; 3211 size_t hdrlen; 3212 u8 da[ETH_ALEN]; 3213 u8 sa[ETH_ALEN]; 3214 __be16 type; 3215 3216 hdr = (void *)skb->data; 3217 hdrlen = ieee80211_hdrlen(hdr->frame_control); 3218 rfc1042 = (void *)skb->data + hdrlen; 3219 3220 ether_addr_copy(da, ieee80211_get_DA(hdr)); 3221 ether_addr_copy(sa, ieee80211_get_SA(hdr)); 3222 type = rfc1042->snap_type; 3223 3224 skb_pull(skb, hdrlen + sizeof(*rfc1042)); 3225 skb_push(skb, sizeof(*eth)); 3226 3227 eth = (void *)skb->data; 3228 ether_addr_copy(eth->h_dest, da); 3229 ether_addr_copy(eth->h_source, sa); 3230 eth->h_proto = type; 3231 } 3232 3233 static void ath10k_tx_h_add_p2p_noa_ie(struct ath10k *ar, 3234 struct ieee80211_vif *vif, 3235 struct sk_buff *skb) 3236 { 3237 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3238 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 3239 3240 /* This is case only for P2P_GO */ 3241 if (arvif->vdev_type != WMI_VDEV_TYPE_AP || 3242 arvif->vdev_subtype != WMI_VDEV_SUBTYPE_P2P_GO) 3243 return; 3244 3245 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { 3246 spin_lock_bh(&ar->data_lock); 3247 if (arvif->u.ap.noa_data) 3248 if (!pskb_expand_head(skb, 0, arvif->u.ap.noa_len, 3249 GFP_ATOMIC)) 3250 memcpy(skb_put(skb, arvif->u.ap.noa_len), 3251 arvif->u.ap.noa_data, 3252 arvif->u.ap.noa_len); 3253 spin_unlock_bh(&ar->data_lock); 3254 } 3255 } 3256 3257 static bool ath10k_mac_need_offchan_tx_work(struct ath10k *ar) 3258 { 3259 /* FIXME: Not really sure since when the behaviour changed. At some 3260 * point new firmware stopped requiring creation of peer entries for 3261 * offchannel tx (and actually creating them causes issues with wmi-htc 3262 * tx credit replenishment and reliability). 
Assuming it's at least 3.4 3263 * because that's when the `freq` was introduced to TX_FRM HTT command. 3264 */ 3265 return !(ar->htt.target_version_major >= 3 && 3266 ar->htt.target_version_minor >= 4); 3267 } 3268 3269 static int ath10k_mac_tx_wmi_mgmt(struct ath10k *ar, struct sk_buff *skb) 3270 { 3271 struct sk_buff_head *q = &ar->wmi_mgmt_tx_queue; 3272 int ret = 0; 3273 3274 spin_lock_bh(&ar->data_lock); 3275 3276 if (skb_queue_len(q) == ATH10K_MAX_NUM_MGMT_PENDING) { 3277 ath10k_warn(ar, "wmi mgmt tx queue is full\n"); 3278 ret = -ENOSPC; 3279 goto unlock; 3280 } 3281 3282 __skb_queue_tail(q, skb); 3283 ieee80211_queue_work(ar->hw, &ar->wmi_mgmt_tx_work); 3284 3285 unlock: 3286 spin_unlock_bh(&ar->data_lock); 3287 3288 return ret; 3289 } 3290 3291 static void ath10k_mac_tx(struct ath10k *ar, struct sk_buff *skb) 3292 { 3293 struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb); 3294 struct ath10k_htt *htt = &ar->htt; 3295 int ret = 0; 3296 3297 switch (cb->txmode) { 3298 case ATH10K_HW_TXRX_RAW: 3299 case ATH10K_HW_TXRX_NATIVE_WIFI: 3300 case ATH10K_HW_TXRX_ETHERNET: 3301 ret = ath10k_htt_tx(htt, skb); 3302 break; 3303 case ATH10K_HW_TXRX_MGMT: 3304 if (test_bit(ATH10K_FW_FEATURE_HAS_WMI_MGMT_TX, 3305 ar->fw_features)) 3306 ret = ath10k_mac_tx_wmi_mgmt(ar, skb); 3307 else if (ar->htt.target_version_major >= 3) 3308 ret = ath10k_htt_tx(htt, skb); 3309 else 3310 ret = ath10k_htt_mgmt_tx(htt, skb); 3311 break; 3312 } 3313 3314 if (ret) { 3315 ath10k_warn(ar, "failed to transmit packet, dropping: %d\n", 3316 ret); 3317 ieee80211_free_txskb(ar->hw, skb); 3318 } 3319 } 3320 3321 void ath10k_offchan_tx_purge(struct ath10k *ar) 3322 { 3323 struct sk_buff *skb; 3324 3325 for (;;) { 3326 skb = skb_dequeue(&ar->offchan_tx_queue); 3327 if (!skb) 3328 break; 3329 3330 ieee80211_free_txskb(ar->hw, skb); 3331 } 3332 } 3333 3334 void ath10k_offchan_tx_work(struct work_struct *work) 3335 { 3336 struct ath10k *ar = container_of(work, struct ath10k, offchan_tx_work); 3337 struct ath10k_peer *peer; 3338 struct ieee80211_hdr *hdr; 3339 struct sk_buff *skb; 3340 const u8 *peer_addr; 3341 int vdev_id; 3342 int ret; 3343 unsigned long time_left; 3344 3345 /* FW requirement: We must create a peer before FW will send out 3346 * an offchannel frame. Otherwise the frame will be stuck and 3347 * never transmitted. We delete the peer upon tx completion. 3348 * It is unlikely that a peer for offchannel tx will already be 3349 * present. However it may be in some rare cases so account for that. 3350 * Otherwise we might remove a legitimate peer and break stuff. */ 3351 3352 for (;;) { 3353 skb = skb_dequeue(&ar->offchan_tx_queue); 3354 if (!skb) 3355 break; 3356 3357 mutex_lock(&ar->conf_mutex); 3358 3359 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac offchannel skb %p\n", 3360 skb); 3361 3362 hdr = (struct ieee80211_hdr *)skb->data; 3363 peer_addr = ieee80211_get_DA(hdr); 3364 vdev_id = ATH10K_SKB_CB(skb)->vdev_id; 3365 3366 spin_lock_bh(&ar->data_lock); 3367 peer = ath10k_peer_find(ar, vdev_id, peer_addr); 3368 spin_unlock_bh(&ar->data_lock); 3369 3370 if (peer) 3371 /* FIXME: should this use ath10k_warn()? 
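 * A pre-existing peer is not necessarily an error (see the comment at
 * the top of this worker), hence only a debug message for now.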
*/ 3372 ath10k_dbg(ar, ATH10K_DBG_MAC, "peer %pM on vdev %d already present\n", 3373 peer_addr, vdev_id); 3374 3375 if (!peer) { 3376 ret = ath10k_peer_create(ar, vdev_id, peer_addr, 3377 WMI_PEER_TYPE_DEFAULT); 3378 if (ret) 3379 ath10k_warn(ar, "failed to create peer %pM on vdev %d: %d\n", 3380 peer_addr, vdev_id, ret); 3381 } 3382 3383 spin_lock_bh(&ar->data_lock); 3384 reinit_completion(&ar->offchan_tx_completed); 3385 ar->offchan_tx_skb = skb; 3386 spin_unlock_bh(&ar->data_lock); 3387 3388 ath10k_mac_tx(ar, skb); 3389 3390 time_left = 3391 wait_for_completion_timeout(&ar->offchan_tx_completed, 3 * HZ); 3392 if (time_left == 0) 3393 ath10k_warn(ar, "timed out waiting for offchannel skb %p\n", 3394 skb); 3395 3396 if (!peer) { 3397 ret = ath10k_peer_delete(ar, vdev_id, peer_addr); 3398 if (ret) 3399 ath10k_warn(ar, "failed to delete peer %pM on vdev %d: %d\n", 3400 peer_addr, vdev_id, ret); 3401 } 3402 3403 mutex_unlock(&ar->conf_mutex); 3404 } 3405 } 3406 3407 void ath10k_mgmt_over_wmi_tx_purge(struct ath10k *ar) 3408 { 3409 struct sk_buff *skb; 3410 3411 for (;;) { 3412 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3413 if (!skb) 3414 break; 3415 3416 ieee80211_free_txskb(ar->hw, skb); 3417 } 3418 } 3419 3420 void ath10k_mgmt_over_wmi_tx_work(struct work_struct *work) 3421 { 3422 struct ath10k *ar = container_of(work, struct ath10k, wmi_mgmt_tx_work); 3423 struct sk_buff *skb; 3424 int ret; 3425 3426 for (;;) { 3427 skb = skb_dequeue(&ar->wmi_mgmt_tx_queue); 3428 if (!skb) 3429 break; 3430 3431 ret = ath10k_wmi_mgmt_tx(ar, skb); 3432 if (ret) { 3433 ath10k_warn(ar, "failed to transmit management frame via WMI: %d\n", 3434 ret); 3435 ieee80211_free_txskb(ar->hw, skb); 3436 } 3437 } 3438 } 3439 3440 /************/ 3441 /* Scanning */ 3442 /************/ 3443 3444 void __ath10k_scan_finish(struct ath10k *ar) 3445 { 3446 lockdep_assert_held(&ar->data_lock); 3447 3448 switch (ar->scan.state) { 3449 case ATH10K_SCAN_IDLE: 3450 break; 3451 case ATH10K_SCAN_RUNNING: 3452 if (ar->scan.is_roc) 3453 ieee80211_remain_on_channel_expired(ar->hw); 3454 /* fall through */ 3455 case ATH10K_SCAN_ABORTING: 3456 if (!ar->scan.is_roc) 3457 ieee80211_scan_completed(ar->hw, 3458 (ar->scan.state == 3459 ATH10K_SCAN_ABORTING)); 3460 /* fall through */ 3461 case ATH10K_SCAN_STARTING: 3462 ar->scan.state = ATH10K_SCAN_IDLE; 3463 ar->scan_channel = NULL; 3464 ath10k_offchan_tx_purge(ar); 3465 cancel_delayed_work(&ar->scan.timeout); 3466 complete_all(&ar->scan.completed); 3467 break; 3468 } 3469 } 3470 3471 void ath10k_scan_finish(struct ath10k *ar) 3472 { 3473 spin_lock_bh(&ar->data_lock); 3474 __ath10k_scan_finish(ar); 3475 spin_unlock_bh(&ar->data_lock); 3476 } 3477 3478 static int ath10k_scan_stop(struct ath10k *ar) 3479 { 3480 struct wmi_stop_scan_arg arg = { 3481 .req_id = 1, /* FIXME */ 3482 .req_type = WMI_SCAN_STOP_ONE, 3483 .u.scan_id = ATH10K_SCAN_ID, 3484 }; 3485 int ret; 3486 3487 lockdep_assert_held(&ar->conf_mutex); 3488 3489 ret = ath10k_wmi_stop_scan(ar, &arg); 3490 if (ret) { 3491 ath10k_warn(ar, "failed to stop wmi scan: %d\n", ret); 3492 goto out; 3493 } 3494 3495 ret = wait_for_completion_timeout(&ar->scan.completed, 3*HZ); 3496 if (ret == 0) { 3497 ath10k_warn(ar, "failed to receive scan abortion completion: timed out\n"); 3498 ret = -ETIMEDOUT; 3499 } else if (ret > 0) { 3500 ret = 0; 3501 } 3502 3503 out: 3504 /* Scan state should be updated upon scan completion but in case 3505 * firmware fails to deliver the event (for whatever reason) it is 3506 * desired to clean up scan state anyway. 
Firmware may have just 3507 * dropped the scan completion event delivery due to transport pipe 3508 * being overflown with data and/or it can recover on its own before 3509 * next scan request is submitted. 3510 */ 3511 spin_lock_bh(&ar->data_lock); 3512 if (ar->scan.state != ATH10K_SCAN_IDLE) 3513 __ath10k_scan_finish(ar); 3514 spin_unlock_bh(&ar->data_lock); 3515 3516 return ret; 3517 } 3518 3519 static void ath10k_scan_abort(struct ath10k *ar) 3520 { 3521 int ret; 3522 3523 lockdep_assert_held(&ar->conf_mutex); 3524 3525 spin_lock_bh(&ar->data_lock); 3526 3527 switch (ar->scan.state) { 3528 case ATH10K_SCAN_IDLE: 3529 /* This can happen if timeout worker kicked in and called 3530 * abortion while scan completion was being processed. 3531 */ 3532 break; 3533 case ATH10K_SCAN_STARTING: 3534 case ATH10K_SCAN_ABORTING: 3535 ath10k_warn(ar, "refusing scan abortion due to invalid scan state: %s (%d)\n", 3536 ath10k_scan_state_str(ar->scan.state), 3537 ar->scan.state); 3538 break; 3539 case ATH10K_SCAN_RUNNING: 3540 ar->scan.state = ATH10K_SCAN_ABORTING; 3541 spin_unlock_bh(&ar->data_lock); 3542 3543 ret = ath10k_scan_stop(ar); 3544 if (ret) 3545 ath10k_warn(ar, "failed to abort scan: %d\n", ret); 3546 3547 spin_lock_bh(&ar->data_lock); 3548 break; 3549 } 3550 3551 spin_unlock_bh(&ar->data_lock); 3552 } 3553 3554 void ath10k_scan_timeout_work(struct work_struct *work) 3555 { 3556 struct ath10k *ar = container_of(work, struct ath10k, 3557 scan.timeout.work); 3558 3559 mutex_lock(&ar->conf_mutex); 3560 ath10k_scan_abort(ar); 3561 mutex_unlock(&ar->conf_mutex); 3562 } 3563 3564 static int ath10k_start_scan(struct ath10k *ar, 3565 const struct wmi_start_scan_arg *arg) 3566 { 3567 int ret; 3568 3569 lockdep_assert_held(&ar->conf_mutex); 3570 3571 ret = ath10k_wmi_start_scan(ar, arg); 3572 if (ret) 3573 return ret; 3574 3575 ret = wait_for_completion_timeout(&ar->scan.started, 1*HZ); 3576 if (ret == 0) { 3577 ret = ath10k_scan_stop(ar); 3578 if (ret) 3579 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 3580 3581 return -ETIMEDOUT; 3582 } 3583 3584 /* If we failed to start the scan, return error code at 3585 * this point. This is probably due to some issue in the 3586 * firmware, but no need to wedge the driver due to that... 
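 * An IDLE scan state at this point means the scan already finished (or
 * never really started), so it is treated as a failure below.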
3587 */ 3588 spin_lock_bh(&ar->data_lock); 3589 if (ar->scan.state == ATH10K_SCAN_IDLE) { 3590 spin_unlock_bh(&ar->data_lock); 3591 return -EINVAL; 3592 } 3593 spin_unlock_bh(&ar->data_lock); 3594 3595 /* Add a 200ms margin to account for event/command processing */ 3596 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 3597 msecs_to_jiffies(arg->max_scan_time+200)); 3598 return 0; 3599 } 3600 3601 /**********************/ 3602 /* mac80211 callbacks */ 3603 /**********************/ 3604 3605 static void ath10k_tx(struct ieee80211_hw *hw, 3606 struct ieee80211_tx_control *control, 3607 struct sk_buff *skb) 3608 { 3609 struct ath10k *ar = hw->priv; 3610 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 3611 struct ieee80211_vif *vif = info->control.vif; 3612 struct ieee80211_sta *sta = control->sta; 3613 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; 3614 __le16 fc = hdr->frame_control; 3615 3616 /* We should disable CCK RATE due to P2P */ 3617 if (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE) 3618 ath10k_dbg(ar, ATH10K_DBG_MAC, "IEEE80211_TX_CTL_NO_CCK_RATE\n"); 3619 3620 ATH10K_SKB_CB(skb)->htt.is_offchan = false; 3621 ATH10K_SKB_CB(skb)->htt.freq = 0; 3622 ATH10K_SKB_CB(skb)->htt.tid = ath10k_tx_h_get_tid(hdr); 3623 ATH10K_SKB_CB(skb)->vdev_id = ath10k_tx_h_get_vdev_id(ar, vif); 3624 ATH10K_SKB_CB(skb)->txmode = ath10k_tx_h_get_txmode(ar, vif, sta, skb); 3625 ATH10K_SKB_CB(skb)->is_protected = ieee80211_has_protected(fc); 3626 3627 switch (ATH10K_SKB_CB(skb)->txmode) { 3628 case ATH10K_HW_TXRX_MGMT: 3629 case ATH10K_HW_TXRX_NATIVE_WIFI: 3630 ath10k_tx_h_nwifi(hw, skb); 3631 ath10k_tx_h_add_p2p_noa_ie(ar, vif, skb); 3632 ath10k_tx_h_seq_no(vif, skb); 3633 break; 3634 case ATH10K_HW_TXRX_ETHERNET: 3635 ath10k_tx_h_8023(skb); 3636 break; 3637 case ATH10K_HW_TXRX_RAW: 3638 /* FIXME: Packet injection isn't implemented. It should be 3639 * doable with firmware 10.2 on qca988x. 3640 */ 3641 WARN_ON_ONCE(1); 3642 ieee80211_free_txskb(hw, skb); 3643 return; 3644 } 3645 3646 if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) { 3647 spin_lock_bh(&ar->data_lock); 3648 ATH10K_SKB_CB(skb)->htt.freq = ar->scan.roc_freq; 3649 ATH10K_SKB_CB(skb)->vdev_id = ar->scan.vdev_id; 3650 spin_unlock_bh(&ar->data_lock); 3651 3652 if (ath10k_mac_need_offchan_tx_work(ar)) { 3653 ATH10K_SKB_CB(skb)->htt.freq = 0; 3654 ATH10K_SKB_CB(skb)->htt.is_offchan = true; 3655 3656 ath10k_dbg(ar, ATH10K_DBG_MAC, "queued offchannel skb %p\n", 3657 skb); 3658 3659 skb_queue_tail(&ar->offchan_tx_queue, skb); 3660 ieee80211_queue_work(hw, &ar->offchan_tx_work); 3661 return; 3662 } 3663 } 3664 3665 ath10k_mac_tx(ar, skb); 3666 } 3667 3668 /* Must not be called with conf_mutex held as workers can use that also. 
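 * For example, ath10k_offchan_tx_work() takes conf_mutex itself, so
 * cancel_work_sync() on it could deadlock if conf_mutex were held here.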
*/ 3669 void ath10k_drain_tx(struct ath10k *ar) 3670 { 3671 /* make sure rcu-protected mac80211 tx path itself is drained */ 3672 synchronize_net(); 3673 3674 ath10k_offchan_tx_purge(ar); 3675 ath10k_mgmt_over_wmi_tx_purge(ar); 3676 3677 cancel_work_sync(&ar->offchan_tx_work); 3678 cancel_work_sync(&ar->wmi_mgmt_tx_work); 3679 } 3680 3681 void ath10k_halt(struct ath10k *ar) 3682 { 3683 struct ath10k_vif *arvif; 3684 3685 lockdep_assert_held(&ar->conf_mutex); 3686 3687 clear_bit(ATH10K_CAC_RUNNING, &ar->dev_flags); 3688 ar->filter_flags = 0; 3689 ar->monitor = false; 3690 ar->monitor_arvif = NULL; 3691 3692 if (ar->monitor_started) 3693 ath10k_monitor_stop(ar); 3694 3695 ar->monitor_started = false; 3696 ar->tx_paused = 0; 3697 3698 ath10k_scan_finish(ar); 3699 ath10k_peer_cleanup_all(ar); 3700 ath10k_core_stop(ar); 3701 ath10k_hif_power_down(ar); 3702 3703 spin_lock_bh(&ar->data_lock); 3704 list_for_each_entry(arvif, &ar->arvifs, list) 3705 ath10k_mac_vif_beacon_cleanup(arvif); 3706 spin_unlock_bh(&ar->data_lock); 3707 } 3708 3709 static int ath10k_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 3710 { 3711 struct ath10k *ar = hw->priv; 3712 3713 mutex_lock(&ar->conf_mutex); 3714 3715 if (ar->cfg_tx_chainmask) { 3716 *tx_ant = ar->cfg_tx_chainmask; 3717 *rx_ant = ar->cfg_rx_chainmask; 3718 } else { 3719 *tx_ant = ar->supp_tx_chainmask; 3720 *rx_ant = ar->supp_rx_chainmask; 3721 } 3722 3723 mutex_unlock(&ar->conf_mutex); 3724 3725 return 0; 3726 } 3727 3728 static void ath10k_check_chain_mask(struct ath10k *ar, u32 cm, const char *dbg) 3729 { 3730 /* It is not clear that allowing gaps in chainmask 3731 * is helpful. Probably it will not do what the user 3732 * is hoping for, so warn in that case. 3733 */ 3734 if (cm == 15 || cm == 7 || cm == 3 || cm == 1 || cm == 0) 3735 return; 3736 3737 ath10k_warn(ar, "mac %s antenna chainmask may be invalid: 0x%x. Suggested values: 15, 7, 3, 1 or 0.\n", 3738 dbg, cm); 3739 } 3740 3741 static int __ath10k_set_antenna(struct ath10k *ar, u32 tx_ant, u32 rx_ant) 3742 { 3743 int ret; 3744 3745 lockdep_assert_held(&ar->conf_mutex); 3746 3747 ath10k_check_chain_mask(ar, tx_ant, "tx"); 3748 ath10k_check_chain_mask(ar, rx_ant, "rx"); 3749 3750 ar->cfg_tx_chainmask = tx_ant; 3751 ar->cfg_rx_chainmask = rx_ant; 3752 3753 if ((ar->state != ATH10K_STATE_ON) && 3754 (ar->state != ATH10K_STATE_RESTARTED)) 3755 return 0; 3756 3757 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->tx_chain_mask, 3758 tx_ant); 3759 if (ret) { 3760 ath10k_warn(ar, "failed to set tx-chainmask: %d, req 0x%x\n", 3761 ret, tx_ant); 3762 return ret; 3763 } 3764 3765 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->rx_chain_mask, 3766 rx_ant); 3767 if (ret) { 3768 ath10k_warn(ar, "failed to set rx-chainmask: %d, req 0x%x\n", 3769 ret, rx_ant); 3770 return ret; 3771 } 3772 3773 return 0; 3774 } 3775 3776 static int ath10k_set_antenna(struct ieee80211_hw *hw, u32 tx_ant, u32 rx_ant) 3777 { 3778 struct ath10k *ar = hw->priv; 3779 int ret; 3780 3781 mutex_lock(&ar->conf_mutex); 3782 ret = __ath10k_set_antenna(ar, tx_ant, rx_ant); 3783 mutex_unlock(&ar->conf_mutex); 3784 return ret; 3785 } 3786 3787 static int ath10k_start(struct ieee80211_hw *hw) 3788 { 3789 struct ath10k *ar = hw->priv; 3790 u32 burst_enable; 3791 int ret = 0; 3792 3793 /* 3794 * This makes sense only when restarting hw. It is harmless to call 3795 * unconditionally. This is necessary to make sure no HTT/WMI tx 3796 * commands will be submitted while restarting.
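 * Note: ath10k_drain_tx() must be called before conf_mutex is taken
 * below (see the comment above its definition).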
3797 */ 3798 ath10k_drain_tx(ar); 3799 3800 mutex_lock(&ar->conf_mutex); 3801 3802 switch (ar->state) { 3803 case ATH10K_STATE_OFF: 3804 ar->state = ATH10K_STATE_ON; 3805 break; 3806 case ATH10K_STATE_RESTARTING: 3807 ath10k_halt(ar); 3808 ar->state = ATH10K_STATE_RESTARTED; 3809 break; 3810 case ATH10K_STATE_ON: 3811 case ATH10K_STATE_RESTARTED: 3812 case ATH10K_STATE_WEDGED: 3813 WARN_ON(1); 3814 ret = -EINVAL; 3815 goto err; 3816 case ATH10K_STATE_UTF: 3817 ret = -EBUSY; 3818 goto err; 3819 } 3820 3821 ret = ath10k_hif_power_up(ar); 3822 if (ret) { 3823 ath10k_err(ar, "Could not init hif: %d\n", ret); 3824 goto err_off; 3825 } 3826 3827 ret = ath10k_core_start(ar, ATH10K_FIRMWARE_MODE_NORMAL); 3828 if (ret) { 3829 ath10k_err(ar, "Could not init core: %d\n", ret); 3830 goto err_power_down; 3831 } 3832 3833 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->pmf_qos, 1); 3834 if (ret) { 3835 ath10k_warn(ar, "failed to enable PMF QOS: %d\n", ret); 3836 goto err_core_stop; 3837 } 3838 3839 ret = ath10k_wmi_pdev_set_param(ar, ar->wmi.pdev_param->dynamic_bw, 1); 3840 if (ret) { 3841 ath10k_warn(ar, "failed to enable dynamic BW: %d\n", ret); 3842 goto err_core_stop; 3843 } 3844 3845 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 3846 ret = ath10k_wmi_adaptive_qcs(ar, true); 3847 if (ret) { 3848 ath10k_warn(ar, "failed to enable adaptive qcs: %d\n", 3849 ret); 3850 goto err_core_stop; 3851 } 3852 } 3853 3854 if (test_bit(WMI_SERVICE_BURST, ar->wmi.svc_map)) { 3855 burst_enable = ar->wmi.pdev_param->burst_enable; 3856 ret = ath10k_wmi_pdev_set_param(ar, burst_enable, 0); 3857 if (ret) { 3858 ath10k_warn(ar, "failed to disable burst: %d\n", ret); 3859 goto err_core_stop; 3860 } 3861 } 3862 3863 if (ar->cfg_tx_chainmask) 3864 __ath10k_set_antenna(ar, ar->cfg_tx_chainmask, 3865 ar->cfg_rx_chainmask); 3866 3867 /* 3868 * By default FW sets ARP frames ac to voice (6). In that case the ARP 3869 * exchange does not work properly for a UAPSD enabled AP. ARP requests 3870 * which arrive with access category 0 are processed by the network stack 3871 * and sent back with access category 0, but FW changes the access category 3872 * to 6. Setting the ARP frames access category to best effort (0) solves 3873 * this problem.
3874 */ 3875 3876 ret = ath10k_wmi_pdev_set_param(ar, 3877 ar->wmi.pdev_param->arp_ac_override, 0); 3878 if (ret) { 3879 ath10k_warn(ar, "failed to set arp ac override parameter: %d\n", 3880 ret); 3881 goto err_core_stop; 3882 } 3883 3884 ret = ath10k_wmi_pdev_set_param(ar, 3885 ar->wmi.pdev_param->ani_enable, 1); 3886 if (ret) { 3887 ath10k_warn(ar, "failed to enable ani by default: %d\n", 3888 ret); 3889 goto err_core_stop; 3890 } 3891 3892 ar->ani_enabled = true; 3893 3894 ar->num_started_vdevs = 0; 3895 ath10k_regd_update(ar); 3896 3897 ath10k_spectral_start(ar); 3898 ath10k_thermal_set_throttling(ar); 3899 3900 mutex_unlock(&ar->conf_mutex); 3901 return 0; 3902 3903 err_core_stop: 3904 ath10k_core_stop(ar); 3905 3906 err_power_down: 3907 ath10k_hif_power_down(ar); 3908 3909 err_off: 3910 ar->state = ATH10K_STATE_OFF; 3911 3912 err: 3913 mutex_unlock(&ar->conf_mutex); 3914 return ret; 3915 } 3916 3917 static void ath10k_stop(struct ieee80211_hw *hw) 3918 { 3919 struct ath10k *ar = hw->priv; 3920 3921 ath10k_drain_tx(ar); 3922 3923 mutex_lock(&ar->conf_mutex); 3924 if (ar->state != ATH10K_STATE_OFF) { 3925 ath10k_halt(ar); 3926 ar->state = ATH10K_STATE_OFF; 3927 } 3928 mutex_unlock(&ar->conf_mutex); 3929 3930 cancel_delayed_work_sync(&ar->scan.timeout); 3931 cancel_work_sync(&ar->restart_work); 3932 } 3933 3934 static int ath10k_config_ps(struct ath10k *ar) 3935 { 3936 struct ath10k_vif *arvif; 3937 int ret = 0; 3938 3939 lockdep_assert_held(&ar->conf_mutex); 3940 3941 list_for_each_entry(arvif, &ar->arvifs, list) { 3942 ret = ath10k_mac_vif_setup_ps(arvif); 3943 if (ret) { 3944 ath10k_warn(ar, "failed to setup powersave: %d\n", ret); 3945 break; 3946 } 3947 } 3948 3949 return ret; 3950 } 3951 3952 static int ath10k_mac_txpower_setup(struct ath10k *ar, int txpower) 3953 { 3954 int ret; 3955 u32 param; 3956 3957 lockdep_assert_held(&ar->conf_mutex); 3958 3959 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac txpower %d\n", txpower); 3960 3961 param = ar->wmi.pdev_param->txpower_limit2g; 3962 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 3963 if (ret) { 3964 ath10k_warn(ar, "failed to set 2g txpower %d: %d\n", 3965 txpower, ret); 3966 return ret; 3967 } 3968 3969 param = ar->wmi.pdev_param->txpower_limit5g; 3970 ret = ath10k_wmi_pdev_set_param(ar, param, txpower * 2); 3971 if (ret) { 3972 ath10k_warn(ar, "failed to set 5g txpower %d: %d\n", 3973 txpower, ret); 3974 return ret; 3975 } 3976 3977 return 0; 3978 } 3979 3980 static int ath10k_mac_txpower_recalc(struct ath10k *ar) 3981 { 3982 struct ath10k_vif *arvif; 3983 int ret, txpower = -1; 3984 3985 lockdep_assert_held(&ar->conf_mutex); 3986 3987 list_for_each_entry(arvif, &ar->arvifs, list) { 3988 WARN_ON(arvif->txpower < 0); 3989 3990 if (txpower == -1) 3991 txpower = arvif->txpower; 3992 else 3993 txpower = min(txpower, arvif->txpower); 3994 } 3995 3996 if (WARN_ON(txpower == -1)) 3997 return -EINVAL; 3998 3999 ret = ath10k_mac_txpower_setup(ar, txpower); 4000 if (ret) { 4001 ath10k_warn(ar, "failed to setup tx power %d: %d\n", 4002 txpower, ret); 4003 return ret; 4004 } 4005 4006 return 0; 4007 } 4008 4009 static int ath10k_config(struct ieee80211_hw *hw, u32 changed) 4010 { 4011 struct ath10k *ar = hw->priv; 4012 struct ieee80211_conf *conf = &hw->conf; 4013 int ret = 0; 4014 4015 mutex_lock(&ar->conf_mutex); 4016 4017 if (changed & IEEE80211_CONF_CHANGE_PS) 4018 ath10k_config_ps(ar); 4019 4020 if (changed & IEEE80211_CONF_CHANGE_MONITOR) { 4021 ar->monitor = conf->flags & IEEE80211_CONF_MONITOR; 4022 ret = ath10k_monitor_recalc(ar); 
4023 if (ret) 4024 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4025 } 4026 4027 mutex_unlock(&ar->conf_mutex); 4028 return ret; 4029 } 4030 4031 static u32 get_nss_from_chainmask(u16 chain_mask) 4032 { 4033 if ((chain_mask & 0xf) == 0xf) 4034 return 4; 4035 else if ((chain_mask & 0x7) == 0x7) 4036 return 3; 4037 else if ((chain_mask & 0x3) == 0x3) 4038 return 2; 4039 return 1; 4040 } 4041 4042 /* 4043 * TODO: 4044 * Figure out how to handle WMI_VDEV_SUBTYPE_P2P_DEVICE, 4045 * because we will send mgmt frames without CCK. This requirement 4046 * for P2P_FIND/GO_NEG should be handled by checking CCK flag 4047 * in the TX packet. 4048 */ 4049 static int ath10k_add_interface(struct ieee80211_hw *hw, 4050 struct ieee80211_vif *vif) 4051 { 4052 struct ath10k *ar = hw->priv; 4053 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4054 enum wmi_sta_powersave_param param; 4055 int ret = 0; 4056 u32 value; 4057 int bit; 4058 int i; 4059 u32 vdev_param; 4060 4061 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 4062 4063 mutex_lock(&ar->conf_mutex); 4064 4065 memset(arvif, 0, sizeof(*arvif)); 4066 4067 arvif->ar = ar; 4068 arvif->vif = vif; 4069 4070 INIT_LIST_HEAD(&arvif->list); 4071 INIT_WORK(&arvif->ap_csa_work, ath10k_mac_vif_ap_csa_work); 4072 INIT_DELAYED_WORK(&arvif->connection_loss_work, 4073 ath10k_mac_vif_sta_connection_loss_work); 4074 4075 for (i = 0; i < ARRAY_SIZE(arvif->bitrate_mask.control); i++) { 4076 arvif->bitrate_mask.control[i].legacy = 0xffffffff; 4077 memset(arvif->bitrate_mask.control[i].ht_mcs, 0xff, 4078 sizeof(arvif->bitrate_mask.control[i].ht_mcs)); 4079 memset(arvif->bitrate_mask.control[i].vht_mcs, 0xff, 4080 sizeof(arvif->bitrate_mask.control[i].vht_mcs)); 4081 } 4082 4083 if (ar->free_vdev_map == 0) { 4084 ath10k_warn(ar, "Free vdev map is empty, no more interfaces allowed.\n"); 4085 ret = -EBUSY; 4086 goto err; 4087 } 4088 bit = __ffs64(ar->free_vdev_map); 4089 4090 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac create vdev %i map %llx\n", 4091 bit, ar->free_vdev_map); 4092 4093 arvif->vdev_id = bit; 4094 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_NONE; 4095 4096 switch (vif->type) { 4097 case NL80211_IFTYPE_P2P_DEVICE: 4098 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4099 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_DEVICE; 4100 break; 4101 case NL80211_IFTYPE_UNSPECIFIED: 4102 case NL80211_IFTYPE_STATION: 4103 arvif->vdev_type = WMI_VDEV_TYPE_STA; 4104 if (vif->p2p) 4105 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_CLIENT; 4106 break; 4107 case NL80211_IFTYPE_ADHOC: 4108 arvif->vdev_type = WMI_VDEV_TYPE_IBSS; 4109 break; 4110 case NL80211_IFTYPE_AP: 4111 arvif->vdev_type = WMI_VDEV_TYPE_AP; 4112 4113 if (vif->p2p) 4114 arvif->vdev_subtype = WMI_VDEV_SUBTYPE_P2P_GO; 4115 break; 4116 case NL80211_IFTYPE_MONITOR: 4117 arvif->vdev_type = WMI_VDEV_TYPE_MONITOR; 4118 break; 4119 default: 4120 WARN_ON(1); 4121 break; 4122 } 4123 4124 /* Using vdev_id as queue number will make it very easy to do per-vif 4125 * tx queue locking. This shouldn't wrap due to interface combinations 4126 * but do a modulo for correctness sake and prevent using offchannel tx 4127 * queues for regular vif tx. 4128 */ 4129 vif->cab_queue = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4130 for (i = 0; i < ARRAY_SIZE(vif->hw_queue); i++) 4131 vif->hw_queue[i] = arvif->vdev_id % (IEEE80211_MAX_QUEUES - 1); 4132 4133 /* Some firmware revisions don't wait for beacon tx completion before 4134 * sending another SWBA event. This could lead to hardware using old 4135 * (freed) beacon data in some cases, e.g.
tx credit starvation 4136 * combined with missed TBTT. This is very very rare. 4137 * 4138 * On non-IOMMU-enabled hosts this could be a possible security issue 4139 * because hw could beacon some random data on the air. On 4140 * IOMMU-enabled hosts DMAR faults would occur in most cases and target 4141 * device would crash. 4142 * 4143 * Since there are no beacon tx completions (implicit nor explicit) 4144 * propagated to host the only workaround for this is to allocate a 4145 * DMA-coherent buffer for a lifetime of a vif and use it for all 4146 * beacon tx commands. Worst case for this approach is some beacons may 4147 * become corrupted, e.g. have garbled IEs or out-of-date TIM bitmap. 4148 */ 4149 if (vif->type == NL80211_IFTYPE_ADHOC || 4150 vif->type == NL80211_IFTYPE_AP) { 4151 arvif->beacon_buf = dma_zalloc_coherent(ar->dev, 4152 IEEE80211_MAX_FRAME_LEN, 4153 &arvif->beacon_paddr, 4154 GFP_ATOMIC); 4155 if (!arvif->beacon_buf) { 4156 ret = -ENOMEM; 4157 ath10k_warn(ar, "failed to allocate beacon buffer: %d\n", 4158 ret); 4159 goto err; 4160 } 4161 } 4162 4163 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev create %d (add interface) type %d subtype %d bcnmode %s\n", 4164 arvif->vdev_id, arvif->vdev_type, arvif->vdev_subtype, 4165 arvif->beacon_buf ? "single-buf" : "per-skb"); 4166 4167 ret = ath10k_wmi_vdev_create(ar, arvif->vdev_id, arvif->vdev_type, 4168 arvif->vdev_subtype, vif->addr); 4169 if (ret) { 4170 ath10k_warn(ar, "failed to create WMI vdev %i: %d\n", 4171 arvif->vdev_id, ret); 4172 goto err; 4173 } 4174 4175 ar->free_vdev_map &= ~(1LL << arvif->vdev_id); 4176 list_add(&arvif->list, &ar->arvifs); 4177 4178 /* It makes no sense to have firmware do keepalives. mac80211 already 4179 * takes care of this with idle connection polling. 4180 */ 4181 ret = ath10k_mac_vif_disable_keepalive(arvif); 4182 if (ret) { 4183 ath10k_warn(ar, "failed to disable keepalive on vdev %i: %d\n", 4184 arvif->vdev_id, ret); 4185 goto err_vdev_delete; 4186 } 4187 4188 arvif->def_wep_key_idx = -1; 4189 4190 vdev_param = ar->wmi.vdev_param->tx_encap_type; 4191 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4192 ATH10K_HW_TXRX_NATIVE_WIFI); 4193 /* 10.X firmware does not support this VDEV parameter. 
Do not warn */ 4194 if (ret && ret != -EOPNOTSUPP) { 4195 ath10k_warn(ar, "failed to set vdev %i TX encapsulation: %d\n", 4196 arvif->vdev_id, ret); 4197 goto err_vdev_delete; 4198 } 4199 4200 if (ar->cfg_tx_chainmask) { 4201 u16 nss = get_nss_from_chainmask(ar->cfg_tx_chainmask); 4202 4203 vdev_param = ar->wmi.vdev_param->nss; 4204 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4205 nss); 4206 if (ret) { 4207 ath10k_warn(ar, "failed to set vdev %i chainmask 0x%x, nss %i: %d\n", 4208 arvif->vdev_id, ar->cfg_tx_chainmask, nss, 4209 ret); 4210 goto err_vdev_delete; 4211 } 4212 } 4213 4214 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 4215 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 4216 ret = ath10k_peer_create(ar, arvif->vdev_id, vif->addr, 4217 WMI_PEER_TYPE_DEFAULT); 4218 if (ret) { 4219 ath10k_warn(ar, "failed to create vdev %i peer for AP/IBSS: %d\n", 4220 arvif->vdev_id, ret); 4221 goto err_vdev_delete; 4222 } 4223 } 4224 4225 if (arvif->vdev_type == WMI_VDEV_TYPE_AP) { 4226 ret = ath10k_mac_set_kickout(arvif); 4227 if (ret) { 4228 ath10k_warn(ar, "failed to set vdev %i kickout parameters: %d\n", 4229 arvif->vdev_id, ret); 4230 goto err_peer_delete; 4231 } 4232 } 4233 4234 if (arvif->vdev_type == WMI_VDEV_TYPE_STA) { 4235 param = WMI_STA_PS_PARAM_RX_WAKE_POLICY; 4236 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 4237 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 4238 param, value); 4239 if (ret) { 4240 ath10k_warn(ar, "failed to set vdev %i RX wake policy: %d\n", 4241 arvif->vdev_id, ret); 4242 goto err_peer_delete; 4243 } 4244 4245 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 4246 if (ret) { 4247 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 4248 arvif->vdev_id, ret); 4249 goto err_peer_delete; 4250 } 4251 4252 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 4253 if (ret) { 4254 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 4255 arvif->vdev_id, ret); 4256 goto err_peer_delete; 4257 } 4258 } 4259 4260 ret = ath10k_mac_set_rts(arvif, ar->hw->wiphy->rts_threshold); 4261 if (ret) { 4262 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 4263 arvif->vdev_id, ret); 4264 goto err_peer_delete; 4265 } 4266 4267 ret = ath10k_mac_set_frag(arvif, ar->hw->wiphy->frag_threshold); 4268 if (ret) { 4269 ath10k_warn(ar, "failed to set frag threshold for vdev %d: %d\n", 4270 arvif->vdev_id, ret); 4271 goto err_peer_delete; 4272 } 4273 4274 arvif->txpower = vif->bss_conf.txpower; 4275 ret = ath10k_mac_txpower_recalc(ar); 4276 if (ret) { 4277 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 4278 goto err_peer_delete; 4279 } 4280 4281 if (vif->type == NL80211_IFTYPE_MONITOR) { 4282 ar->monitor_arvif = arvif; 4283 ret = ath10k_monitor_recalc(ar); 4284 if (ret) { 4285 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4286 goto err_peer_delete; 4287 } 4288 } 4289 4290 mutex_unlock(&ar->conf_mutex); 4291 return 0; 4292 4293 err_peer_delete: 4294 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 4295 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) 4296 ath10k_wmi_peer_delete(ar, arvif->vdev_id, vif->addr); 4297 4298 err_vdev_delete: 4299 ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 4300 ar->free_vdev_map |= 1LL << arvif->vdev_id; 4301 list_del(&arvif->list); 4302 4303 err: 4304 if (arvif->beacon_buf) { 4305 dma_free_coherent(ar->dev, IEEE80211_MAX_FRAME_LEN, 4306 arvif->beacon_buf, arvif->beacon_paddr); 4307 arvif->beacon_buf = NULL; 4308 } 4309 4310 mutex_unlock(&ar->conf_mutex); 4311 4312 return ret; 4313 } 4314 4315 static 
void ath10k_mac_vif_tx_unlock_all(struct ath10k_vif *arvif) 4316 { 4317 int i; 4318 4319 for (i = 0; i < BITS_PER_LONG; i++) 4320 ath10k_mac_vif_tx_unlock(arvif, i); 4321 } 4322 4323 static void ath10k_remove_interface(struct ieee80211_hw *hw, 4324 struct ieee80211_vif *vif) 4325 { 4326 struct ath10k *ar = hw->priv; 4327 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4328 int ret; 4329 4330 cancel_work_sync(&arvif->ap_csa_work); 4331 cancel_delayed_work_sync(&arvif->connection_loss_work); 4332 4333 mutex_lock(&ar->conf_mutex); 4334 4335 spin_lock_bh(&ar->data_lock); 4336 ath10k_mac_vif_beacon_cleanup(arvif); 4337 spin_unlock_bh(&ar->data_lock); 4338 4339 ret = ath10k_spectral_vif_stop(arvif); 4340 if (ret) 4341 ath10k_warn(ar, "failed to stop spectral for vdev %i: %d\n", 4342 arvif->vdev_id, ret); 4343 4344 ar->free_vdev_map |= 1LL << arvif->vdev_id; 4345 list_del(&arvif->list); 4346 4347 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 4348 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 4349 ret = ath10k_wmi_peer_delete(arvif->ar, arvif->vdev_id, 4350 vif->addr); 4351 if (ret) 4352 ath10k_warn(ar, "failed to submit AP/IBSS self-peer removal on vdev %i: %d\n", 4353 arvif->vdev_id, ret); 4354 4355 kfree(arvif->u.ap.noa_data); 4356 } 4357 4358 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %i delete (remove interface)\n", 4359 arvif->vdev_id); 4360 4361 ret = ath10k_wmi_vdev_delete(ar, arvif->vdev_id); 4362 if (ret) 4363 ath10k_warn(ar, "failed to delete WMI vdev %i: %d\n", 4364 arvif->vdev_id, ret); 4365 4366 /* Some firmware revisions don't notify host about self-peer removal 4367 * until after associated vdev is deleted. 4368 */ 4369 if (arvif->vdev_type == WMI_VDEV_TYPE_AP || 4370 arvif->vdev_type == WMI_VDEV_TYPE_IBSS) { 4371 ret = ath10k_wait_for_peer_deleted(ar, arvif->vdev_id, 4372 vif->addr); 4373 if (ret) 4374 ath10k_warn(ar, "failed to remove AP self-peer on vdev %i: %d\n", 4375 arvif->vdev_id, ret); 4376 4377 spin_lock_bh(&ar->data_lock); 4378 ar->num_peers--; 4379 spin_unlock_bh(&ar->data_lock); 4380 } 4381 4382 ath10k_peer_cleanup(ar, arvif->vdev_id); 4383 4384 if (vif->type == NL80211_IFTYPE_MONITOR) { 4385 ar->monitor_arvif = NULL; 4386 ret = ath10k_monitor_recalc(ar); 4387 if (ret) 4388 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4389 } 4390 4391 spin_lock_bh(&ar->htt.tx_lock); 4392 ath10k_mac_vif_tx_unlock_all(arvif); 4393 spin_unlock_bh(&ar->htt.tx_lock); 4394 4395 mutex_unlock(&ar->conf_mutex); 4396 } 4397 4398 /* 4399 * FIXME: Has to be verified. 
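 * ath10k_configure_filter() below does not program a per-flag hardware rx
 * filter; it only stores the requested flags in ar->filter_flags and lets
 * ath10k_monitor_recalc() act on them.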
4400 */ 4401 #define SUPPORTED_FILTERS \ 4402 (FIF_ALLMULTI | \ 4403 FIF_CONTROL | \ 4404 FIF_PSPOLL | \ 4405 FIF_OTHER_BSS | \ 4406 FIF_BCN_PRBRESP_PROMISC | \ 4407 FIF_PROBE_REQ | \ 4408 FIF_FCSFAIL) 4409 4410 static void ath10k_configure_filter(struct ieee80211_hw *hw, 4411 unsigned int changed_flags, 4412 unsigned int *total_flags, 4413 u64 multicast) 4414 { 4415 struct ath10k *ar = hw->priv; 4416 int ret; 4417 4418 mutex_lock(&ar->conf_mutex); 4419 4420 changed_flags &= SUPPORTED_FILTERS; 4421 *total_flags &= SUPPORTED_FILTERS; 4422 ar->filter_flags = *total_flags; 4423 4424 ret = ath10k_monitor_recalc(ar); 4425 if (ret) 4426 ath10k_warn(ar, "failed to recalc monitor: %d\n", ret); 4427 4428 mutex_unlock(&ar->conf_mutex); 4429 } 4430 4431 static void ath10k_bss_info_changed(struct ieee80211_hw *hw, 4432 struct ieee80211_vif *vif, 4433 struct ieee80211_bss_conf *info, 4434 u32 changed) 4435 { 4436 struct ath10k *ar = hw->priv; 4437 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4438 int ret = 0; 4439 u32 vdev_param, pdev_param, slottime, preamble; 4440 4441 mutex_lock(&ar->conf_mutex); 4442 4443 if (changed & BSS_CHANGED_IBSS) 4444 ath10k_control_ibss(arvif, info, vif->addr); 4445 4446 if (changed & BSS_CHANGED_BEACON_INT) { 4447 arvif->beacon_interval = info->beacon_int; 4448 vdev_param = ar->wmi.vdev_param->beacon_interval; 4449 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4450 arvif->beacon_interval); 4451 ath10k_dbg(ar, ATH10K_DBG_MAC, 4452 "mac vdev %d beacon_interval %d\n", 4453 arvif->vdev_id, arvif->beacon_interval); 4454 4455 if (ret) 4456 ath10k_warn(ar, "failed to set beacon interval for vdev %d: %i\n", 4457 arvif->vdev_id, ret); 4458 } 4459 4460 if (changed & BSS_CHANGED_BEACON) { 4461 ath10k_dbg(ar, ATH10K_DBG_MAC, 4462 "vdev %d set beacon tx mode to staggered\n", 4463 arvif->vdev_id); 4464 4465 pdev_param = ar->wmi.pdev_param->beacon_tx_mode; 4466 ret = ath10k_wmi_pdev_set_param(ar, pdev_param, 4467 WMI_BEACON_STAGGERED_MODE); 4468 if (ret) 4469 ath10k_warn(ar, "failed to set beacon mode for vdev %d: %i\n", 4470 arvif->vdev_id, ret); 4471 4472 ret = ath10k_mac_setup_bcn_tmpl(arvif); 4473 if (ret) 4474 ath10k_warn(ar, "failed to update beacon template: %d\n", 4475 ret); 4476 } 4477 4478 if (changed & BSS_CHANGED_AP_PROBE_RESP) { 4479 ret = ath10k_mac_setup_prb_tmpl(arvif); 4480 if (ret) 4481 ath10k_warn(ar, "failed to setup probe resp template on vdev %i: %d\n", 4482 arvif->vdev_id, ret); 4483 } 4484 4485 if (changed & (BSS_CHANGED_BEACON_INFO | BSS_CHANGED_BEACON)) { 4486 arvif->dtim_period = info->dtim_period; 4487 4488 ath10k_dbg(ar, ATH10K_DBG_MAC, 4489 "mac vdev %d dtim_period %d\n", 4490 arvif->vdev_id, arvif->dtim_period); 4491 4492 vdev_param = ar->wmi.vdev_param->dtim_period; 4493 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4494 arvif->dtim_period); 4495 if (ret) 4496 ath10k_warn(ar, "failed to set dtim period for vdev %d: %i\n", 4497 arvif->vdev_id, ret); 4498 } 4499 4500 if (changed & BSS_CHANGED_SSID && 4501 vif->type == NL80211_IFTYPE_AP) { 4502 arvif->u.ap.ssid_len = info->ssid_len; 4503 if (info->ssid_len) 4504 memcpy(arvif->u.ap.ssid, info->ssid, info->ssid_len); 4505 arvif->u.ap.hidden_ssid = info->hidden_ssid; 4506 } 4507 4508 if (changed & BSS_CHANGED_BSSID && !is_zero_ether_addr(info->bssid)) 4509 ether_addr_copy(arvif->bssid, info->bssid); 4510 4511 if (changed & BSS_CHANGED_BEACON_ENABLED) 4512 ath10k_control_beaconing(arvif, info); 4513 4514 if (changed & BSS_CHANGED_ERP_CTS_PROT) { 4515 arvif->use_cts_prot
= info->use_cts_prot; 4516 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d cts_prot %d\n", 4517 arvif->vdev_id, info->use_cts_prot); 4518 4519 ret = ath10k_recalc_rtscts_prot(arvif); 4520 if (ret) 4521 ath10k_warn(ar, "failed to recalculate rts/cts prot for vdev %d: %d\n", 4522 arvif->vdev_id, ret); 4523 4524 vdev_param = ar->wmi.vdev_param->protection_mode; 4525 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4526 info->use_cts_prot ? 1 : 0); 4527 if (ret) 4528 ath10k_warn(ar, "failed to set protection mode %d on vdev %i: %d\n", 4529 info->use_cts_prot, arvif->vdev_id, ret); 4530 } 4531 4532 if (changed & BSS_CHANGED_ERP_SLOT) { 4533 if (info->use_short_slot) 4534 slottime = WMI_VDEV_SLOT_TIME_SHORT; /* 9us */ 4535 4536 else 4537 slottime = WMI_VDEV_SLOT_TIME_LONG; /* 20us */ 4538 4539 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d slot_time %d\n", 4540 arvif->vdev_id, slottime); 4541 4542 vdev_param = ar->wmi.vdev_param->slot_time; 4543 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4544 slottime); 4545 if (ret) 4546 ath10k_warn(ar, "failed to set erp slot for vdev %d: %i\n", 4547 arvif->vdev_id, ret); 4548 } 4549 4550 if (changed & BSS_CHANGED_ERP_PREAMBLE) { 4551 if (info->use_short_preamble) 4552 preamble = WMI_VDEV_PREAMBLE_SHORT; 4553 else 4554 preamble = WMI_VDEV_PREAMBLE_LONG; 4555 4556 ath10k_dbg(ar, ATH10K_DBG_MAC, 4557 "mac vdev %d preamble %d\n", 4558 arvif->vdev_id, preamble); 4559 4560 vdev_param = ar->wmi.vdev_param->preamble; 4561 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4562 preamble); 4563 if (ret) 4564 ath10k_warn(ar, "failed to set preamble for vdev %d: %i\n", 4565 arvif->vdev_id, ret); 4566 } 4567 4568 if (changed & BSS_CHANGED_ASSOC) { 4569 if (info->assoc) { 4570 /* Workaround: Make sure monitor vdev is not running 4571 * when associating to prevent some firmware revisions 4572 * (e.g. 10.1 and 10.2) from crashing.
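 * The monitor vdev, if still needed, is brought back by the
 * ath10k_monitor_recalc() call right after ath10k_bss_assoc().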
4573 */ 4574 if (ar->monitor_started) 4575 ath10k_monitor_stop(ar); 4576 ath10k_bss_assoc(hw, vif, info); 4577 ath10k_monitor_recalc(ar); 4578 } else { 4579 ath10k_bss_disassoc(hw, vif); 4580 } 4581 } 4582 4583 if (changed & BSS_CHANGED_TXPOWER) { 4584 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev_id %i txpower %d\n", 4585 arvif->vdev_id, info->txpower); 4586 4587 arvif->txpower = info->txpower; 4588 ret = ath10k_mac_txpower_recalc(ar); 4589 if (ret) 4590 ath10k_warn(ar, "failed to recalc tx power: %d\n", ret); 4591 } 4592 4593 if (changed & BSS_CHANGED_PS) { 4594 arvif->ps = vif->bss_conf.ps; 4595 4596 ret = ath10k_config_ps(ar); 4597 if (ret) 4598 ath10k_warn(ar, "failed to setup ps on vdev %i: %d\n", 4599 arvif->vdev_id, ret); 4600 } 4601 4602 mutex_unlock(&ar->conf_mutex); 4603 } 4604 4605 static int ath10k_hw_scan(struct ieee80211_hw *hw, 4606 struct ieee80211_vif *vif, 4607 struct ieee80211_scan_request *hw_req) 4608 { 4609 struct ath10k *ar = hw->priv; 4610 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4611 struct cfg80211_scan_request *req = &hw_req->req; 4612 struct wmi_start_scan_arg arg; 4613 int ret = 0; 4614 int i; 4615 4616 mutex_lock(&ar->conf_mutex); 4617 4618 spin_lock_bh(&ar->data_lock); 4619 switch (ar->scan.state) { 4620 case ATH10K_SCAN_IDLE: 4621 reinit_completion(&ar->scan.started); 4622 reinit_completion(&ar->scan.completed); 4623 ar->scan.state = ATH10K_SCAN_STARTING; 4624 ar->scan.is_roc = false; 4625 ar->scan.vdev_id = arvif->vdev_id; 4626 ret = 0; 4627 break; 4628 case ATH10K_SCAN_STARTING: 4629 case ATH10K_SCAN_RUNNING: 4630 case ATH10K_SCAN_ABORTING: 4631 ret = -EBUSY; 4632 break; 4633 } 4634 spin_unlock_bh(&ar->data_lock); 4635 4636 if (ret) 4637 goto exit; 4638 4639 memset(&arg, 0, sizeof(arg)); 4640 ath10k_wmi_start_scan_init(ar, &arg); 4641 arg.vdev_id = arvif->vdev_id; 4642 arg.scan_id = ATH10K_SCAN_ID; 4643 4644 if (!req->no_cck) 4645 arg.scan_ctrl_flags |= WMI_SCAN_ADD_CCK_RATES; 4646 4647 if (req->ie_len) { 4648 arg.ie_len = req->ie_len; 4649 memcpy(arg.ie, req->ie, arg.ie_len); 4650 } 4651 4652 if (req->n_ssids) { 4653 arg.n_ssids = req->n_ssids; 4654 for (i = 0; i < arg.n_ssids; i++) { 4655 arg.ssids[i].len = req->ssids[i].ssid_len; 4656 arg.ssids[i].ssid = req->ssids[i].ssid; 4657 } 4658 } else { 4659 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 4660 } 4661 4662 if (req->n_channels) { 4663 arg.n_channels = req->n_channels; 4664 for (i = 0; i < arg.n_channels; i++) 4665 arg.channels[i] = req->channels[i]->center_freq; 4666 } 4667 4668 ret = ath10k_start_scan(ar, &arg); 4669 if (ret) { 4670 ath10k_warn(ar, "failed to start hw scan: %d\n", ret); 4671 spin_lock_bh(&ar->data_lock); 4672 ar->scan.state = ATH10K_SCAN_IDLE; 4673 spin_unlock_bh(&ar->data_lock); 4674 } 4675 4676 exit: 4677 mutex_unlock(&ar->conf_mutex); 4678 return ret; 4679 } 4680 4681 static void ath10k_cancel_hw_scan(struct ieee80211_hw *hw, 4682 struct ieee80211_vif *vif) 4683 { 4684 struct ath10k *ar = hw->priv; 4685 4686 mutex_lock(&ar->conf_mutex); 4687 ath10k_scan_abort(ar); 4688 mutex_unlock(&ar->conf_mutex); 4689 4690 cancel_delayed_work_sync(&ar->scan.timeout); 4691 } 4692 4693 static void ath10k_set_key_h_def_keyidx(struct ath10k *ar, 4694 struct ath10k_vif *arvif, 4695 enum set_key_cmd cmd, 4696 struct ieee80211_key_conf *key) 4697 { 4698 u32 vdev_param = arvif->ar->wmi.vdev_param->def_keyid; 4699 int ret; 4700 4701 /* 10.1 firmware branch requires default key index to be set to group 4702 * key index after installing it. 
Otherwise FW/HW Txes corrupted 4703 * frames with multi-vif APs. This is not required for main firmware 4704 * branch (e.g. 636). 4705 * 4706 * This is also needed for 636 fw for IBSS-RSN to work more reliably. 4707 * 4708 * FIXME: It remains unknown if this is required for multi-vif STA 4709 * interfaces on 10.1. 4710 */ 4711 4712 if (arvif->vdev_type != WMI_VDEV_TYPE_AP && 4713 arvif->vdev_type != WMI_VDEV_TYPE_IBSS) 4714 return; 4715 4716 if (key->cipher == WLAN_CIPHER_SUITE_WEP40) 4717 return; 4718 4719 if (key->cipher == WLAN_CIPHER_SUITE_WEP104) 4720 return; 4721 4722 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 4723 return; 4724 4725 if (cmd != SET_KEY) 4726 return; 4727 4728 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, 4729 key->keyidx); 4730 if (ret) 4731 ath10k_warn(ar, "failed to set vdev %i group key as default key: %d\n", 4732 arvif->vdev_id, ret); 4733 } 4734 4735 static int ath10k_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd, 4736 struct ieee80211_vif *vif, struct ieee80211_sta *sta, 4737 struct ieee80211_key_conf *key) 4738 { 4739 struct ath10k *ar = hw->priv; 4740 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4741 struct ath10k_peer *peer; 4742 const u8 *peer_addr; 4743 bool is_wep = key->cipher == WLAN_CIPHER_SUITE_WEP40 || 4744 key->cipher == WLAN_CIPHER_SUITE_WEP104; 4745 int ret = 0; 4746 int ret2; 4747 u32 flags = 0; 4748 u32 flags2; 4749 4750 /* this one needs to be done in software */ 4751 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC) 4752 return 1; 4753 4754 if (key->keyidx > WMI_MAX_KEY_INDEX) 4755 return -ENOSPC; 4756 4757 mutex_lock(&ar->conf_mutex); 4758 4759 if (sta) 4760 peer_addr = sta->addr; 4761 else if (arvif->vdev_type == WMI_VDEV_TYPE_STA) 4762 peer_addr = vif->bss_conf.bssid; 4763 else 4764 peer_addr = vif->addr; 4765 4766 key->hw_key_idx = key->keyidx; 4767 4768 if (is_wep) { 4769 if (cmd == SET_KEY) 4770 arvif->wep_keys[key->keyidx] = key; 4771 else 4772 arvif->wep_keys[key->keyidx] = NULL; 4773 } 4774 4775 /* the peer should not disappear in mid-way (unless FW goes awry) since 4776 * we already hold conf_mutex. we just make sure its there now. */ 4777 spin_lock_bh(&ar->data_lock); 4778 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 4779 spin_unlock_bh(&ar->data_lock); 4780 4781 if (!peer) { 4782 if (cmd == SET_KEY) { 4783 ath10k_warn(ar, "failed to install key for non-existent peer %pM\n", 4784 peer_addr); 4785 ret = -EOPNOTSUPP; 4786 goto exit; 4787 } else { 4788 /* if the peer doesn't exist there is no key to disable 4789 * anymore */ 4790 goto exit; 4791 } 4792 } 4793 4794 if (key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 4795 flags |= WMI_KEY_PAIRWISE; 4796 else 4797 flags |= WMI_KEY_GROUP; 4798 4799 if (is_wep) { 4800 if (cmd == DISABLE_KEY) 4801 ath10k_clear_vdev_key(arvif, key); 4802 4803 /* When WEP keys are uploaded it's possible that there are 4804 * stations associated already (e.g. when merging) without any 4805 * keys. Static WEP needs an explicit per-peer key upload. 4806 */ 4807 if (vif->type == NL80211_IFTYPE_ADHOC && 4808 cmd == SET_KEY) 4809 ath10k_mac_vif_update_wep_key(arvif, key); 4810 4811 /* 802.1x never sets the def_wep_key_idx so each set_key() 4812 * call changes default tx key. 4813 * 4814 * Static WEP sets def_wep_key_idx via .set_default_unicast_key 4815 * after first set_key(). 
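 * Hence WMI_KEY_TX_USAGE is requested below only while def_wep_key_idx
 * is still -1, i.e. before an explicit default index has been programmed.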
4816 */ 4817 if (cmd == SET_KEY && arvif->def_wep_key_idx == -1) 4818 flags |= WMI_KEY_TX_USAGE; 4819 } 4820 4821 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags); 4822 if (ret) { 4823 ath10k_warn(ar, "failed to install key for vdev %i peer %pM: %d\n", 4824 arvif->vdev_id, peer_addr, ret); 4825 goto exit; 4826 } 4827 4828 /* mac80211 sets static WEP keys as groupwise while firmware requires 4829 * them to be installed twice as both pairwise and groupwise. 4830 */ 4831 if (is_wep && !sta && vif->type == NL80211_IFTYPE_STATION) { 4832 flags2 = flags; 4833 flags2 &= ~WMI_KEY_GROUP; 4834 flags2 |= WMI_KEY_PAIRWISE; 4835 4836 ret = ath10k_install_key(arvif, key, cmd, peer_addr, flags2); 4837 if (ret) { 4838 ath10k_warn(ar, "failed to install (ucast) key for vdev %i peer %pM: %d\n", 4839 arvif->vdev_id, peer_addr, ret); 4840 ret2 = ath10k_install_key(arvif, key, DISABLE_KEY, 4841 peer_addr, flags); 4842 if (ret2) 4843 ath10k_warn(ar, "failed to disable (mcast) key for vdev %i peer %pM: %d\n", 4844 arvif->vdev_id, peer_addr, ret2); 4845 goto exit; 4846 } 4847 } 4848 4849 ath10k_set_key_h_def_keyidx(ar, arvif, cmd, key); 4850 4851 spin_lock_bh(&ar->data_lock); 4852 peer = ath10k_peer_find(ar, arvif->vdev_id, peer_addr); 4853 if (peer && cmd == SET_KEY) 4854 peer->keys[key->keyidx] = key; 4855 else if (peer && cmd == DISABLE_KEY) 4856 peer->keys[key->keyidx] = NULL; 4857 else if (peer == NULL) 4858 /* impossible unless FW goes crazy */ 4859 ath10k_warn(ar, "Peer %pM disappeared!\n", peer_addr); 4860 spin_unlock_bh(&ar->data_lock); 4861 4862 exit: 4863 mutex_unlock(&ar->conf_mutex); 4864 return ret; 4865 } 4866 4867 static void ath10k_set_default_unicast_key(struct ieee80211_hw *hw, 4868 struct ieee80211_vif *vif, 4869 int keyidx) 4870 { 4871 struct ath10k *ar = hw->priv; 4872 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 4873 int ret; 4874 4875 mutex_lock(&arvif->ar->conf_mutex); 4876 4877 if (arvif->ar->state != ATH10K_STATE_ON) 4878 goto unlock; 4879 4880 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d set keyidx %d\n", 4881 arvif->vdev_id, keyidx); 4882 4883 ret = ath10k_wmi_vdev_set_param(arvif->ar, 4884 arvif->vdev_id, 4885 arvif->ar->wmi.vdev_param->def_keyid, 4886 keyidx); 4887 4888 if (ret) { 4889 ath10k_warn(ar, "failed to update wep key index for vdev %d: %d\n", 4890 arvif->vdev_id, 4891 ret); 4892 goto unlock; 4893 } 4894 4895 arvif->def_wep_key_idx = keyidx; 4896 4897 unlock: 4898 mutex_unlock(&arvif->ar->conf_mutex); 4899 } 4900 4901 static void ath10k_sta_rc_update_wk(struct work_struct *wk) 4902 { 4903 struct ath10k *ar; 4904 struct ath10k_vif *arvif; 4905 struct ath10k_sta *arsta; 4906 struct ieee80211_sta *sta; 4907 struct cfg80211_chan_def def; 4908 enum ieee80211_band band; 4909 const u8 *ht_mcs_mask; 4910 const u16 *vht_mcs_mask; 4911 u32 changed, bw, nss, smps; 4912 int err; 4913 4914 arsta = container_of(wk, struct ath10k_sta, update_wk); 4915 sta = container_of((void *)arsta, struct ieee80211_sta, drv_priv); 4916 arvif = arsta->arvif; 4917 ar = arvif->ar; 4918 4919 if (WARN_ON(ath10k_mac_vif_chan(arvif->vif, &def))) 4920 return; 4921 4922 band = def.chan->band; 4923 ht_mcs_mask = arvif->bitrate_mask.control[band].ht_mcs; 4924 vht_mcs_mask = arvif->bitrate_mask.control[band].vht_mcs; 4925 4926 spin_lock_bh(&ar->data_lock); 4927 4928 changed = arsta->changed; 4929 arsta->changed = 0; 4930 4931 bw = arsta->bw; 4932 nss = arsta->nss; 4933 smps = arsta->smps; 4934 4935 spin_unlock_bh(&ar->data_lock); 4936 4937 mutex_lock(&ar->conf_mutex); 4938 4939 nss = max_t(u32, 
1, nss); 4940 nss = min(nss, max(ath10k_mac_max_ht_nss(ht_mcs_mask), 4941 ath10k_mac_max_vht_nss(vht_mcs_mask))); 4942 4943 if (changed & IEEE80211_RC_BW_CHANGED) { 4944 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM peer bw %d\n", 4945 sta->addr, bw); 4946 4947 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 4948 WMI_PEER_CHAN_WIDTH, bw); 4949 if (err) 4950 ath10k_warn(ar, "failed to update STA %pM peer bw %d: %d\n", 4951 sta->addr, bw, err); 4952 } 4953 4954 if (changed & IEEE80211_RC_NSS_CHANGED) { 4955 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM nss %d\n", 4956 sta->addr, nss); 4957 4958 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 4959 WMI_PEER_NSS, nss); 4960 if (err) 4961 ath10k_warn(ar, "failed to update STA %pM nss %d: %d\n", 4962 sta->addr, nss, err); 4963 } 4964 4965 if (changed & IEEE80211_RC_SMPS_CHANGED) { 4966 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM smps %d\n", 4967 sta->addr, smps); 4968 4969 err = ath10k_wmi_peer_set_param(ar, arvif->vdev_id, sta->addr, 4970 WMI_PEER_SMPS_STATE, smps); 4971 if (err) 4972 ath10k_warn(ar, "failed to update STA %pM smps %d: %d\n", 4973 sta->addr, smps, err); 4974 } 4975 4976 if (changed & IEEE80211_RC_SUPP_RATES_CHANGED || 4977 changed & IEEE80211_RC_NSS_CHANGED) { 4978 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac update sta %pM supp rates/nss\n", 4979 sta->addr); 4980 4981 err = ath10k_station_assoc(ar, arvif->vif, sta, true); 4982 if (err) 4983 ath10k_warn(ar, "failed to reassociate station: %pM\n", 4984 sta->addr); 4985 } 4986 4987 mutex_unlock(&ar->conf_mutex); 4988 } 4989 4990 static int ath10k_mac_inc_num_stations(struct ath10k_vif *arvif, 4991 struct ieee80211_sta *sta) 4992 { 4993 struct ath10k *ar = arvif->ar; 4994 4995 lockdep_assert_held(&ar->conf_mutex); 4996 4997 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 4998 return 0; 4999 5000 if (ar->num_stations >= ar->max_num_stations) 5001 return -ENOBUFS; 5002 5003 ar->num_stations++; 5004 5005 return 0; 5006 } 5007 5008 static void ath10k_mac_dec_num_stations(struct ath10k_vif *arvif, 5009 struct ieee80211_sta *sta) 5010 { 5011 struct ath10k *ar = arvif->ar; 5012 5013 lockdep_assert_held(&ar->conf_mutex); 5014 5015 if (arvif->vdev_type == WMI_VDEV_TYPE_STA && !sta->tdls) 5016 return; 5017 5018 ar->num_stations--; 5019 } 5020 5021 struct ath10k_mac_tdls_iter_data { 5022 u32 num_tdls_stations; 5023 struct ieee80211_vif *curr_vif; 5024 }; 5025 5026 static void ath10k_mac_tdls_vif_stations_count_iter(void *data, 5027 struct ieee80211_sta *sta) 5028 { 5029 struct ath10k_mac_tdls_iter_data *iter_data = data; 5030 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5031 struct ieee80211_vif *sta_vif = arsta->arvif->vif; 5032 5033 if (sta->tdls && sta_vif == iter_data->curr_vif) 5034 iter_data->num_tdls_stations++; 5035 } 5036 5037 static int ath10k_mac_tdls_vif_stations_count(struct ieee80211_hw *hw, 5038 struct ieee80211_vif *vif) 5039 { 5040 struct ath10k_mac_tdls_iter_data data = {}; 5041 5042 data.curr_vif = vif; 5043 5044 ieee80211_iterate_stations_atomic(hw, 5045 ath10k_mac_tdls_vif_stations_count_iter, 5046 &data); 5047 return data.num_tdls_stations; 5048 } 5049 5050 static void ath10k_mac_tdls_vifs_count_iter(void *data, u8 *mac, 5051 struct ieee80211_vif *vif) 5052 { 5053 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5054 int *num_tdls_vifs = data; 5055 5056 if (vif->type != NL80211_IFTYPE_STATION) 5057 return; 5058 5059 if (ath10k_mac_tdls_vif_stations_count(arvif->ar->hw, vif) > 0) 5060 (*num_tdls_vifs)++; 
5061 } 5062 5063 static int ath10k_mac_tdls_vifs_count(struct ieee80211_hw *hw) 5064 { 5065 int num_tdls_vifs = 0; 5066 5067 ieee80211_iterate_active_interfaces_atomic(hw, 5068 IEEE80211_IFACE_ITER_NORMAL, 5069 ath10k_mac_tdls_vifs_count_iter, 5070 &num_tdls_vifs); 5071 return num_tdls_vifs; 5072 } 5073 5074 static int ath10k_sta_state(struct ieee80211_hw *hw, 5075 struct ieee80211_vif *vif, 5076 struct ieee80211_sta *sta, 5077 enum ieee80211_sta_state old_state, 5078 enum ieee80211_sta_state new_state) 5079 { 5080 struct ath10k *ar = hw->priv; 5081 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5082 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5083 int ret = 0; 5084 5085 if (old_state == IEEE80211_STA_NOTEXIST && 5086 new_state == IEEE80211_STA_NONE) { 5087 memset(arsta, 0, sizeof(*arsta)); 5088 arsta->arvif = arvif; 5089 INIT_WORK(&arsta->update_wk, ath10k_sta_rc_update_wk); 5090 } 5091 5092 /* cancel must be done outside the mutex to avoid deadlock */ 5093 if ((old_state == IEEE80211_STA_NONE && 5094 new_state == IEEE80211_STA_NOTEXIST)) 5095 cancel_work_sync(&arsta->update_wk); 5096 5097 mutex_lock(&ar->conf_mutex); 5098 5099 if (old_state == IEEE80211_STA_NOTEXIST && 5100 new_state == IEEE80211_STA_NONE) { 5101 /* 5102 * New station addition. 5103 */ 5104 enum wmi_peer_type peer_type = WMI_PEER_TYPE_DEFAULT; 5105 u32 num_tdls_stations; 5106 u32 num_tdls_vifs; 5107 5108 ath10k_dbg(ar, ATH10K_DBG_MAC, 5109 "mac vdev %d peer create %pM (new sta) sta %d / %d peer %d / %d\n", 5110 arvif->vdev_id, sta->addr, 5111 ar->num_stations + 1, ar->max_num_stations, 5112 ar->num_peers + 1, ar->max_num_peers); 5113 5114 ret = ath10k_mac_inc_num_stations(arvif, sta); 5115 if (ret) { 5116 ath10k_warn(ar, "refusing to associate station: too many connected already (%d)\n", 5117 ar->max_num_stations); 5118 goto exit; 5119 } 5120 5121 if (sta->tdls) 5122 peer_type = WMI_PEER_TYPE_TDLS; 5123 5124 ret = ath10k_peer_create(ar, arvif->vdev_id, sta->addr, 5125 peer_type); 5126 if (ret) { 5127 ath10k_warn(ar, "failed to add peer %pM for vdev %d when adding a new sta: %i\n", 5128 sta->addr, arvif->vdev_id, ret); 5129 ath10k_mac_dec_num_stations(arvif, sta); 5130 goto exit; 5131 } 5132 5133 if (!sta->tdls) 5134 goto exit; 5135 5136 num_tdls_stations = ath10k_mac_tdls_vif_stations_count(hw, vif); 5137 num_tdls_vifs = ath10k_mac_tdls_vifs_count(hw); 5138 5139 if (num_tdls_vifs >= ar->max_num_tdls_vdevs && 5140 num_tdls_stations == 0) { 5141 ath10k_warn(ar, "vdev %i exceeded maximum number of tdls vdevs %i\n", 5142 arvif->vdev_id, ar->max_num_tdls_vdevs); 5143 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 5144 ath10k_mac_dec_num_stations(arvif, sta); 5145 ret = -ENOBUFS; 5146 goto exit; 5147 } 5148 5149 if (num_tdls_stations == 0) { 5150 /* This is the first tdls peer in current vif */ 5151 enum wmi_tdls_state state = WMI_TDLS_ENABLE_ACTIVE; 5152 5153 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 5154 state); 5155 if (ret) { 5156 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 5157 arvif->vdev_id, ret); 5158 ath10k_peer_delete(ar, arvif->vdev_id, 5159 sta->addr); 5160 ath10k_mac_dec_num_stations(arvif, sta); 5161 goto exit; 5162 } 5163 } 5164 5165 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 5166 WMI_TDLS_PEER_STATE_PEERING); 5167 if (ret) { 5168 ath10k_warn(ar, 5169 "failed to update tdls peer %pM for vdev %d when adding a new sta: %i\n", 5170 sta->addr, arvif->vdev_id, ret); 5171 ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 5172 
ath10k_mac_dec_num_stations(arvif, sta); 5173 5174 if (num_tdls_stations != 0) 5175 goto exit; 5176 ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 5177 WMI_TDLS_DISABLE); 5178 } 5179 } else if ((old_state == IEEE80211_STA_NONE && 5180 new_state == IEEE80211_STA_NOTEXIST)) { 5181 /* 5182 * Existing station deletion. 5183 */ 5184 ath10k_dbg(ar, ATH10K_DBG_MAC, 5185 "mac vdev %d peer delete %pM (sta gone)\n", 5186 arvif->vdev_id, sta->addr); 5187 5188 ret = ath10k_peer_delete(ar, arvif->vdev_id, sta->addr); 5189 if (ret) 5190 ath10k_warn(ar, "failed to delete peer %pM for vdev %d: %i\n", 5191 sta->addr, arvif->vdev_id, ret); 5192 5193 ath10k_mac_dec_num_stations(arvif, sta); 5194 5195 if (!sta->tdls) 5196 goto exit; 5197 5198 if (ath10k_mac_tdls_vif_stations_count(hw, vif)) 5199 goto exit; 5200 5201 /* This was the last tdls peer in current vif */ 5202 ret = ath10k_wmi_update_fw_tdls_state(ar, arvif->vdev_id, 5203 WMI_TDLS_DISABLE); 5204 if (ret) { 5205 ath10k_warn(ar, "failed to update fw tdls state on vdev %i: %i\n", 5206 arvif->vdev_id, ret); 5207 } 5208 } else if (old_state == IEEE80211_STA_AUTH && 5209 new_state == IEEE80211_STA_ASSOC && 5210 (vif->type == NL80211_IFTYPE_AP || 5211 vif->type == NL80211_IFTYPE_ADHOC)) { 5212 /* 5213 * New association. 5214 */ 5215 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM associated\n", 5216 sta->addr); 5217 5218 ret = ath10k_station_assoc(ar, vif, sta, false); 5219 if (ret) 5220 ath10k_warn(ar, "failed to associate station %pM for vdev %i: %i\n", 5221 sta->addr, arvif->vdev_id, ret); 5222 } else if (old_state == IEEE80211_STA_ASSOC && 5223 new_state == IEEE80211_STA_AUTHORIZED && 5224 sta->tdls) { 5225 /* 5226 * Tdls station authorized. 5227 */ 5228 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac tdls sta %pM authorized\n", 5229 sta->addr); 5230 5231 ret = ath10k_station_assoc(ar, vif, sta, false); 5232 if (ret) { 5233 ath10k_warn(ar, "failed to associate tdls station %pM for vdev %i: %i\n", 5234 sta->addr, arvif->vdev_id, ret); 5235 goto exit; 5236 } 5237 5238 ret = ath10k_mac_tdls_peer_update(ar, arvif->vdev_id, sta, 5239 WMI_TDLS_PEER_STATE_CONNECTED); 5240 if (ret) 5241 ath10k_warn(ar, "failed to update tdls peer %pM for vdev %i: %i\n", 5242 sta->addr, arvif->vdev_id, ret); 5243 } else if (old_state == IEEE80211_STA_ASSOC && 5244 new_state == IEEE80211_STA_AUTH && 5245 (vif->type == NL80211_IFTYPE_AP || 5246 vif->type == NL80211_IFTYPE_ADHOC)) { 5247 /* 5248 * Disassociation. 
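 * (AP/IBSS only; the peer itself is deleted once the station reaches
 * the NONE -> NOTEXIST transition handled earlier in this function.)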
5249 */ 5250 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac sta %pM disassociated\n", 5251 sta->addr); 5252 5253 ret = ath10k_station_disassoc(ar, vif, sta); 5254 if (ret) 5255 ath10k_warn(ar, "failed to disassociate station: %pM vdev %i: %i\n", 5256 sta->addr, arvif->vdev_id, ret); 5257 } 5258 exit: 5259 mutex_unlock(&ar->conf_mutex); 5260 return ret; 5261 } 5262 5263 static int ath10k_conf_tx_uapsd(struct ath10k *ar, struct ieee80211_vif *vif, 5264 u16 ac, bool enable) 5265 { 5266 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5267 struct wmi_sta_uapsd_auto_trig_arg arg = {}; 5268 u32 prio = 0, acc = 0; 5269 u32 value = 0; 5270 int ret = 0; 5271 5272 lockdep_assert_held(&ar->conf_mutex); 5273 5274 if (arvif->vdev_type != WMI_VDEV_TYPE_STA) 5275 return 0; 5276 5277 switch (ac) { 5278 case IEEE80211_AC_VO: 5279 value = WMI_STA_PS_UAPSD_AC3_DELIVERY_EN | 5280 WMI_STA_PS_UAPSD_AC3_TRIGGER_EN; 5281 prio = 7; 5282 acc = 3; 5283 break; 5284 case IEEE80211_AC_VI: 5285 value = WMI_STA_PS_UAPSD_AC2_DELIVERY_EN | 5286 WMI_STA_PS_UAPSD_AC2_TRIGGER_EN; 5287 prio = 5; 5288 acc = 2; 5289 break; 5290 case IEEE80211_AC_BE: 5291 value = WMI_STA_PS_UAPSD_AC1_DELIVERY_EN | 5292 WMI_STA_PS_UAPSD_AC1_TRIGGER_EN; 5293 prio = 2; 5294 acc = 1; 5295 break; 5296 case IEEE80211_AC_BK: 5297 value = WMI_STA_PS_UAPSD_AC0_DELIVERY_EN | 5298 WMI_STA_PS_UAPSD_AC0_TRIGGER_EN; 5299 prio = 0; 5300 acc = 0; 5301 break; 5302 } 5303 5304 if (enable) 5305 arvif->u.sta.uapsd |= value; 5306 else 5307 arvif->u.sta.uapsd &= ~value; 5308 5309 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5310 WMI_STA_PS_PARAM_UAPSD, 5311 arvif->u.sta.uapsd); 5312 if (ret) { 5313 ath10k_warn(ar, "failed to set uapsd params: %d\n", ret); 5314 goto exit; 5315 } 5316 5317 if (arvif->u.sta.uapsd) 5318 value = WMI_STA_PS_RX_WAKE_POLICY_POLL_UAPSD; 5319 else 5320 value = WMI_STA_PS_RX_WAKE_POLICY_WAKE; 5321 5322 ret = ath10k_wmi_set_sta_ps_param(ar, arvif->vdev_id, 5323 WMI_STA_PS_PARAM_RX_WAKE_POLICY, 5324 value); 5325 if (ret) 5326 ath10k_warn(ar, "failed to set rx wake param: %d\n", ret); 5327 5328 ret = ath10k_mac_vif_recalc_ps_wake_threshold(arvif); 5329 if (ret) { 5330 ath10k_warn(ar, "failed to recalc ps wake threshold on vdev %i: %d\n", 5331 arvif->vdev_id, ret); 5332 return ret; 5333 } 5334 5335 ret = ath10k_mac_vif_recalc_ps_poll_count(arvif); 5336 if (ret) { 5337 ath10k_warn(ar, "failed to recalc ps poll count on vdev %i: %d\n", 5338 arvif->vdev_id, ret); 5339 return ret; 5340 } 5341 5342 if (test_bit(WMI_SERVICE_STA_UAPSD_BASIC_AUTO_TRIG, ar->wmi.svc_map) || 5343 test_bit(WMI_SERVICE_STA_UAPSD_VAR_AUTO_TRIG, ar->wmi.svc_map)) { 5344 /* Only userspace can make an educated decision when to send 5345 * trigger frame. The following effectively disables u-UAPSD 5346 * autotrigger in firmware (which is enabled by default 5347 * provided the autotrigger service is available). 
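 * This is done below by programming a single per-AC trigger entry with a
 * zero service interval and maximum suspend/delay intervals.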
5348 */ 5349 5350 arg.wmm_ac = acc; 5351 arg.user_priority = prio; 5352 arg.service_interval = 0; 5353 arg.suspend_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 5354 arg.delay_interval = WMI_STA_UAPSD_MAX_INTERVAL_MSEC; 5355 5356 ret = ath10k_wmi_vdev_sta_uapsd(ar, arvif->vdev_id, 5357 arvif->bssid, &arg, 1); 5358 if (ret) { 5359 ath10k_warn(ar, "failed to set uapsd auto trigger %d\n", 5360 ret); 5361 return ret; 5362 } 5363 } 5364 5365 exit: 5366 return ret; 5367 } 5368 5369 static int ath10k_conf_tx(struct ieee80211_hw *hw, 5370 struct ieee80211_vif *vif, u16 ac, 5371 const struct ieee80211_tx_queue_params *params) 5372 { 5373 struct ath10k *ar = hw->priv; 5374 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5375 struct wmi_wmm_params_arg *p = NULL; 5376 int ret; 5377 5378 mutex_lock(&ar->conf_mutex); 5379 5380 switch (ac) { 5381 case IEEE80211_AC_VO: 5382 p = &arvif->wmm_params.ac_vo; 5383 break; 5384 case IEEE80211_AC_VI: 5385 p = &arvif->wmm_params.ac_vi; 5386 break; 5387 case IEEE80211_AC_BE: 5388 p = &arvif->wmm_params.ac_be; 5389 break; 5390 case IEEE80211_AC_BK: 5391 p = &arvif->wmm_params.ac_bk; 5392 break; 5393 } 5394 5395 if (WARN_ON(!p)) { 5396 ret = -EINVAL; 5397 goto exit; 5398 } 5399 5400 p->cwmin = params->cw_min; 5401 p->cwmax = params->cw_max; 5402 p->aifs = params->aifs; 5403 5404 /* 5405 * The channel time duration programmed in the HW is in absolute 5406 * microseconds, while mac80211 gives the txop in units of 5407 * 32 microseconds. 5408 */ 5409 p->txop = params->txop * 32; 5410 5411 if (ar->wmi.ops->gen_vdev_wmm_conf) { 5412 ret = ath10k_wmi_vdev_wmm_conf(ar, arvif->vdev_id, 5413 &arvif->wmm_params); 5414 if (ret) { 5415 ath10k_warn(ar, "failed to set vdev wmm params on vdev %i: %d\n", 5416 arvif->vdev_id, ret); 5417 goto exit; 5418 } 5419 } else { 5420 /* This won't work well with multi-interface cases but it's 5421 * better than nothing. 
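 * The pdev WMM parameters are shared by all vdevs, so the most recent
 * ath10k_conf_tx() call overwrites whatever another interface configured.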
5422 */ 5423 ret = ath10k_wmi_pdev_set_wmm_params(ar, &arvif->wmm_params); 5424 if (ret) { 5425 ath10k_warn(ar, "failed to set wmm params: %d\n", ret); 5426 goto exit; 5427 } 5428 } 5429 5430 ret = ath10k_conf_tx_uapsd(ar, vif, ac, params->uapsd); 5431 if (ret) 5432 ath10k_warn(ar, "failed to set sta uapsd: %d\n", ret); 5433 5434 exit: 5435 mutex_unlock(&ar->conf_mutex); 5436 return ret; 5437 } 5438 5439 #define ATH10K_ROC_TIMEOUT_HZ (2*HZ) 5440 5441 static int ath10k_remain_on_channel(struct ieee80211_hw *hw, 5442 struct ieee80211_vif *vif, 5443 struct ieee80211_channel *chan, 5444 int duration, 5445 enum ieee80211_roc_type type) 5446 { 5447 struct ath10k *ar = hw->priv; 5448 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5449 struct wmi_start_scan_arg arg; 5450 int ret = 0; 5451 u32 scan_time_msec; 5452 5453 mutex_lock(&ar->conf_mutex); 5454 5455 spin_lock_bh(&ar->data_lock); 5456 switch (ar->scan.state) { 5457 case ATH10K_SCAN_IDLE: 5458 reinit_completion(&ar->scan.started); 5459 reinit_completion(&ar->scan.completed); 5460 reinit_completion(&ar->scan.on_channel); 5461 ar->scan.state = ATH10K_SCAN_STARTING; 5462 ar->scan.is_roc = true; 5463 ar->scan.vdev_id = arvif->vdev_id; 5464 ar->scan.roc_freq = chan->center_freq; 5465 ret = 0; 5466 break; 5467 case ATH10K_SCAN_STARTING: 5468 case ATH10K_SCAN_RUNNING: 5469 case ATH10K_SCAN_ABORTING: 5470 ret = -EBUSY; 5471 break; 5472 } 5473 spin_unlock_bh(&ar->data_lock); 5474 5475 if (ret) 5476 goto exit; 5477 5478 scan_time_msec = ar->hw->wiphy->max_remain_on_channel_duration * 2; 5479 5480 memset(&arg, 0, sizeof(arg)); 5481 ath10k_wmi_start_scan_init(ar, &arg); 5482 arg.vdev_id = arvif->vdev_id; 5483 arg.scan_id = ATH10K_SCAN_ID; 5484 arg.n_channels = 1; 5485 arg.channels[0] = chan->center_freq; 5486 arg.dwell_time_active = scan_time_msec; 5487 arg.dwell_time_passive = scan_time_msec; 5488 arg.max_scan_time = scan_time_msec; 5489 arg.scan_ctrl_flags |= WMI_SCAN_FLAG_PASSIVE; 5490 arg.scan_ctrl_flags |= WMI_SCAN_FILTER_PROBE_REQ; 5491 arg.burst_duration_ms = duration; 5492 5493 ret = ath10k_start_scan(ar, &arg); 5494 if (ret) { 5495 ath10k_warn(ar, "failed to start roc scan: %d\n", ret); 5496 spin_lock_bh(&ar->data_lock); 5497 ar->scan.state = ATH10K_SCAN_IDLE; 5498 spin_unlock_bh(&ar->data_lock); 5499 goto exit; 5500 } 5501 5502 ret = wait_for_completion_timeout(&ar->scan.on_channel, 3*HZ); 5503 if (ret == 0) { 5504 ath10k_warn(ar, "failed to switch to channel for roc scan\n"); 5505 5506 ret = ath10k_scan_stop(ar); 5507 if (ret) 5508 ath10k_warn(ar, "failed to stop scan: %d\n", ret); 5509 5510 ret = -ETIMEDOUT; 5511 goto exit; 5512 } 5513 5514 ieee80211_queue_delayed_work(ar->hw, &ar->scan.timeout, 5515 msecs_to_jiffies(duration)); 5516 5517 ret = 0; 5518 exit: 5519 mutex_unlock(&ar->conf_mutex); 5520 return ret; 5521 } 5522 5523 static int ath10k_cancel_remain_on_channel(struct ieee80211_hw *hw) 5524 { 5525 struct ath10k *ar = hw->priv; 5526 5527 mutex_lock(&ar->conf_mutex); 5528 ath10k_scan_abort(ar); 5529 mutex_unlock(&ar->conf_mutex); 5530 5531 cancel_delayed_work_sync(&ar->scan.timeout); 5532 5533 return 0; 5534 } 5535 5536 /* 5537 * Both RTS and Fragmentation threshold are interface-specific 5538 * in ath10k, but device-specific in mac80211. 
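 * ath10k_set_rts_threshold() below therefore walks ar->arvifs and applies
 * the wiphy-wide value to every vdev.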
5539 */ 5540 5541 static int ath10k_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 5542 { 5543 struct ath10k *ar = hw->priv; 5544 struct ath10k_vif *arvif; 5545 int ret = 0; 5546 5547 mutex_lock(&ar->conf_mutex); 5548 list_for_each_entry(arvif, &ar->arvifs, list) { 5549 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac vdev %d rts threshold %d\n", 5550 arvif->vdev_id, value); 5551 5552 ret = ath10k_mac_set_rts(arvif, value); 5553 if (ret) { 5554 ath10k_warn(ar, "failed to set rts threshold for vdev %d: %d\n", 5555 arvif->vdev_id, ret); 5556 break; 5557 } 5558 } 5559 mutex_unlock(&ar->conf_mutex); 5560 5561 return ret; 5562 } 5563 5564 static void ath10k_flush(struct ieee80211_hw *hw, struct ieee80211_vif *vif, 5565 u32 queues, bool drop) 5566 { 5567 struct ath10k *ar = hw->priv; 5568 bool skip; 5569 int ret; 5570 5571 /* mac80211 doesn't care if we really xmit queued frames or not; 5572 * we'll collect those frames either way if we stop/delete vdevs */ 5573 if (drop) 5574 return; 5575 5576 mutex_lock(&ar->conf_mutex); 5577 5578 if (ar->state == ATH10K_STATE_WEDGED) 5579 goto skip; 5580 5581 ret = wait_event_timeout(ar->htt.empty_tx_wq, ({ 5582 bool empty; 5583 5584 spin_lock_bh(&ar->htt.tx_lock); 5585 empty = (ar->htt.num_pending_tx == 0); 5586 spin_unlock_bh(&ar->htt.tx_lock); 5587 5588 skip = (ar->state == ATH10K_STATE_WEDGED) || 5589 test_bit(ATH10K_FLAG_CRASH_FLUSH, 5590 &ar->dev_flags); 5591 5592 (empty || skip); 5593 }), ATH10K_FLUSH_TIMEOUT_HZ); 5594 5595 if (ret <= 0 || skip) 5596 ath10k_warn(ar, "failed to flush transmit queue (skip %i ar-state %i): %i\n", 5597 skip, ar->state, ret); 5598 5599 skip: 5600 mutex_unlock(&ar->conf_mutex); 5601 } 5602 5603 /* TODO: Implement this function properly 5604 * For now it is needed to reply to Probe Requests in IBSS mode. 5605 * Probably we need this information from FW. 5606 */ 5607 static int ath10k_tx_last_beacon(struct ieee80211_hw *hw) 5608 { 5609 return 1; 5610 } 5611 5612 static void ath10k_reconfig_complete(struct ieee80211_hw *hw, 5613 enum ieee80211_reconfig_type reconfig_type) 5614 { 5615 struct ath10k *ar = hw->priv; 5616 5617 if (reconfig_type != IEEE80211_RECONFIG_TYPE_RESTART) 5618 return; 5619 5620 mutex_lock(&ar->conf_mutex); 5621 5622 /* If device failed to restart it will be in a different state, e.g.
5623 * ATH10K_STATE_WEDGED */ 5624 if (ar->state == ATH10K_STATE_RESTARTED) { 5625 ath10k_info(ar, "device successfully recovered\n"); 5626 ar->state = ATH10K_STATE_ON; 5627 ieee80211_wake_queues(ar->hw); 5628 } 5629 5630 mutex_unlock(&ar->conf_mutex); 5631 } 5632 5633 static int ath10k_get_survey(struct ieee80211_hw *hw, int idx, 5634 struct survey_info *survey) 5635 { 5636 struct ath10k *ar = hw->priv; 5637 struct ieee80211_supported_band *sband; 5638 struct survey_info *ar_survey = &ar->survey[idx]; 5639 int ret = 0; 5640 5641 mutex_lock(&ar->conf_mutex); 5642 5643 sband = hw->wiphy->bands[IEEE80211_BAND_2GHZ]; 5644 if (sband && idx >= sband->n_channels) { 5645 idx -= sband->n_channels; 5646 sband = NULL; 5647 } 5648 5649 if (!sband) 5650 sband = hw->wiphy->bands[IEEE80211_BAND_5GHZ]; 5651 5652 if (!sband || idx >= sband->n_channels) { 5653 ret = -ENOENT; 5654 goto exit; 5655 } 5656 5657 spin_lock_bh(&ar->data_lock); 5658 memcpy(survey, ar_survey, sizeof(*survey)); 5659 spin_unlock_bh(&ar->data_lock); 5660 5661 survey->channel = &sband->channels[idx]; 5662 5663 if (ar->rx_channel == survey->channel) 5664 survey->filled |= SURVEY_INFO_IN_USE; 5665 5666 exit: 5667 mutex_unlock(&ar->conf_mutex); 5668 return ret; 5669 } 5670 5671 static bool 5672 ath10k_mac_bitrate_mask_has_single_rate(struct ath10k *ar, 5673 enum ieee80211_band band, 5674 const struct cfg80211_bitrate_mask *mask) 5675 { 5676 int num_rates = 0; 5677 int i; 5678 5679 num_rates += hweight32(mask->control[band].legacy); 5680 5681 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) 5682 num_rates += hweight8(mask->control[band].ht_mcs[i]); 5683 5684 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) 5685 num_rates += hweight16(mask->control[band].vht_mcs[i]); 5686 5687 return num_rates == 1; 5688 } 5689 5690 static bool 5691 ath10k_mac_bitrate_mask_get_single_nss(struct ath10k *ar, 5692 enum ieee80211_band band, 5693 const struct cfg80211_bitrate_mask *mask, 5694 int *nss) 5695 { 5696 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 5697 u16 vht_mcs_map = le16_to_cpu(sband->vht_cap.vht_mcs.tx_mcs_map); 5698 u8 ht_nss_mask = 0; 5699 u8 vht_nss_mask = 0; 5700 int i; 5701 5702 if (mask->control[band].legacy) 5703 return false; 5704 5705 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 5706 if (mask->control[band].ht_mcs[i] == 0) 5707 continue; 5708 else if (mask->control[band].ht_mcs[i] == 5709 sband->ht_cap.mcs.rx_mask[i]) 5710 ht_nss_mask |= BIT(i); 5711 else 5712 return false; 5713 } 5714 5715 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 5716 if (mask->control[band].vht_mcs[i] == 0) 5717 continue; 5718 else if (mask->control[band].vht_mcs[i] == 5719 ath10k_mac_get_max_vht_mcs_map(vht_mcs_map, i)) 5720 vht_nss_mask |= BIT(i); 5721 else 5722 return false; 5723 } 5724 5725 if (ht_nss_mask != vht_nss_mask) 5726 return false; 5727 5728 if (ht_nss_mask == 0) 5729 return false; 5730 5731 if (BIT(fls(ht_nss_mask)) - 1 != ht_nss_mask) 5732 return false; 5733 5734 *nss = fls(ht_nss_mask); 5735 5736 return true; 5737 } 5738 5739 static int 5740 ath10k_mac_bitrate_mask_get_single_rate(struct ath10k *ar, 5741 enum ieee80211_band band, 5742 const struct cfg80211_bitrate_mask *mask, 5743 u8 *rate, u8 *nss) 5744 { 5745 struct ieee80211_supported_band *sband = &ar->mac.sbands[band]; 5746 int rate_idx; 5747 int i; 5748 u16 bitrate; 5749 u8 preamble; 5750 u8 hw_rate; 5751 5752 if (hweight32(mask->control[band].legacy) == 1) { 5753 rate_idx = ffs(mask->control[band].legacy) - 1; 
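/* The WMI fixed rate value built below packs the preamble type into
 * bits 6-7, nss - 1 into bits 4-5 and the hw rate/MCS index into bits 0-3.
 */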
5754 5755 hw_rate = sband->bitrates[rate_idx].hw_value; 5756 bitrate = sband->bitrates[rate_idx].bitrate; 5757 5758 if (ath10k_mac_bitrate_is_cck(bitrate)) 5759 preamble = WMI_RATE_PREAMBLE_CCK; 5760 else 5761 preamble = WMI_RATE_PREAMBLE_OFDM; 5762 5763 *nss = 1; 5764 *rate = preamble << 6 | 5765 (*nss - 1) << 4 | 5766 hw_rate << 0; 5767 5768 return 0; 5769 } 5770 5771 for (i = 0; i < ARRAY_SIZE(mask->control[band].ht_mcs); i++) { 5772 if (hweight8(mask->control[band].ht_mcs[i]) == 1) { 5773 *nss = i + 1; 5774 *rate = WMI_RATE_PREAMBLE_HT << 6 | 5775 (*nss - 1) << 4 | 5776 (ffs(mask->control[band].ht_mcs[i]) - 1); 5777 5778 return 0; 5779 } 5780 } 5781 5782 for (i = 0; i < ARRAY_SIZE(mask->control[band].vht_mcs); i++) { 5783 if (hweight16(mask->control[band].vht_mcs[i]) == 1) { 5784 *nss = i + 1; 5785 *rate = WMI_RATE_PREAMBLE_VHT << 6 | 5786 (*nss - 1) << 4 | 5787 (ffs(mask->control[band].vht_mcs[i]) - 1); 5788 5789 return 0; 5790 } 5791 } 5792 5793 return -EINVAL; 5794 } 5795 5796 static int ath10k_mac_set_fixed_rate_params(struct ath10k_vif *arvif, 5797 u8 rate, u8 nss, u8 sgi) 5798 { 5799 struct ath10k *ar = arvif->ar; 5800 u32 vdev_param; 5801 int ret; 5802 5803 lockdep_assert_held(&ar->conf_mutex); 5804 5805 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac set fixed rate params vdev %i rate 0x%02hhx nss %hhu sgi %hhu\n", 5806 arvif->vdev_id, rate, nss, sgi); 5807 5808 vdev_param = ar->wmi.vdev_param->fixed_rate; 5809 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, rate); 5810 if (ret) { 5811 ath10k_warn(ar, "failed to set fixed rate param 0x%02x: %d\n", 5812 rate, ret); 5813 return ret; 5814 } 5815 5816 vdev_param = ar->wmi.vdev_param->nss; 5817 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, nss); 5818 if (ret) { 5819 ath10k_warn(ar, "failed to set nss param %d: %d\n", nss, ret); 5820 return ret; 5821 } 5822 5823 vdev_param = ar->wmi.vdev_param->sgi; 5824 ret = ath10k_wmi_vdev_set_param(ar, arvif->vdev_id, vdev_param, sgi); 5825 if (ret) { 5826 ath10k_warn(ar, "failed to set sgi param %d: %d\n", sgi, ret); 5827 return ret; 5828 } 5829 5830 return 0; 5831 } 5832 5833 static bool 5834 ath10k_mac_can_set_bitrate_mask(struct ath10k *ar, 5835 enum ieee80211_band band, 5836 const struct cfg80211_bitrate_mask *mask) 5837 { 5838 int i; 5839 u16 vht_mcs; 5840 5841 /* Due to firmware limitation in WMI_PEER_ASSOC_CMDID it is impossible 5842 * to express all VHT MCS rate masks. Effectively only the following 5843 * ranges can be used: none, 0-7, 0-8 and 0-9. 
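 * The switch below therefore accepts only an empty mask or exactly
 * 0xff (MCS 0-7), 0x1ff (0-8) or 0x3ff (0-9) per NSS.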
5844 */ 5845 for (i = 0; i < NL80211_VHT_NSS_MAX; i++) { 5846 vht_mcs = mask->control[band].vht_mcs[i]; 5847 5848 switch (vht_mcs) { 5849 case 0: 5850 case BIT(8) - 1: 5851 case BIT(9) - 1: 5852 case BIT(10) - 1: 5853 break; 5854 default: 5855 ath10k_warn(ar, "refusing bitrate mask with missing 0-7 VHT MCS rates\n"); 5856 return false; 5857 } 5858 } 5859 5860 return true; 5861 } 5862 5863 static void ath10k_mac_set_bitrate_mask_iter(void *data, 5864 struct ieee80211_sta *sta) 5865 { 5866 struct ath10k_vif *arvif = data; 5867 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5868 struct ath10k *ar = arvif->ar; 5869 5870 if (arsta->arvif != arvif) 5871 return; 5872 5873 spin_lock_bh(&ar->data_lock); 5874 arsta->changed |= IEEE80211_RC_SUPP_RATES_CHANGED; 5875 spin_unlock_bh(&ar->data_lock); 5876 5877 ieee80211_queue_work(ar->hw, &arsta->update_wk); 5878 } 5879 5880 static int ath10k_mac_op_set_bitrate_mask(struct ieee80211_hw *hw, 5881 struct ieee80211_vif *vif, 5882 const struct cfg80211_bitrate_mask *mask) 5883 { 5884 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 5885 struct cfg80211_chan_def def; 5886 struct ath10k *ar = arvif->ar; 5887 enum ieee80211_band band; 5888 const u8 *ht_mcs_mask; 5889 const u16 *vht_mcs_mask; 5890 u8 rate; 5891 u8 nss; 5892 u8 sgi; 5893 int single_nss; 5894 int ret; 5895 5896 if (ath10k_mac_vif_chan(vif, &def)) 5897 return -EPERM; 5898 5899 band = def.chan->band; 5900 ht_mcs_mask = mask->control[band].ht_mcs; 5901 vht_mcs_mask = mask->control[band].vht_mcs; 5902 5903 sgi = mask->control[band].gi; 5904 if (sgi == NL80211_TXRATE_FORCE_LGI) 5905 return -EINVAL; 5906 5907 if (ath10k_mac_bitrate_mask_has_single_rate(ar, band, mask)) { 5908 ret = ath10k_mac_bitrate_mask_get_single_rate(ar, band, mask, 5909 &rate, &nss); 5910 if (ret) { 5911 ath10k_warn(ar, "failed to get single rate for vdev %i: %d\n", 5912 arvif->vdev_id, ret); 5913 return ret; 5914 } 5915 } else if (ath10k_mac_bitrate_mask_get_single_nss(ar, band, mask, 5916 &single_nss)) { 5917 rate = WMI_FIXED_RATE_NONE; 5918 nss = single_nss; 5919 } else { 5920 rate = WMI_FIXED_RATE_NONE; 5921 nss = min(ar->num_rf_chains, 5922 max(ath10k_mac_max_ht_nss(ht_mcs_mask), 5923 ath10k_mac_max_vht_nss(vht_mcs_mask))); 5924 5925 if (!ath10k_mac_can_set_bitrate_mask(ar, band, mask)) 5926 return -EINVAL; 5927 5928 mutex_lock(&ar->conf_mutex); 5929 5930 arvif->bitrate_mask = *mask; 5931 ieee80211_iterate_stations_atomic(ar->hw, 5932 ath10k_mac_set_bitrate_mask_iter, 5933 arvif); 5934 5935 mutex_unlock(&ar->conf_mutex); 5936 } 5937 5938 mutex_lock(&ar->conf_mutex); 5939 5940 ret = ath10k_mac_set_fixed_rate_params(arvif, rate, nss, sgi); 5941 if (ret) { 5942 ath10k_warn(ar, "failed to set fixed rate params on vdev %i: %d\n", 5943 arvif->vdev_id, ret); 5944 goto exit; 5945 } 5946 5947 exit: 5948 mutex_unlock(&ar->conf_mutex); 5949 5950 return ret; 5951 } 5952 5953 static void ath10k_sta_rc_update(struct ieee80211_hw *hw, 5954 struct ieee80211_vif *vif, 5955 struct ieee80211_sta *sta, 5956 u32 changed) 5957 { 5958 struct ath10k *ar = hw->priv; 5959 struct ath10k_sta *arsta = (struct ath10k_sta *)sta->drv_priv; 5960 u32 bw, smps; 5961 5962 spin_lock_bh(&ar->data_lock); 5963 5964 ath10k_dbg(ar, ATH10K_DBG_MAC, 5965 "mac sta rc update for %pM changed %08x bw %d nss %d smps %d\n", 5966 sta->addr, changed, sta->bandwidth, sta->rx_nss, 5967 sta->smps_mode); 5968 5969 if (changed & IEEE80211_RC_BW_CHANGED) { 5970 bw = WMI_PEER_CHWIDTH_20MHZ; 5971 5972 switch (sta->bandwidth) { 5973 case IEEE80211_STA_RX_BW_20: 5974 
bw = WMI_PEER_CHWIDTH_20MHZ; 5975 break; 5976 case IEEE80211_STA_RX_BW_40: 5977 bw = WMI_PEER_CHWIDTH_40MHZ; 5978 break; 5979 case IEEE80211_STA_RX_BW_80: 5980 bw = WMI_PEER_CHWIDTH_80MHZ; 5981 break; 5982 case IEEE80211_STA_RX_BW_160: 5983 ath10k_warn(ar, "Invalid bandwidth %d in rc update for %pM\n", 5984 sta->bandwidth, sta->addr); 5985 bw = WMI_PEER_CHWIDTH_20MHZ; 5986 break; 5987 } 5988 5989 arsta->bw = bw; 5990 } 5991 5992 if (changed & IEEE80211_RC_NSS_CHANGED) 5993 arsta->nss = sta->rx_nss; 5994 5995 if (changed & IEEE80211_RC_SMPS_CHANGED) { 5996 smps = WMI_PEER_SMPS_PS_NONE; 5997 5998 switch (sta->smps_mode) { 5999 case IEEE80211_SMPS_AUTOMATIC: 6000 case IEEE80211_SMPS_OFF: 6001 smps = WMI_PEER_SMPS_PS_NONE; 6002 break; 6003 case IEEE80211_SMPS_STATIC: 6004 smps = WMI_PEER_SMPS_STATIC; 6005 break; 6006 case IEEE80211_SMPS_DYNAMIC: 6007 smps = WMI_PEER_SMPS_DYNAMIC; 6008 break; 6009 case IEEE80211_SMPS_NUM_MODES: 6010 ath10k_warn(ar, "Invalid smps %d in sta rc update for %pM\n", 6011 sta->smps_mode, sta->addr); 6012 smps = WMI_PEER_SMPS_PS_NONE; 6013 break; 6014 } 6015 6016 arsta->smps = smps; 6017 } 6018 6019 arsta->changed |= changed; 6020 6021 spin_unlock_bh(&ar->data_lock); 6022 6023 ieee80211_queue_work(hw, &arsta->update_wk); 6024 } 6025 6026 static u64 ath10k_get_tsf(struct ieee80211_hw *hw, struct ieee80211_vif *vif) 6027 { 6028 /* 6029 * FIXME: Return 0 for time being. Need to figure out whether FW 6030 * has the API to fetch 64-bit local TSF 6031 */ 6032 6033 return 0; 6034 } 6035 6036 static int ath10k_ampdu_action(struct ieee80211_hw *hw, 6037 struct ieee80211_vif *vif, 6038 enum ieee80211_ampdu_mlme_action action, 6039 struct ieee80211_sta *sta, u16 tid, u16 *ssn, 6040 u8 buf_size) 6041 { 6042 struct ath10k *ar = hw->priv; 6043 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 6044 6045 ath10k_dbg(ar, ATH10K_DBG_MAC, "mac ampdu vdev_id %i sta %pM tid %hu action %d\n", 6046 arvif->vdev_id, sta->addr, tid, action); 6047 6048 switch (action) { 6049 case IEEE80211_AMPDU_RX_START: 6050 case IEEE80211_AMPDU_RX_STOP: 6051 /* HTT AddBa/DelBa events trigger mac80211 Rx BA session 6052 * creation/removal. Do we need to verify this? 6053 */ 6054 return 0; 6055 case IEEE80211_AMPDU_TX_START: 6056 case IEEE80211_AMPDU_TX_STOP_CONT: 6057 case IEEE80211_AMPDU_TX_STOP_FLUSH: 6058 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 6059 case IEEE80211_AMPDU_TX_OPERATIONAL: 6060 /* Firmware offloads Tx aggregation entirely so deny mac80211 6061 * Tx aggregation requests. 6062 */ 6063 return -EOPNOTSUPP; 6064 } 6065 6066 return -EINVAL; 6067 } 6068 6069 static void 6070 ath10k_mac_update_rx_channel(struct ath10k *ar, 6071 struct ieee80211_chanctx_conf *ctx, 6072 struct ieee80211_vif_chanctx_switch *vifs, 6073 int n_vifs) 6074 { 6075 struct cfg80211_chan_def *def = NULL; 6076 6077 /* Both locks are required because ar->rx_channel is modified. This 6078 * allows readers to hold either lock. 6079 */ 6080 lockdep_assert_held(&ar->conf_mutex); 6081 lockdep_assert_held(&ar->data_lock); 6082 6083 WARN_ON(ctx && vifs); 6084 WARN_ON(vifs && n_vifs != 1); 6085 6086 /* FIXME: Sort of an optimization and a workaround. Peers and vifs are 6087 * on a linked list now. Doing a lookup peer -> vif -> chanctx for each 6088 * ppdu on Rx may reduce performance on low-end systems. 
It should be 6089 * possible to make tables/hashmaps to speed the lookup up (be wary of 6090 * cpu data cache lines though regarding sizes) but to keep the initial 6091 * implementation simple and less intrusive fall back to the slow lookup 6092 * only for multi-channel cases. Single-channel cases will continue to 6093 * use the old channel derivation and thus performance should not be 6094 * affected much. 6095 */ 6096 rcu_read_lock(); 6097 if (!ctx && ath10k_mac_num_chanctxs(ar) == 1) { 6098 ieee80211_iter_chan_contexts_atomic(ar->hw, 6099 ath10k_mac_get_any_chandef_iter, 6100 &def); 6101 6102 if (vifs) 6103 def = &vifs[0].new_ctx->def; 6104 6105 ar->rx_channel = def->chan; 6106 } else if (ctx && ath10k_mac_num_chanctxs(ar) == 0) { 6107 ar->rx_channel = ctx->def.chan; 6108 } else { 6109 ar->rx_channel = NULL; 6110 } 6111 rcu_read_unlock(); 6112 } 6113 6114 static int 6115 ath10k_mac_op_add_chanctx(struct ieee80211_hw *hw, 6116 struct ieee80211_chanctx_conf *ctx) 6117 { 6118 struct ath10k *ar = hw->priv; 6119 6120 ath10k_dbg(ar, ATH10K_DBG_MAC, 6121 "mac chanctx add freq %hu width %d ptr %p\n", 6122 ctx->def.chan->center_freq, ctx->def.width, ctx); 6123 6124 mutex_lock(&ar->conf_mutex); 6125 6126 spin_lock_bh(&ar->data_lock); 6127 ath10k_mac_update_rx_channel(ar, ctx, NULL, 0); 6128 spin_unlock_bh(&ar->data_lock); 6129 6130 ath10k_recalc_radar_detection(ar); 6131 ath10k_monitor_recalc(ar); 6132 6133 mutex_unlock(&ar->conf_mutex); 6134 6135 return 0; 6136 } 6137 6138 static void 6139 ath10k_mac_op_remove_chanctx(struct ieee80211_hw *hw, 6140 struct ieee80211_chanctx_conf *ctx) 6141 { 6142 struct ath10k *ar = hw->priv; 6143 6144 ath10k_dbg(ar, ATH10K_DBG_MAC, 6145 "mac chanctx remove freq %hu width %d ptr %p\n", 6146 ctx->def.chan->center_freq, ctx->def.width, ctx); 6147 6148 mutex_lock(&ar->conf_mutex); 6149 6150 spin_lock_bh(&ar->data_lock); 6151 ath10k_mac_update_rx_channel(ar, NULL, NULL, 0); 6152 spin_unlock_bh(&ar->data_lock); 6153 6154 ath10k_recalc_radar_detection(ar); 6155 ath10k_monitor_recalc(ar); 6156 6157 mutex_unlock(&ar->conf_mutex); 6158 } 6159 6160 static void 6161 ath10k_mac_op_change_chanctx(struct ieee80211_hw *hw, 6162 struct ieee80211_chanctx_conf *ctx, 6163 u32 changed) 6164 { 6165 struct ath10k *ar = hw->priv; 6166 6167 mutex_lock(&ar->conf_mutex); 6168 6169 ath10k_dbg(ar, ATH10K_DBG_MAC, 6170 "mac chanctx change freq %hu width %d ptr %p changed %x\n", 6171 ctx->def.chan->center_freq, ctx->def.width, ctx, changed); 6172 6173 /* This shouldn't really happen because channel switching should use 6174 * switch_vif_chanctx(). 6175 */ 6176 if (WARN_ON(changed & IEEE80211_CHANCTX_CHANGE_CHANNEL)) 6177 goto unlock; 6178 6179 ath10k_recalc_radar_detection(ar); 6180 6181 /* FIXME: How to configure Rx chains properly? */ 6182 6183 /* No other actions are actually necessary. Firmware maintains channel 6184 * definitions per vdev internally and there's no host-side channel 6185 * context abstraction to configure, e.g. channel width.
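 * Channel changes themselves are applied by restarting the affected
 * vdevs in ath10k_mac_op_switch_vif_chanctx().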
6186 */ 6187 6188 unlock: 6189 mutex_unlock(&ar->conf_mutex); 6190 } 6191 6192 static int 6193 ath10k_mac_op_assign_vif_chanctx(struct ieee80211_hw *hw, 6194 struct ieee80211_vif *vif, 6195 struct ieee80211_chanctx_conf *ctx) 6196 { 6197 struct ath10k *ar = hw->priv; 6198 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6199 int ret; 6200 6201 mutex_lock(&ar->conf_mutex); 6202 6203 ath10k_dbg(ar, ATH10K_DBG_MAC, 6204 "mac chanctx assign ptr %p vdev_id %i\n", 6205 ctx, arvif->vdev_id); 6206 6207 if (WARN_ON(arvif->is_started)) { 6208 mutex_unlock(&ar->conf_mutex); 6209 return -EBUSY; 6210 } 6211 6212 ret = ath10k_vdev_start(arvif, &ctx->def); 6213 if (ret) { 6214 ath10k_warn(ar, "failed to start vdev %i addr %pM on freq %d: %d\n", 6215 arvif->vdev_id, vif->addr, 6216 ctx->def.chan->center_freq, ret); 6217 goto err; 6218 } 6219 6220 arvif->is_started = true; 6221 6222 if (vif->type == NL80211_IFTYPE_MONITOR) { 6223 ret = ath10k_wmi_vdev_up(ar, arvif->vdev_id, 0, vif->addr); 6224 if (ret) { 6225 ath10k_warn(ar, "failed to up monitor vdev %i: %d\n", 6226 arvif->vdev_id, ret); 6227 goto err_stop; 6228 } 6229 6230 arvif->is_up = true; 6231 } 6232 6233 mutex_unlock(&ar->conf_mutex); 6234 return 0; 6235 6236 err_stop: 6237 ath10k_vdev_stop(arvif); 6238 arvif->is_started = false; 6239 6240 err: 6241 mutex_unlock(&ar->conf_mutex); 6242 return ret; 6243 } 6244 6245 static void 6246 ath10k_mac_op_unassign_vif_chanctx(struct ieee80211_hw *hw, 6247 struct ieee80211_vif *vif, 6248 struct ieee80211_chanctx_conf *ctx) 6249 { 6250 struct ath10k *ar = hw->priv; 6251 struct ath10k_vif *arvif = (void *)vif->drv_priv; 6252 int ret; 6253 6254 mutex_lock(&ar->conf_mutex); 6255 6256 ath10k_dbg(ar, ATH10K_DBG_MAC, 6257 "mac chanctx unassign ptr %p vdev_id %i\n", 6258 ctx, arvif->vdev_id); 6259 6260 WARN_ON(!arvif->is_started); 6261 6262 if (vif->type == NL80211_IFTYPE_MONITOR) { 6263 WARN_ON(!arvif->is_up); 6264 6265 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 6266 if (ret) 6267 ath10k_warn(ar, "failed to down monitor vdev %i: %d\n", 6268 arvif->vdev_id, ret); 6269 6270 arvif->is_up = false; 6271 } 6272 6273 ret = ath10k_vdev_stop(arvif); 6274 if (ret) 6275 ath10k_warn(ar, "failed to stop vdev %i: %d\n", 6276 arvif->vdev_id, ret); 6277 6278 arvif->is_started = false; 6279 6280 mutex_unlock(&ar->conf_mutex); 6281 } 6282 6283 static int 6284 ath10k_mac_op_switch_vif_chanctx(struct ieee80211_hw *hw, 6285 struct ieee80211_vif_chanctx_switch *vifs, 6286 int n_vifs, 6287 enum ieee80211_chanctx_switch_mode mode) 6288 { 6289 struct ath10k *ar = hw->priv; 6290 struct ath10k_vif *arvif; 6291 int ret; 6292 int i; 6293 6294 mutex_lock(&ar->conf_mutex); 6295 6296 ath10k_dbg(ar, ATH10K_DBG_MAC, 6297 "mac chanctx switch n_vifs %d mode %d\n", 6298 n_vifs, mode); 6299 6300 /* First stop monitor interface. Some FW versions crash if there's a 6301 * lone monitor interface. 
6302 */ 6303 if (ar->monitor_started) 6304 ath10k_monitor_stop(ar); 6305 6306 for (i = 0; i < n_vifs; i++) { 6307 arvif = ath10k_vif_to_arvif(vifs[i].vif); 6308 6309 ath10k_dbg(ar, ATH10K_DBG_MAC, 6310 "mac chanctx switch vdev_id %i freq %hu->%hu width %d->%d\n", 6311 arvif->vdev_id, 6312 vifs[i].old_ctx->def.chan->center_freq, 6313 vifs[i].new_ctx->def.chan->center_freq, 6314 vifs[i].old_ctx->def.width, 6315 vifs[i].new_ctx->def.width); 6316 6317 if (WARN_ON(!arvif->is_started)) 6318 continue; 6319 6320 if (WARN_ON(!arvif->is_up)) 6321 continue; 6322 6323 ret = ath10k_wmi_vdev_down(ar, arvif->vdev_id); 6324 if (ret) { 6325 ath10k_warn(ar, "failed to down vdev %d: %d\n", 6326 arvif->vdev_id, ret); 6327 continue; 6328 } 6329 } 6330 6331 /* All relevant vdevs are downed and associated channel resources 6332 * should be available for the channel switch now. 6333 */ 6334 6335 spin_lock_bh(&ar->data_lock); 6336 ath10k_mac_update_rx_channel(ar, NULL, vifs, n_vifs); 6337 spin_unlock_bh(&ar->data_lock); 6338 6339 for (i = 0; i < n_vifs; i++) { 6340 arvif = ath10k_vif_to_arvif(vifs[i].vif); 6341 6342 if (WARN_ON(!arvif->is_started)) 6343 continue; 6344 6345 if (WARN_ON(!arvif->is_up)) 6346 continue; 6347 6348 ret = ath10k_mac_setup_bcn_tmpl(arvif); 6349 if (ret) 6350 ath10k_warn(ar, "failed to update bcn tmpl during csa: %d\n", 6351 ret); 6352 6353 ret = ath10k_mac_setup_prb_tmpl(arvif); 6354 if (ret) 6355 ath10k_warn(ar, "failed to update prb tmpl during csa: %d\n", 6356 ret); 6357 6358 ret = ath10k_vdev_restart(arvif, &vifs[i].new_ctx->def); 6359 if (ret) { 6360 ath10k_warn(ar, "failed to restart vdev %d: %d\n", 6361 arvif->vdev_id, ret); 6362 continue; 6363 } 6364 6365 ret = ath10k_wmi_vdev_up(arvif->ar, arvif->vdev_id, arvif->aid, 6366 arvif->bssid); 6367 if (ret) { 6368 ath10k_warn(ar, "failed to bring vdev up %d: %d\n", 6369 arvif->vdev_id, ret); 6370 continue; 6371 } 6372 } 6373 6374 ath10k_monitor_recalc(ar); 6375 6376 mutex_unlock(&ar->conf_mutex); 6377 return 0; 6378 } 6379 6380 static const struct ieee80211_ops ath10k_ops = { 6381 .tx = ath10k_tx, 6382 .start = ath10k_start, 6383 .stop = ath10k_stop, 6384 .config = ath10k_config, 6385 .add_interface = ath10k_add_interface, 6386 .remove_interface = ath10k_remove_interface, 6387 .configure_filter = ath10k_configure_filter, 6388 .bss_info_changed = ath10k_bss_info_changed, 6389 .hw_scan = ath10k_hw_scan, 6390 .cancel_hw_scan = ath10k_cancel_hw_scan, 6391 .set_key = ath10k_set_key, 6392 .set_default_unicast_key = ath10k_set_default_unicast_key, 6393 .sta_state = ath10k_sta_state, 6394 .conf_tx = ath10k_conf_tx, 6395 .remain_on_channel = ath10k_remain_on_channel, 6396 .cancel_remain_on_channel = ath10k_cancel_remain_on_channel, 6397 .set_rts_threshold = ath10k_set_rts_threshold, 6398 .flush = ath10k_flush, 6399 .tx_last_beacon = ath10k_tx_last_beacon, 6400 .set_antenna = ath10k_set_antenna, 6401 .get_antenna = ath10k_get_antenna, 6402 .reconfig_complete = ath10k_reconfig_complete, 6403 .get_survey = ath10k_get_survey, 6404 .set_bitrate_mask = ath10k_mac_op_set_bitrate_mask, 6405 .sta_rc_update = ath10k_sta_rc_update, 6406 .get_tsf = ath10k_get_tsf, 6407 .ampdu_action = ath10k_ampdu_action, 6408 .get_et_sset_count = ath10k_debug_get_et_sset_count, 6409 .get_et_stats = ath10k_debug_get_et_stats, 6410 .get_et_strings = ath10k_debug_get_et_strings, 6411 .add_chanctx = ath10k_mac_op_add_chanctx, 6412 .remove_chanctx = ath10k_mac_op_remove_chanctx, 6413 .change_chanctx = ath10k_mac_op_change_chanctx, 6414 .assign_vif_chanctx = 
ath10k_mac_op_assign_vif_chanctx, 6415 .unassign_vif_chanctx = ath10k_mac_op_unassign_vif_chanctx, 6416 .switch_vif_chanctx = ath10k_mac_op_switch_vif_chanctx, 6417 6418 CFG80211_TESTMODE_CMD(ath10k_tm_cmd) 6419 6420 #ifdef CONFIG_PM 6421 .suspend = ath10k_wow_op_suspend, 6422 .resume = ath10k_wow_op_resume, 6423 #endif 6424 #ifdef CONFIG_MAC80211_DEBUGFS 6425 .sta_add_debugfs = ath10k_sta_add_debugfs, 6426 #endif 6427 }; 6428 6429 #define CHAN2G(_channel, _freq, _flags) { \ 6430 .band = IEEE80211_BAND_2GHZ, \ 6431 .hw_value = (_channel), \ 6432 .center_freq = (_freq), \ 6433 .flags = (_flags), \ 6434 .max_antenna_gain = 0, \ 6435 .max_power = 30, \ 6436 } 6437 6438 #define CHAN5G(_channel, _freq, _flags) { \ 6439 .band = IEEE80211_BAND_5GHZ, \ 6440 .hw_value = (_channel), \ 6441 .center_freq = (_freq), \ 6442 .flags = (_flags), \ 6443 .max_antenna_gain = 0, \ 6444 .max_power = 30, \ 6445 } 6446 6447 static const struct ieee80211_channel ath10k_2ghz_channels[] = { 6448 CHAN2G(1, 2412, 0), 6449 CHAN2G(2, 2417, 0), 6450 CHAN2G(3, 2422, 0), 6451 CHAN2G(4, 2427, 0), 6452 CHAN2G(5, 2432, 0), 6453 CHAN2G(6, 2437, 0), 6454 CHAN2G(7, 2442, 0), 6455 CHAN2G(8, 2447, 0), 6456 CHAN2G(9, 2452, 0), 6457 CHAN2G(10, 2457, 0), 6458 CHAN2G(11, 2462, 0), 6459 CHAN2G(12, 2467, 0), 6460 CHAN2G(13, 2472, 0), 6461 CHAN2G(14, 2484, 0), 6462 }; 6463 6464 static const struct ieee80211_channel ath10k_5ghz_channels[] = { 6465 CHAN5G(36, 5180, 0), 6466 CHAN5G(40, 5200, 0), 6467 CHAN5G(44, 5220, 0), 6468 CHAN5G(48, 5240, 0), 6469 CHAN5G(52, 5260, 0), 6470 CHAN5G(56, 5280, 0), 6471 CHAN5G(60, 5300, 0), 6472 CHAN5G(64, 5320, 0), 6473 CHAN5G(100, 5500, 0), 6474 CHAN5G(104, 5520, 0), 6475 CHAN5G(108, 5540, 0), 6476 CHAN5G(112, 5560, 0), 6477 CHAN5G(116, 5580, 0), 6478 CHAN5G(120, 5600, 0), 6479 CHAN5G(124, 5620, 0), 6480 CHAN5G(128, 5640, 0), 6481 CHAN5G(132, 5660, 0), 6482 CHAN5G(136, 5680, 0), 6483 CHAN5G(140, 5700, 0), 6484 CHAN5G(144, 5720, 0), 6485 CHAN5G(149, 5745, 0), 6486 CHAN5G(153, 5765, 0), 6487 CHAN5G(157, 5785, 0), 6488 CHAN5G(161, 5805, 0), 6489 CHAN5G(165, 5825, 0), 6490 }; 6491 6492 struct ath10k *ath10k_mac_create(size_t priv_size) 6493 { 6494 struct ieee80211_hw *hw; 6495 struct ath10k *ar; 6496 6497 hw = ieee80211_alloc_hw(sizeof(struct ath10k) + priv_size, &ath10k_ops); 6498 if (!hw) 6499 return NULL; 6500 6501 ar = hw->priv; 6502 ar->hw = hw; 6503 6504 return ar; 6505 } 6506 6507 void ath10k_mac_destroy(struct ath10k *ar) 6508 { 6509 ieee80211_free_hw(ar->hw); 6510 } 6511 6512 static const struct ieee80211_iface_limit ath10k_if_limits[] = { 6513 { 6514 .max = 8, 6515 .types = BIT(NL80211_IFTYPE_STATION) 6516 | BIT(NL80211_IFTYPE_P2P_CLIENT) 6517 }, 6518 { 6519 .max = 3, 6520 .types = BIT(NL80211_IFTYPE_P2P_GO) 6521 }, 6522 { 6523 .max = 1, 6524 .types = BIT(NL80211_IFTYPE_P2P_DEVICE) 6525 }, 6526 { 6527 .max = 7, 6528 .types = BIT(NL80211_IFTYPE_AP) 6529 }, 6530 }; 6531 6532 static const struct ieee80211_iface_limit ath10k_10x_if_limits[] = { 6533 { 6534 .max = 8, 6535 .types = BIT(NL80211_IFTYPE_AP) 6536 }, 6537 }; 6538 6539 static const struct ieee80211_iface_combination ath10k_if_comb[] = { 6540 { 6541 .limits = ath10k_if_limits, 6542 .n_limits = ARRAY_SIZE(ath10k_if_limits), 6543 .max_interfaces = 8, 6544 .num_different_channels = 1, 6545 .beacon_int_infra_match = true, 6546 }, 6547 }; 6548 6549 static const struct ieee80211_iface_combination ath10k_10x_if_comb[] = { 6550 { 6551 .limits = ath10k_10x_if_limits, 6552 .n_limits = ARRAY_SIZE(ath10k_10x_if_limits), 6553 .max_interfaces = 8, 6554 
.num_different_channels = 1, 6555 .beacon_int_infra_match = true, 6556 #ifdef CONFIG_ATH10K_DFS_CERTIFIED 6557 .radar_detect_widths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 6558 BIT(NL80211_CHAN_WIDTH_20) | 6559 BIT(NL80211_CHAN_WIDTH_40) | 6560 BIT(NL80211_CHAN_WIDTH_80), 6561 #endif 6562 }, 6563 }; 6564 6565 static const struct ieee80211_iface_limit ath10k_tlv_if_limit[] = { 6566 { 6567 .max = 2, 6568 .types = BIT(NL80211_IFTYPE_STATION) | 6569 BIT(NL80211_IFTYPE_AP) | 6570 BIT(NL80211_IFTYPE_P2P_CLIENT) | 6571 BIT(NL80211_IFTYPE_P2P_GO), 6572 }, 6573 { 6574 .max = 1, 6575 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 6576 }, 6577 }; 6578 6579 static const struct ieee80211_iface_limit ath10k_tlv_if_limit_ibss[] = { 6580 { 6581 .max = 1, 6582 .types = BIT(NL80211_IFTYPE_STATION), 6583 }, 6584 { 6585 .max = 1, 6586 .types = BIT(NL80211_IFTYPE_ADHOC), 6587 }, 6588 }; 6589 6590 /* FIXME: This is not thoroughly tested. These combinations may over- or 6591 * underestimate hw/fw capabilities. 6592 */ 6593 static struct ieee80211_iface_combination ath10k_tlv_if_comb[] = { 6594 { 6595 .limits = ath10k_tlv_if_limit, 6596 .num_different_channels = 1, 6597 .max_interfaces = 3, 6598 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 6599 }, 6600 { 6601 .limits = ath10k_tlv_if_limit_ibss, 6602 .num_different_channels = 1, 6603 .max_interfaces = 2, 6604 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 6605 }, 6606 }; 6607 6608 static struct ieee80211_iface_combination ath10k_tlv_qcs_if_comb[] = { 6609 { 6610 .limits = ath10k_tlv_if_limit, 6611 .num_different_channels = 2, 6612 .max_interfaces = 3, 6613 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit), 6614 }, 6615 { 6616 .limits = ath10k_tlv_if_limit_ibss, 6617 .num_different_channels = 1, 6618 .max_interfaces = 2, 6619 .n_limits = ARRAY_SIZE(ath10k_tlv_if_limit_ibss), 6620 }, 6621 }; 6622 6623 static struct ieee80211_sta_vht_cap ath10k_create_vht_cap(struct ath10k *ar) 6624 { 6625 struct ieee80211_sta_vht_cap vht_cap = {0}; 6626 u16 mcs_map; 6627 u32 val; 6628 int i; 6629 6630 vht_cap.vht_supported = 1; 6631 vht_cap.cap = ar->vht_cap_info; 6632 6633 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMEE_CAPABLE | 6634 IEEE80211_VHT_CAP_MU_BEAMFORMEE_CAPABLE)) { 6635 val = ar->num_rf_chains - 1; 6636 val <<= IEEE80211_VHT_CAP_BEAMFORMEE_STS_SHIFT; 6637 val &= IEEE80211_VHT_CAP_BEAMFORMEE_STS_MASK; 6638 6639 vht_cap.cap |= val; 6640 } 6641 6642 if (ar->vht_cap_info & (IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE | 6643 IEEE80211_VHT_CAP_MU_BEAMFORMER_CAPABLE)) { 6644 val = ar->num_rf_chains - 1; 6645 val <<= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_SHIFT; 6646 val &= IEEE80211_VHT_CAP_SOUNDING_DIMENSIONS_MASK; 6647 6648 vht_cap.cap |= val; 6649 } 6650 6651 mcs_map = 0; 6652 for (i = 0; i < 8; i++) { 6653 if (i < ar->num_rf_chains) 6654 mcs_map |= IEEE80211_VHT_MCS_SUPPORT_0_9 << (i*2); 6655 else 6656 mcs_map |= IEEE80211_VHT_MCS_NOT_SUPPORTED << (i*2); 6657 } 6658 6659 vht_cap.vht_mcs.rx_mcs_map = cpu_to_le16(mcs_map); 6660 vht_cap.vht_mcs.tx_mcs_map = cpu_to_le16(mcs_map); 6661 6662 return vht_cap; 6663 } 6664 6665 static struct ieee80211_sta_ht_cap ath10k_get_ht_cap(struct ath10k *ar) 6666 { 6667 int i; 6668 struct ieee80211_sta_ht_cap ht_cap = {0}; 6669 6670 if (!(ar->ht_cap_info & WMI_HT_CAP_ENABLED)) 6671 return ht_cap; 6672 6673 ht_cap.ht_supported = 1; 6674 ht_cap.ampdu_factor = IEEE80211_HT_MAX_AMPDU_64K; 6675 ht_cap.ampdu_density = IEEE80211_HT_MPDU_DENSITY_8; 6676 ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 6677 ht_cap.cap |= IEEE80211_HT_CAP_DSSSCCK40; 6678
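/* Descriptive note (added): advertise static SM power save as the default
 * in the HT capability field here; this is switched to dynamic further
 * below when the firmware reports WMI_HT_CAP_DYNAMIC_SMPS.
 */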
ht_cap.cap |= WLAN_HT_CAP_SM_PS_STATIC << IEEE80211_HT_CAP_SM_PS_SHIFT; 6679 6680 if (ar->ht_cap_info & WMI_HT_CAP_HT20_SGI) 6681 ht_cap.cap |= IEEE80211_HT_CAP_SGI_20; 6682 6683 if (ar->ht_cap_info & WMI_HT_CAP_HT40_SGI) 6684 ht_cap.cap |= IEEE80211_HT_CAP_SGI_40; 6685 6686 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) { 6687 u32 smps; 6688 6689 smps = WLAN_HT_CAP_SM_PS_DYNAMIC; 6690 smps <<= IEEE80211_HT_CAP_SM_PS_SHIFT; 6691 6692 ht_cap.cap |= smps; 6693 } 6694 6695 if (ar->ht_cap_info & WMI_HT_CAP_TX_STBC) 6696 ht_cap.cap |= IEEE80211_HT_CAP_TX_STBC; 6697 6698 if (ar->ht_cap_info & WMI_HT_CAP_RX_STBC) { 6699 u32 stbc; 6700 6701 stbc = ar->ht_cap_info; 6702 stbc &= WMI_HT_CAP_RX_STBC; 6703 stbc >>= WMI_HT_CAP_RX_STBC_MASK_SHIFT; 6704 stbc <<= IEEE80211_HT_CAP_RX_STBC_SHIFT; 6705 stbc &= IEEE80211_HT_CAP_RX_STBC; 6706 6707 ht_cap.cap |= stbc; 6708 } 6709 6710 if (ar->ht_cap_info & WMI_HT_CAP_LDPC) 6711 ht_cap.cap |= IEEE80211_HT_CAP_LDPC_CODING; 6712 6713 if (ar->ht_cap_info & WMI_HT_CAP_L_SIG_TXOP_PROT) 6714 ht_cap.cap |= IEEE80211_HT_CAP_LSIG_TXOP_PROT; 6715 6716 /* max AMSDU is implicitly taken from vht_cap_info */ 6717 if (ar->vht_cap_info & WMI_VHT_CAP_MAX_MPDU_LEN_MASK) 6718 ht_cap.cap |= IEEE80211_HT_CAP_MAX_AMSDU; 6719 6720 for (i = 0; i < ar->num_rf_chains; i++) 6721 ht_cap.mcs.rx_mask[i] = 0xFF; 6722 6723 ht_cap.mcs.tx_params |= IEEE80211_HT_MCS_TX_DEFINED; 6724 6725 return ht_cap; 6726 } 6727 6728 static void ath10k_get_arvif_iter(void *data, u8 *mac, 6729 struct ieee80211_vif *vif) 6730 { 6731 struct ath10k_vif_iter *arvif_iter = data; 6732 struct ath10k_vif *arvif = ath10k_vif_to_arvif(vif); 6733 6734 if (arvif->vdev_id == arvif_iter->vdev_id) 6735 arvif_iter->arvif = arvif; 6736 } 6737 6738 struct ath10k_vif *ath10k_get_arvif(struct ath10k *ar, u32 vdev_id) 6739 { 6740 struct ath10k_vif_iter arvif_iter; 6741 u32 flags; 6742 6743 memset(&arvif_iter, 0, sizeof(struct ath10k_vif_iter)); 6744 arvif_iter.vdev_id = vdev_id; 6745 6746 flags = IEEE80211_IFACE_ITER_RESUME_ALL; 6747 ieee80211_iterate_active_interfaces_atomic(ar->hw, 6748 flags, 6749 ath10k_get_arvif_iter, 6750 &arvif_iter); 6751 if (!arvif_iter.arvif) { 6752 ath10k_warn(ar, "No VIF found for vdev %d\n", vdev_id); 6753 return NULL; 6754 } 6755 6756 return arvif_iter.arvif; 6757 } 6758 6759 int ath10k_mac_register(struct ath10k *ar) 6760 { 6761 static const u32 cipher_suites[] = { 6762 WLAN_CIPHER_SUITE_WEP40, 6763 WLAN_CIPHER_SUITE_WEP104, 6764 WLAN_CIPHER_SUITE_TKIP, 6765 WLAN_CIPHER_SUITE_CCMP, 6766 WLAN_CIPHER_SUITE_AES_CMAC, 6767 }; 6768 struct ieee80211_supported_band *band; 6769 struct ieee80211_sta_vht_cap vht_cap; 6770 struct ieee80211_sta_ht_cap ht_cap; 6771 void *channels; 6772 int ret; 6773 6774 SET_IEEE80211_PERM_ADDR(ar->hw, ar->mac_addr); 6775 6776 SET_IEEE80211_DEV(ar->hw, ar->dev); 6777 6778 ht_cap = ath10k_get_ht_cap(ar); 6779 vht_cap = ath10k_create_vht_cap(ar); 6780 6781 BUILD_BUG_ON((ARRAY_SIZE(ath10k_2ghz_channels) + 6782 ARRAY_SIZE(ath10k_5ghz_channels)) != 6783 ATH10K_NUM_CHANS); 6784 6785 if (ar->phy_capability & WHAL_WLAN_11G_CAPABILITY) { 6786 channels = kmemdup(ath10k_2ghz_channels, 6787 sizeof(ath10k_2ghz_channels), 6788 GFP_KERNEL); 6789 if (!channels) { 6790 ret = -ENOMEM; 6791 goto err_free; 6792 } 6793 6794 band = &ar->mac.sbands[IEEE80211_BAND_2GHZ]; 6795 band->n_channels = ARRAY_SIZE(ath10k_2ghz_channels); 6796 band->channels = channels; 6797 band->n_bitrates = ath10k_g_rates_size; 6798 band->bitrates = ath10k_g_rates; 6799 band->ht_cap = ht_cap; 6800 6801 /* Enable the VHT support 
at 2.4 GHz */ 6802 band->vht_cap = vht_cap; 6803 6804 ar->hw->wiphy->bands[IEEE80211_BAND_2GHZ] = band; 6805 } 6806 6807 if (ar->phy_capability & WHAL_WLAN_11A_CAPABILITY) { 6808 channels = kmemdup(ath10k_5ghz_channels, 6809 sizeof(ath10k_5ghz_channels), 6810 GFP_KERNEL); 6811 if (!channels) { 6812 ret = -ENOMEM; 6813 goto err_free; 6814 } 6815 6816 band = &ar->mac.sbands[IEEE80211_BAND_5GHZ]; 6817 band->n_channels = ARRAY_SIZE(ath10k_5ghz_channels); 6818 band->channels = channels; 6819 band->n_bitrates = ath10k_a_rates_size; 6820 band->bitrates = ath10k_a_rates; 6821 band->ht_cap = ht_cap; 6822 band->vht_cap = vht_cap; 6823 ar->hw->wiphy->bands[IEEE80211_BAND_5GHZ] = band; 6824 } 6825 6826 ar->hw->wiphy->interface_modes = 6827 BIT(NL80211_IFTYPE_STATION) | 6828 BIT(NL80211_IFTYPE_AP); 6829 6830 ar->hw->wiphy->available_antennas_rx = ar->supp_rx_chainmask; 6831 ar->hw->wiphy->available_antennas_tx = ar->supp_tx_chainmask; 6832 6833 if (!test_bit(ATH10K_FW_FEATURE_NO_P2P, ar->fw_features)) 6834 ar->hw->wiphy->interface_modes |= 6835 BIT(NL80211_IFTYPE_P2P_DEVICE) | 6836 BIT(NL80211_IFTYPE_P2P_CLIENT) | 6837 BIT(NL80211_IFTYPE_P2P_GO); 6838 6839 ieee80211_hw_set(ar->hw, SIGNAL_DBM); 6840 ieee80211_hw_set(ar->hw, SUPPORTS_PS); 6841 ieee80211_hw_set(ar->hw, SUPPORTS_DYNAMIC_PS); 6842 ieee80211_hw_set(ar->hw, MFP_CAPABLE); 6843 ieee80211_hw_set(ar->hw, REPORTS_TX_ACK_STATUS); 6844 ieee80211_hw_set(ar->hw, HAS_RATE_CONTROL); 6845 ieee80211_hw_set(ar->hw, AP_LINK_PS); 6846 ieee80211_hw_set(ar->hw, SPECTRUM_MGMT); 6847 ieee80211_hw_set(ar->hw, SW_CRYPTO_CONTROL); 6848 ieee80211_hw_set(ar->hw, SUPPORT_FAST_XMIT); 6849 ieee80211_hw_set(ar->hw, CONNECTION_MONITOR); 6850 ieee80211_hw_set(ar->hw, SUPPORTS_PER_STA_GTK); 6851 ieee80211_hw_set(ar->hw, WANT_MONITOR_VIF); 6852 ieee80211_hw_set(ar->hw, CHANCTX_STA_CSA); 6853 ieee80211_hw_set(ar->hw, QUEUE_CONTROL); 6854 6855 ar->hw->wiphy->features |= NL80211_FEATURE_STATIC_SMPS; 6856 ar->hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 6857 6858 if (ar->ht_cap_info & WMI_HT_CAP_DYNAMIC_SMPS) 6859 ar->hw->wiphy->features |= NL80211_FEATURE_DYNAMIC_SMPS; 6860 6861 if (ar->ht_cap_info & WMI_HT_CAP_ENABLED) { 6862 ieee80211_hw_set(ar->hw, AMPDU_AGGREGATION); 6863 ieee80211_hw_set(ar->hw, TX_AMPDU_SETUP_IN_HW); 6864 } 6865 6866 ar->hw->wiphy->max_scan_ssids = WLAN_SCAN_PARAMS_MAX_SSID; 6867 ar->hw->wiphy->max_scan_ie_len = WLAN_SCAN_PARAMS_MAX_IE_LEN; 6868 6869 ar->hw->vif_data_size = sizeof(struct ath10k_vif); 6870 ar->hw->sta_data_size = sizeof(struct ath10k_sta); 6871 6872 ar->hw->max_listen_interval = ATH10K_MAX_HW_LISTEN_INTERVAL; 6873 6874 if (test_bit(WMI_SERVICE_BEACON_OFFLOAD, ar->wmi.svc_map)) { 6875 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_PROBE_RESP_OFFLOAD; 6876 6877 /* Firmware delivers WPS/P2P Probe Request frames to the driver so 6878 * that userspace (e.g. wpa_supplicant/hostapd) can generate 6879 * correct Probe Responses. This is more of a hack advert..
6880 */ 6881 ar->hw->wiphy->probe_resp_offload |= 6882 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS | 6883 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_WPS2 | 6884 NL80211_PROBE_RESP_OFFLOAD_SUPPORT_P2P; 6885 } 6886 6887 if (test_bit(WMI_SERVICE_TDLS, ar->wmi.svc_map)) 6888 ar->hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 6889 6890 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_REMAIN_ON_CHANNEL; 6891 ar->hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 6892 ar->hw->wiphy->max_remain_on_channel_duration = 5000; 6893 6894 ar->hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 6895 ar->hw->wiphy->features |= NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE; 6896 6897 ar->hw->wiphy->max_ap_assoc_sta = ar->max_num_stations; 6898 6899 ret = ath10k_wow_init(ar); 6900 if (ret) { 6901 ath10k_warn(ar, "failed to init wow: %d\n", ret); 6902 goto err_free; 6903 } 6904 6905 /* 6906 * On LL (low latency) hardware the queues are managed entirely by the FW, 6907 * so only advertise to mac80211 that we can handle the maximum number of queues. 6908 */ 6909 ar->hw->queues = IEEE80211_MAX_QUEUES; 6910 6911 /* vdev_ids are used as hw queue numbers. Make sure offchan tx queue is 6912 * something that vdev_ids can't reach so that we don't stop the queue 6913 * accidentally. 6914 */ 6915 ar->hw->offchannel_tx_hw_queue = IEEE80211_MAX_QUEUES - 1; 6916 6917 switch (ar->wmi.op_version) { 6918 case ATH10K_FW_WMI_OP_VERSION_MAIN: 6919 ar->hw->wiphy->iface_combinations = ath10k_if_comb; 6920 ar->hw->wiphy->n_iface_combinations = 6921 ARRAY_SIZE(ath10k_if_comb); 6922 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 6923 break; 6924 case ATH10K_FW_WMI_OP_VERSION_TLV: 6925 if (test_bit(WMI_SERVICE_ADAPTIVE_OCS, ar->wmi.svc_map)) { 6926 ar->hw->wiphy->iface_combinations = 6927 ath10k_tlv_qcs_if_comb; 6928 ar->hw->wiphy->n_iface_combinations = 6929 ARRAY_SIZE(ath10k_tlv_qcs_if_comb); 6930 } else { 6931 ar->hw->wiphy->iface_combinations = ath10k_tlv_if_comb; 6932 ar->hw->wiphy->n_iface_combinations = 6933 ARRAY_SIZE(ath10k_tlv_if_comb); 6934 } 6935 ar->hw->wiphy->interface_modes |= BIT(NL80211_IFTYPE_ADHOC); 6936 break; 6937 case ATH10K_FW_WMI_OP_VERSION_10_1: 6938 case ATH10K_FW_WMI_OP_VERSION_10_2: 6939 case ATH10K_FW_WMI_OP_VERSION_10_2_4: 6940 ar->hw->wiphy->iface_combinations = ath10k_10x_if_comb; 6941 ar->hw->wiphy->n_iface_combinations = 6942 ARRAY_SIZE(ath10k_10x_if_comb); 6943 break; 6944 case ATH10K_FW_WMI_OP_VERSION_UNSET: 6945 case ATH10K_FW_WMI_OP_VERSION_MAX: 6946 WARN_ON(1); 6947 ret = -EINVAL; 6948 goto err_free; 6949 } 6950 6951 ar->hw->netdev_features = NETIF_F_HW_CSUM; 6952 6953 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED)) { 6954 /* Init ath dfs pattern detector */ 6955 ar->ath_common.debug_mask = ATH_DBG_DFS; 6956 ar->dfs_detector = dfs_pattern_detector_init(&ar->ath_common, 6957 NL80211_DFS_UNSET); 6958 6959 if (!ar->dfs_detector) 6960 ath10k_warn(ar, "failed to initialise DFS pattern detector\n"); 6961 } 6962 6963 ret = ath_regd_init(&ar->ath_common.regulatory, ar->hw->wiphy, 6964 ath10k_reg_notifier); 6965 if (ret) { 6966 ath10k_err(ar, "failed to initialise regulatory: %i\n", ret); 6967 goto err_free; 6968 } 6969 6970 ar->hw->wiphy->cipher_suites = cipher_suites; 6971 ar->hw->wiphy->n_cipher_suites = ARRAY_SIZE(cipher_suites); 6972 6973 ret = ieee80211_register_hw(ar->hw); 6974 if (ret) { 6975 ath10k_err(ar, "failed to register ieee80211: %d\n", ret); 6976 goto err_free; 6977 } 6978 6979 if (!ath_is_world_regd(&ar->ath_common.regulatory)) { 6980 ret = regulatory_hint(ar->hw->wiphy, 6981 ar->ath_common.regulatory.alpha2); 6982 if (ret) 6983 goto err_unregister;
6984 } 6985 6986 return 0; 6987 6988 err_unregister: 6989 ieee80211_unregister_hw(ar->hw); 6990 err_free: 6991 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); 6992 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); 6993 6994 return ret; 6995 } 6996 6997 void ath10k_mac_unregister(struct ath10k *ar) 6998 { 6999 ieee80211_unregister_hw(ar->hw); 7000 7001 if (config_enabled(CONFIG_ATH10K_DFS_CERTIFIED) && ar->dfs_detector) 7002 ar->dfs_detector->exit(ar->dfs_detector); 7003 7004 kfree(ar->mac.sbands[IEEE80211_BAND_2GHZ].channels); 7005 kfree(ar->mac.sbands[IEEE80211_BAND_5GHZ].channels); 7006 7007 SET_IEEE80211_DEV(ar->hw, NULL); 7008 } 7009
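/* Editorial sketch (added): rough lifecycle of the entry points exported by
 * this file. This is only an illustration; the exact call sites live in
 * core.c and the bus glue and may differ between driver versions.
 *
 *	ar = ath10k_mac_create(priv_size);
 *	... hardware probe, firmware boot, capability discovery ...
 *	ath10k_mac_register(ar);
 *	... device lifetime ...
 *	ath10k_mac_unregister(ar);
 *	ath10k_mac_destroy(ar);
 */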