/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/skbuff.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ip.h>
#include <linux/if_arp.h>
#include <linux/time.h>
#include <net/mac80211.h>
#include <net/ieee80211_radiotap.h>
#include <net/tcp.h>

#include "iwl-op-mode.h"
#include "iwl-io.h"
#include "mvm.h"
#include "sta.h"
#include "time-event.h"
#include "iwl-eeprom-parse.h"
#include "iwl-phy-db.h"
#include "testmode.h"
#include "fw/error-dump.h"
#include "iwl-prph.h"
#include "iwl-nvm-parse.h"

static const struct ieee80211_iface_limit iwl_mvm_limits[] = {
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_STATION),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_AP) |
			 BIT(NL80211_IFTYPE_P2P_CLIENT) |
			 BIT(NL80211_IFTYPE_P2P_GO),
	},
	{
		.max = 1,
		.types = BIT(NL80211_IFTYPE_P2P_DEVICE),
	},
};

static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = {
	{
		.num_different_channels = 2,
		.max_interfaces = 3,
		.limits = iwl_mvm_limits,
		.n_limits = ARRAY_SIZE(iwl_mvm_limits),
	},
};

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
/*
 * Use the reserved field to indicate magic values.
 * these values will only be used internally by the driver,
 * and won't make it to the fw (reserved will be 0).
 * BC_FILTER_MAGIC_IP - configure the val of this attribute to
 *	be the vif's ip address. in case there is not a single
 *	ip address (0, or more than 1), this attribute will
 *	be skipped.
 * BC_FILTER_MAGIC_MAC - set the val of this attribute to
 *	the LSB bytes of the vif's mac address
 */
enum {
	BC_FILTER_MAGIC_NONE = 0,
	BC_FILTER_MAGIC_IP,
	BC_FILTER_MAGIC_MAC,
};

static const struct iwl_fw_bcast_filter iwl_mvm_default_bcast_filters[] = {
	{
		/* arp */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_ALL,
		.attrs = {
			{
				/* frame type - arp, hw type - ethernet */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header),
				.val = cpu_to_be32(0x08060001),
				.mask = cpu_to_be32(0xffffffff),
			},
			{
				/* arp dest ip */
				.offset_type =
					BCAST_FILTER_OFFSET_PAYLOAD_START,
				.offset = sizeof(rfc1042_header) + 2 +
					  sizeof(struct arphdr) +
					  ETH_ALEN + sizeof(__be32) +
					  ETH_ALEN,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_IP),
			},
		},
	},
	{
		/* dhcp offer bcast */
		.discard = 0,
		.frame_type = BCAST_FILTER_FRAME_TYPE_IPV4,
		.attrs = {
			{
				/* udp dest port - 68 (bootp client)*/
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = offsetof(struct udphdr, dest),
				.val = cpu_to_be32(0x00440000),
				.mask = cpu_to_be32(0xffff0000),
			},
			{
				/* dhcp - lsb bytes of client hw address */
				.offset_type = BCAST_FILTER_OFFSET_IP_END,
				.offset = 38,
				.mask = cpu_to_be32(0xffffffff),
				/* mark it as special field */
				.reserved1 = cpu_to_le16(BC_FILTER_MAGIC_MAC),
			},
		},
	},
	/* last filter must be empty */
	{},
};
#endif

void iwl_mvm_ref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Take mvm reference - type %d\n", ref_type);
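	/*
	 * Note: each per-type reference counted in refs[] below is mirrored
	 * by an iwl_trans_ref() on the transport, which keeps the device out
	 * of D0i3/runtime suspend for as long as any reference is held.
	 */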
	spin_lock_bh(&mvm->refs_lock);
	mvm->refs[ref_type]++;
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_ref(mvm->trans);
}

void iwl_mvm_unref(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	IWL_DEBUG_RPM(mvm, "Leave mvm reference - type %d\n", ref_type);
	spin_lock_bh(&mvm->refs_lock);
	if (WARN_ON(!mvm->refs[ref_type])) {
		spin_unlock_bh(&mvm->refs_lock);
		return;
	}
	mvm->refs[ref_type]--;
	spin_unlock_bh(&mvm->refs_lock);
	iwl_trans_unref(mvm->trans);
}

static void iwl_mvm_unref_all_except(struct iwl_mvm *mvm,
				     enum iwl_mvm_ref_type except_ref)
{
	int i, j;

	if (!iwl_mvm_is_d0i3_supported(mvm))
		return;

	spin_lock_bh(&mvm->refs_lock);
	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
		if (except_ref == i || !mvm->refs[i])
			continue;

		IWL_DEBUG_RPM(mvm, "Cleanup: remove mvm ref type %d (%d)\n",
			      i, mvm->refs[i]);
		for (j = 0; j < mvm->refs[i]; j++)
			iwl_trans_unref(mvm->trans);
		mvm->refs[i] = 0;
	}
	spin_unlock_bh(&mvm->refs_lock);
}

bool iwl_mvm_ref_taken(struct iwl_mvm *mvm)
{
	int i;
	bool taken = false;

	if (!iwl_mvm_is_d0i3_supported(mvm))
		return true;

	spin_lock_bh(&mvm->refs_lock);
	for (i = 0; i < IWL_MVM_REF_COUNT; i++) {
		if (mvm->refs[i]) {
			taken = true;
			break;
		}
	}
	spin_unlock_bh(&mvm->refs_lock);

	return taken;
}

int iwl_mvm_ref_sync(struct iwl_mvm *mvm, enum iwl_mvm_ref_type ref_type)
{
	iwl_mvm_ref(mvm, ref_type);

	if (!wait_event_timeout(mvm->d0i3_exit_waitq,
				!test_bit(IWL_MVM_STATUS_IN_D0I3,
					  &mvm->status),
				HZ)) {
		WARN_ON_ONCE(1);
		iwl_mvm_unref(mvm, ref_type);
		return -EIO;
	}

	return 0;
}

static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm)
{
	int i;

	memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts));
	for (i = 0; i < NUM_PHY_CTX; i++) {
		mvm->phy_ctxts[i].id = i;
		mvm->phy_ctxts[i].ref = 0;
	}
}

struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy,
						  const char *alpha2,
						  enum iwl_mcc_source src_id,
						  bool *changed)
{
	struct ieee80211_regdomain *regd = NULL;
	struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcc_update_resp *resp;

	IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2);

	lockdep_assert_held(&mvm->mutex);

	resp = iwl_mvm_update_mcc(mvm, alpha2, src_id);
	if (IS_ERR_OR_NULL(resp)) {
		IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n",
			      PTR_ERR_OR_ZERO(resp));
		goto out;
	}

	if (changed) {
		u32 status = le32_to_cpu(resp->status);

		*changed = (status == MCC_RESP_NEW_CHAN_PROFILE ||
			    status == MCC_RESP_ILLEGAL);
	}

	regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg,
				      __le32_to_cpu(resp->n_channels),
				      resp->channels,
				      __le16_to_cpu(resp->mcc),
				      __le16_to_cpu(resp->geo_info));
	/* Store the return source id */
	src_id = resp->source_id;
	kfree(resp);
	if (IS_ERR_OR_NULL(regd)) {
		IWL_DEBUG_LAR(mvm, "Could not parse update from FW %d\n",
			      PTR_ERR_OR_ZERO(regd));
		goto out;
	}

	IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n",
		      regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id);
	mvm->lar_regdom_set = true;
	mvm->mcc_src = src_id;
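	/*
	 * Note: the regdomain returned here is owned by the caller; callers
	 * such as iwl_mvm_update_changed_regdom() kfree() it once it has
	 * been handed to the regulatory core.
	 */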

out:
	return regd;
}

void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm)
{
	bool changed;
	struct ieee80211_regdomain *regd;

	if (!iwl_mvm_is_lar_supported(mvm))
		return;

	regd = iwl_mvm_get_current_regdomain(mvm, &changed);
	if (!IS_ERR_OR_NULL(regd)) {
		/* only update the regulatory core if changed */
		if (changed)
			regulatory_set_wiphy_regd(mvm->hw->wiphy, regd);

		kfree(regd);
	}
}

struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm,
							  bool *changed)
{
	return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ",
				     iwl_mvm_is_wifi_mcc_supported(mvm) ?
				     MCC_SOURCE_GET_CURRENT :
				     MCC_SOURCE_OLD_FW, changed);
}

int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm)
{
	enum iwl_mcc_source used_src;
	struct ieee80211_regdomain *regd;
	int ret;
	bool changed;
	const struct ieee80211_regdomain *r =
			rtnl_dereference(mvm->hw->wiphy->regd);

	if (!r)
		return -ENOENT;

	/* save the last source in case we overwrite it below */
	used_src = mvm->mcc_src;
	if (iwl_mvm_is_wifi_mcc_supported(mvm)) {
		/* Notify the firmware we support wifi location updates */
		regd = iwl_mvm_get_current_regdomain(mvm, NULL);
		if (!IS_ERR_OR_NULL(regd))
			kfree(regd);
	}

	/* Now set our last stored MCC and source */
	regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src,
				     &changed);
	if (IS_ERR_OR_NULL(regd))
		return -EIO;

	/* update cfg80211 if the regdomain was changed */
	if (changed)
		ret = regulatory_set_wiphy_regd_sync_rtnl(mvm->hw->wiphy, regd);
	else
		ret = 0;

	kfree(regd);
	return ret;
}

static const u8 he_if_types_ext_capa_sta[] = {
	 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING,
	 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF,
	 [9] = WLAN_EXT_CAPA10_TWT_REQUESTER_SUPPORT,
};

static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = {
	{
		.iftype = NL80211_IFTYPE_STATION,
		.extended_capabilities = he_if_types_ext_capa_sta,
		.extended_capabilities_mask = he_if_types_ext_capa_sta,
		.extended_capabilities_len = sizeof(he_if_types_ext_capa_sta),
	},
};

int iwl_mvm_mac_setup_register(struct iwl_mvm *mvm)
{
	struct ieee80211_hw *hw = mvm->hw;
	int num_mac, ret, i;
	static const u32 mvm_ciphers[] = {
		WLAN_CIPHER_SUITE_WEP40,
		WLAN_CIPHER_SUITE_WEP104,
		WLAN_CIPHER_SUITE_TKIP,
		WLAN_CIPHER_SUITE_CCMP,
	};

	/* Tell mac80211 our characteristics */
	ieee80211_hw_set(hw, SIGNAL_DBM);
	ieee80211_hw_set(hw, SPECTRUM_MGMT);
	ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS);
	ieee80211_hw_set(hw, WANT_MONITOR_VIF);
	ieee80211_hw_set(hw, SUPPORTS_PS);
	ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS);
	ieee80211_hw_set(hw, AMPDU_AGGREGATION);
	ieee80211_hw_set(hw, TIMING_BEACON_ONLY);
	ieee80211_hw_set(hw, CONNECTION_MONITOR);
	ieee80211_hw_set(hw, CHANCTX_STA_CSA);
	ieee80211_hw_set(hw, SUPPORT_FAST_XMIT);
	ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS);
	ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU);
	ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR);
	ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP);
	ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW);
	ieee80211_hw_set(hw, BUFF_MMPDU_TXQ);
	ieee80211_hw_set(hw, STA_MMPDU_TXQ);
	ieee80211_hw_set(hw, TX_AMSDU);
	ieee80211_hw_set(hw, TX_FRAG_LIST);

	if (iwl_mvm_has_tlc_offload(mvm)) {
		ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW);
		ieee80211_hw_set(hw, HAS_RATE_CONTROL);
	}

	if (iwl_mvm_has_new_rx_api(mvm))
		ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER);

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) {
		ieee80211_hw_set(hw, AP_LINK_PS);
	} else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) {
		/*
		 * we absolutely need this for the new TX API since that comes
		 * with many more queues than the current code can deal with
		 * for station powersave
		 */
		return -EINVAL;
	}

	if (mvm->trans->num_rx_queues > 1)
		ieee80211_hw_set(hw, USES_RSS);

	if (mvm->trans->max_skb_frags)
		hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG;

	hw->queues = IEEE80211_MAX_QUEUES;
	hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE;
	hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC |
				    IEEE80211_RADIOTAP_MCS_HAVE_STBC;
	hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC |
				    IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED;

	hw->radiotap_timestamp.units_pos =
		IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US |
		IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ;
	/* this is the case for CCK frames, it's better (only 8) for OFDM */
	hw->radiotap_timestamp.accuracy = 22;

	if (!iwl_mvm_has_tlc_offload(mvm))
		hw->rate_control_algorithm = RS_NAME;

	hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES;
	hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP;
	hw->max_tx_fragments = mvm->trans->max_skb_frags;

	BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6);
	memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers));
	hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers);
	hw->wiphy->cipher_suites = mvm->ciphers;

	if (iwl_mvm_has_new_rx_api(mvm)) {
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP;
		hw->wiphy->n_cipher_suites++;
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_GCMP_256;
		hw->wiphy->n_cipher_suites++;
	}

	/* Enable 11w if software crypto is not enabled (as the
	 * firmware will interpret some mgmt packets, so enabling it
	 * with software crypto isn't safe).
	 */
	if (!iwlwifi_mod_params.swcrypto) {
		ieee80211_hw_set(hw, MFP_CAPABLE);
		mvm->ciphers[hw->wiphy->n_cipher_suites] =
			WLAN_CIPHER_SUITE_AES_CMAC;
		hw->wiphy->n_cipher_suites++;
		if (iwl_mvm_has_new_rx_api(mvm)) {
			mvm->ciphers[hw->wiphy->n_cipher_suites] =
				WLAN_CIPHER_SUITE_BIP_GMAC_128;
			hw->wiphy->n_cipher_suites++;
			mvm->ciphers[hw->wiphy->n_cipher_suites] =
				WLAN_CIPHER_SUITE_BIP_GMAC_256;
			hw->wiphy->n_cipher_suites++;
		}
	}

	/* currently FW API supports only one optional cipher scheme */
	if (mvm->fw->cs[0].cipher) {
		const struct iwl_fw_cipher_scheme *fwcs = &mvm->fw->cs[0];
		struct ieee80211_cipher_scheme *cs = &mvm->cs[0];

		mvm->hw->n_cipher_schemes = 1;

		cs->cipher = le32_to_cpu(fwcs->cipher);
		cs->iftype = BIT(NL80211_IFTYPE_STATION);
		cs->hdr_len = fwcs->hdr_len;
		cs->pn_len = fwcs->pn_len;
		cs->pn_off = fwcs->pn_off;
		cs->key_idx_off = fwcs->key_idx_off;
		cs->key_idx_mask = fwcs->key_idx_mask;
		cs->key_idx_shift = fwcs->key_idx_shift;
		cs->mic_len = fwcs->mic_len;

		mvm->hw->cipher_schemes = mvm->cs;
		mvm->ciphers[hw->wiphy->n_cipher_suites] = cs->cipher;
		hw->wiphy->n_cipher_suites++;
	}

	ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS);
	hw->wiphy->features |=
		NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR |
		NL80211_FEATURE_ND_RANDOM_MAC_ADDR;

	hw->sta_data_size = sizeof(struct iwl_mvm_sta);
	hw->vif_data_size = sizeof(struct iwl_mvm_vif);
	hw->chanctx_data_size = sizeof(u16);
	hw->txq_data_size = sizeof(struct iwl_mvm_txq);

	hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) |
		BIT(NL80211_IFTYPE_P2P_CLIENT) |
		BIT(NL80211_IFTYPE_AP) |
		BIT(NL80211_IFTYPE_P2P_GO) |
		BIT(NL80211_IFTYPE_P2P_DEVICE) |
		BIT(NL80211_IFTYPE_ADHOC);

	hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN;
	hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR;
	if (iwl_mvm_is_lar_supported(mvm))
		hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED;
	else
		hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG |
					       REGULATORY_DISABLE_BEACON_HINTS;

	hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD;
	hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH;

	hw->wiphy->iface_combinations = iwl_mvm_iface_combinations;
	hw->wiphy->n_iface_combinations =
		ARRAY_SIZE(iwl_mvm_iface_combinations);

	hw->wiphy->max_remain_on_channel_duration = 10000;
	hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL;

	/* Extract MAC address */
	memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN);
	hw->wiphy->addresses = mvm->addresses;
	hw->wiphy->n_addresses = 1;

	/* Extract additional MAC addresses if available */
	num_mac = (mvm->nvm_data->n_hw_addrs > 1) ?
		min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1;

	for (i = 1; i < num_mac; i++) {
		memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr,
		       ETH_ALEN);
		mvm->addresses[i].addr[5]++;
		hw->wiphy->n_addresses++;
	}

	iwl_mvm_reset_phy_ctxts(mvm);

	hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm);

	hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX;

	BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK);
	BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) ||
		     IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK));

	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN))
		mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS;
	else
		mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS;

	if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels)
		hw->wiphy->bands[NL80211_BAND_2GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_2GHZ];
	if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) {
		hw->wiphy->bands[NL80211_BAND_5GHZ] =
			&mvm->nvm_data->bands[NL80211_BAND_5GHZ];

		if (fw_has_capa(&mvm->fw->ucode_capa,
				IWL_UCODE_TLV_CAPA_BEAMFORMER) &&
		    fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_LQ_SS_PARAMS))
			hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |=
				IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE;
	}

	hw->wiphy->hw_version = mvm->trans->hw_id;

	if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM)
		hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT;
	else
		hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT;

	hw->wiphy->max_sched_scan_reqs = 1;
	hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX;
	hw->wiphy->max_match_sets = IWL_SCAN_MAX_PROFILES;
	/* we create the 802.11 header and zero length SSID IE. */
	hw->wiphy->max_sched_scan_ie_len =
		SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2;
	hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS;
	hw->wiphy->max_sched_scan_plan_interval = U16_MAX;

	/*
	 * the firmware uses u8 for num of iterations, but 0xff is saved for
	 * infinite loop, so the maximum number of iterations is actually 254.
	 */
	hw->wiphy->max_sched_scan_plan_iterations = 254;

	hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN |
			       NL80211_FEATURE_LOW_PRIORITY_SCAN |
			       NL80211_FEATURE_P2P_GO_OPPPS |
			       NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE |
			       NL80211_FEATURE_DYNAMIC_SMPS |
			       NL80211_FEATURE_STATIC_SMPS |
			       NL80211_FEATURE_SUPPORTS_WMM_ADMISSION;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION;
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_QUIET;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT))
		hw->wiphy->features |=
			NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT))
		hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES;

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) {
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_SCAN_START_TIME);
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_BSS_PARENT_TSF);
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_SET_SCAN_DWELL);
	}

	if (iwl_mvm_is_oce_supported(mvm)) {
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP);
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME);
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION);
		wiphy_ext_feature_set(hw->wiphy,
			NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE);
	}

	if (mvm->nvm_data->sku_cap_11ax_enable &&
	    !iwlwifi_mod_params.disable_11ax) {
		hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa;
		hw->wiphy->num_iftype_ext_capab =
			ARRAY_SIZE(he_iftypes_ext_capa);
	}

	mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD;

#ifdef CONFIG_PM_SLEEP
	if (iwl_mvm_is_d0i3_supported(mvm) &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags = WIPHY_WOWLAN_ANY;
		hw->wiphy->wowlan = &mvm->wowlan;
	}

	if (mvm->fw->img[IWL_UCODE_WOWLAN].num_sec &&
	    mvm->trans->ops->d3_suspend &&
	    mvm->trans->ops->d3_resume &&
	    device_can_wakeup(mvm->trans->dev)) {
		mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT |
				     WIPHY_WOWLAN_DISCONNECT |
				     WIPHY_WOWLAN_EAP_IDENTITY_REQ |
				     WIPHY_WOWLAN_RFKILL_RELEASE |
				     WIPHY_WOWLAN_NET_DETECT;
		if (!iwlwifi_mod_params.swcrypto)
			mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY |
					     WIPHY_WOWLAN_GTK_REKEY_FAILURE |
					     WIPHY_WOWLAN_4WAY_HANDSHAKE;

		mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS;
		mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN;
		mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN;
		mvm->wowlan.max_nd_match_sets = IWL_SCAN_MAX_PROFILES;
		hw->wiphy->wowlan = &mvm->wowlan;
	}
#endif

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
	/* assign default bcast filtering configuration */
	mvm->bcast_filters = iwl_mvm_default_bcast_filters;
#endif

	ret = iwl_mvm_leds_init(mvm);
	if (ret)
		return ret;

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) {
		IWL_DEBUG_TDLS(mvm, "TDLS supported\n");
		hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS;
		ieee80211_hw_set(hw, TDLS_WIDER_BW);
	}

	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) {
		IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n");
		hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH;
	}

	hw->netdev_features |= mvm->cfg->features;
	if (!iwl_mvm_is_csum_supported(mvm)) {
		hw->netdev_features &= ~(IWL_TX_CSUM_NETIF_FLAGS |
					 NETIF_F_RXCSUM);
		/* We may support SW TX CSUM */
		if (IWL_MVM_SW_TX_CSUM_OFFLOAD)
			hw->netdev_features |= IWL_TX_CSUM_NETIF_FLAGS;
	}

	ret = ieee80211_register_hw(mvm->hw);
	if (ret)
		iwl_mvm_leds_exit(mvm);
	mvm->init_status |= IWL_MVM_INIT_STATUS_REG_HW_INIT_COMPLETE;

	if (mvm->cfg->vht_mu_mimo_supported)
		wiphy_ext_feature_set(hw->wiphy,
				      NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER);

	return ret;
}

static bool iwl_mvm_defer_tx(struct iwl_mvm *mvm,
			     struct ieee80211_sta *sta,
			     struct sk_buff *skb)
{
	struct iwl_mvm_sta *mvmsta;
	bool defer = false;

	/*
	 * double check the IN_D0I3 flag both before and after
	 * taking the spinlock, in order to prevent taking
	 * the spinlock when not needed.
	 */
	if (likely(!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)))
		return false;

	spin_lock(&mvm->d0i3_tx_lock);
	/*
	 * testing the flag again ensures the skb dequeue
	 * loop (on d0i3 exit) hasn't run yet.
	 */
	if (!test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status))
		goto out;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	if (mvmsta->sta_id == IWL_MVM_INVALID_STA ||
	    mvmsta->sta_id != mvm->d0i3_ap_sta_id)
		goto out;

	__skb_queue_tail(&mvm->d0i3_tx, skb);

	/* trigger wakeup */
	iwl_mvm_ref(mvm, IWL_MVM_REF_TX);
	iwl_mvm_unref(mvm, IWL_MVM_REF_TX);

	defer = true;
out:
	spin_unlock(&mvm->d0i3_tx_lock);
	return defer;
}

static void iwl_mvm_mac_tx(struct ieee80211_hw *hw,
			   struct ieee80211_tx_control *control,
			   struct sk_buff *skb)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct ieee80211_sta *sta = control->sta;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
		IEEE80211_TX_CTL_TX_OFFCHAN;

	if (iwl_mvm_is_radio_killed(mvm)) {
		IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n");
		goto drop;
	}

	if (offchannel &&
	    !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) &&
	    !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status))
		goto drop;

	/* treat non-bufferable MMPDUs on AP interfaces as broadcast */
	if ((info->control.vif->type == NL80211_IFTYPE_AP ||
	     info->control.vif->type == NL80211_IFTYPE_ADHOC) &&
	    ieee80211_is_mgmt(hdr->frame_control) &&
	    !ieee80211_is_bufferable_mmpdu(hdr->frame_control))
		sta = NULL;

	/* If there is no sta, and it's not offchannel - send through AP */
	if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION &&
	    !offchannel) {
		struct iwl_mvm_vif *mvmvif =
			iwl_mvm_vif_from_mac80211(info->control.vif);
		u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id);

		if (ap_sta_id < IWL_MVM_STATION_COUNT) {
			/* mac80211 holds rcu read lock */
			sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]);
			if (IS_ERR_OR_NULL(sta))
				goto drop;
		}
	}

	if (sta) {
		if (iwl_mvm_defer_tx(mvm, sta, skb))
			return;
		if (iwl_mvm_tx_skb(mvm, skb, sta))
			goto drop;
		return;
	}

	if (iwl_mvm_tx_skb_non_sta(mvm, skb))
		goto drop;
	return;
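	/*
	 * All failure paths above jump here; skbs handed to us by mac80211
	 * must be released with ieee80211_free_txskb() (not kfree_skb()) so
	 * mac80211 can clean up its own per-skb state.
	 */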
drop:
	ieee80211_free_txskb(hw, skb);
}

void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;

	/*
	 * No need for threads to be pending here, they can leave the first
	 * taker all the work.
	 *
	 * mvmtxq->tx_request logic:
	 *
	 * If 0, no one is currently TXing, set to 1 to indicate current thread
	 * will now start TX and other threads should quit.
	 *
	 * If 1, another thread is currently TXing, set to 2 to indicate to
	 * that thread that there was another request. Since that request may
	 * have raced with the check whether the queue is empty, the TXing
	 * thread should check the queue's status one more time before leaving.
	 * This check is done in order to not leave any TX hanging in the queue
	 * until the next TX invocation (which may not even happen).
	 *
	 * If 2, another thread is currently TXing, and it will already double
	 * check the queue, so do nothing.
	 */
	if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2))
		return;

	rcu_read_lock();
	do {
		while (likely(!mvmtxq->stopped &&
			      (mvm->trans->system_pm_mode ==
			       IWL_PLAT_PM_MODE_DISABLED))) {
			skb = ieee80211_tx_dequeue(hw, txq);

			if (!skb)
				break;

			if (!txq->sta)
				iwl_mvm_tx_skb_non_sta(mvm, skb);
			else
				iwl_mvm_tx_skb(mvm, skb, txq->sta);
		}
	} while (atomic_dec_return(&mvmtxq->tx_request));
	rcu_read_unlock();
}

static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw,
				      struct ieee80211_txq *txq)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);

	/*
	 * Please note that racing is handled very carefully here:
	 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is
	 * deleted afterwards.
	 * This means that if:
	 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	queue is allocated and we can TX.
	 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	a race, should defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list):
	 *	need to allocate the queue and defer the frame.
	 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list):
	 *	queue is already scheduled for allocation, no need to allocate,
	 *	should defer the frame.
	 */

	/* If the queue is allocated TX and return. */
	if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) {
		/*
		 * Check that list is empty to avoid a race where txq_id is
		 * already updated, but the queue allocation work wasn't
		 * finished
		 */
		if (unlikely(txq->sta && !list_empty(&mvmtxq->list)))
			return;

		iwl_mvm_mac_itxq_xmit(hw, txq);
		return;
	}

	/* The list is being deleted only after the queue is fully allocated. */
	if (!list_empty(&mvmtxq->list))
		return;

	list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs);
	schedule_work(&mvm->add_stream_wk);
}

#define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...)		\
	do {								\
		if (!(le16_to_cpu(_tid_bm) & BIT(_tid)))		\
			break;						\
		iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt);	\
	} while (0)

static void
iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn,
			    enum ieee80211_ampdu_mlme_action action)
{
	struct iwl_fw_dbg_trigger_tlv *trig;
	struct iwl_fw_dbg_trigger_ba *ba_trig;

	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif),
				     FW_DBG_TRIGGER_BA);
	if (!trig)
		return;

	ba_trig = (void *)trig->data;

	switch (action) {
	case IEEE80211_AMPDU_TX_OPERATIONAL: {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];

		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid,
				 "TX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, tid_data->ssn);
		break;
		}
	case IEEE80211_AMPDU_TX_STOP_CONT:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid,
				 "TX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	case IEEE80211_AMPDU_RX_START:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid,
				 "RX AGG START: MAC %pM tid %d ssn %d\n",
				 sta->addr, tid, rx_ba_ssn);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid,
				 "RX AGG STOP: MAC %pM tid %d\n",
				 sta->addr, tid);
		break;
	default:
		break;
	}
}

static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw,
				    struct ieee80211_vif *vif,
				    struct ieee80211_ampdu_params *params)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;
	bool tx_agg_ref = false;
	struct ieee80211_sta *sta = params->sta;
	enum ieee80211_ampdu_mlme_action action = params->action;
	u16 tid = params->tid;
	u16 *ssn = &params->ssn;
	u16 buf_size = params->buf_size;
	bool amsdu = params->amsdu;
	u16 timeout = params->timeout;

	IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n",
		     sta->addr, tid, action);

	if (!(mvm->nvm_data->sku_cap_11n_enable))
		return -EACCES;

	/* return from D0i3 before starting a new Tx aggregation */
	switch (action) {
	case IEEE80211_AMPDU_TX_START:
	case IEEE80211_AMPDU_TX_STOP_CONT:
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		/*
		 * for tx start, wait synchronously until D0i3 exit to
		 * get the correct sequence number for the tid.
		 * additionally, some other ampdu actions use direct
		 * target access, which is not handled automatically
		 * by the trans layer (unlike commands), so wait for
		 * d0i3 exit in these cases as well.
		 */
		ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_TX_AGG);
		if (ret)
			return ret;

		tx_agg_ref = true;
		break;
	default:
		break;
	}

	mutex_lock(&mvm->mutex);

	switch (action) {
	case IEEE80211_AMPDU_RX_START:
		if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id ==
		    iwl_mvm_sta_from_mac80211(sta)->sta_id) {
			struct iwl_mvm_vif *mvmvif;
			u16 macid = iwl_mvm_vif_from_mac80211(vif)->id;
			struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid];

			mdata->opened_rx_ba_sessions = true;
			mvmvif = iwl_mvm_vif_from_mac80211(vif);
			cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk);
		}
		if (!iwl_enable_rx_ampdu()) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size,
					 timeout);
		break;
	case IEEE80211_AMPDU_RX_STOP:
		ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size,
					 timeout);
		break;
	case IEEE80211_AMPDU_TX_START:
		if (!iwl_enable_tx_ampdu()) {
			ret = -EINVAL;
			break;
		}
		ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn);
		break;
	case IEEE80211_AMPDU_TX_STOP_CONT:
		ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_STOP_FLUSH:
	case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT:
		ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid);
		break;
	case IEEE80211_AMPDU_TX_OPERATIONAL:
		ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid,
					      buf_size, amsdu);
		break;
	default:
		WARN_ON_ONCE(1);
		ret = -EINVAL;
		break;
	}

	if (!ret) {
		u16 rx_ba_ssn = 0;

		if (action == IEEE80211_AMPDU_RX_START)
			rx_ba_ssn = *ssn;

		iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid,
					    rx_ba_ssn, action);
	}
	mutex_unlock(&mvm->mutex);

	/*
	 * If the tid is marked as started, we won't use it for offloaded
	 * traffic on the next D0i3 entry. It's safe to unref.
	 */
	if (tx_agg_ref)
		iwl_mvm_unref(mvm, IWL_MVM_REF_TX_AGG);

	return ret;
}

static void iwl_mvm_cleanup_iterator(void *data, u8 *mac,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = data;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	mvmvif->uploaded = false;
	mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;

	spin_lock_bh(&mvm->time_event_lock);
	iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data);
	spin_unlock_bh(&mvm->time_event_lock);

	mvmvif->phy_ctxt = NULL;
	memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data));
	memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data));
}

static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm)
{
	/* clear the D3 reconfig, we only need it to avoid dumping a
	 * firmware coredump on reconfiguration, we shouldn't do that
	 * on D3->D0 transition
	 */
	if (!test_and_clear_bit(IWL_MVM_STATUS_D3_RECONFIG, &mvm->status)) {
		mvm->fwrt.dump.desc = &iwl_dump_desc_assert;
		iwl_fw_error_dump(&mvm->fwrt);
	}

	/* cleanup all stale references (scan, roc), but keep the
	 * ucode_down ref until reconfig is complete
	 */
	iwl_mvm_unref_all_except(mvm, IWL_MVM_REF_UCODE_DOWN);

	iwl_mvm_stop_device(mvm);

	mvm->cur_aid = 0;

	mvm->scan_status = 0;
	mvm->ps_disabled = false;
	mvm->calibrating = false;

	/* just in case one was running */
	iwl_mvm_cleanup_roc_te(mvm);
	ieee80211_remain_on_channel_expired(mvm->hw);

	/*
	 * cleanup all interfaces, even inactive ones, as some might have
	 * gone down during the HW restart
	 */
	ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm);

	mvm->p2p_device_vif = NULL;
	mvm->d0i3_ap_sta_id = IWL_MVM_INVALID_STA;

	iwl_mvm_reset_phy_ctxts(mvm);
	memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table));
	memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif));
	memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd));

	ieee80211_wake_queues(mvm->hw);

	/* clear any stale d0i3 state */
	clear_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status);

	mvm->vif_count = 0;
	mvm->rx_ba_sessions = 0;
	mvm->fwrt.dump.conf = FW_DBG_INVALID;
	mvm->monitor_on = false;

	/* keep statistics ticking */
	iwl_mvm_accu_radio_stats(mvm);
}

int __iwl_mvm_mac_start(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) {
		/*
		 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART
		 * so later code will - from now on - see that we're doing it.
		 */
		set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
		clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status);
		/* Clean up some internal and mac80211 state on restart */
		iwl_mvm_restart_cleanup(mvm);
	} else {
		/* Hold the reference to prevent runtime suspend while
		 * the start procedure runs.  It's a bit confusing
		 * that the UCODE_DOWN reference is taken, but it just
		 * means "UCODE is not UP yet". (TODO: rename this
		 * reference).
		 */
		iwl_mvm_ref(mvm, IWL_MVM_REF_UCODE_DOWN);
	}
	ret = iwl_mvm_up(mvm);

	iwl_fw_dbg_apply_point(&mvm->fwrt, IWL_FW_INI_APPLY_POST_INIT);

	if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/* Something went wrong - we need to finish some cleanup
		 * that normally iwl_mvm_mac_restart_complete() below
		 * would do.
		 */
		clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
#ifdef CONFIG_PM
		iwl_mvm_d0i3_enable_tx(mvm, NULL);
#endif
	}

	return ret;
}

static int iwl_mvm_mac_start(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	int ret;

	/* Some hw restart cleanups must not hold the mutex */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		/*
		 * Make sure we are out of d0i3. This is needed
		 * to make sure the reference accounting is correct
		 * (and there is no stale d0i3_exit_work).
		 */
		wait_event_timeout(mvm->d0i3_exit_waitq,
				   !test_bit(IWL_MVM_STATUS_IN_D0I3,
					     &mvm->status),
				   HZ);
	}

	mutex_lock(&mvm->mutex);
	ret = __iwl_mvm_mac_start(mvm);
	mutex_unlock(&mvm->mutex);

	return ret;
}

static void iwl_mvm_restart_complete(struct iwl_mvm *mvm)
{
	int ret;

	mutex_lock(&mvm->mutex);

	clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status);
#ifdef CONFIG_PM
	iwl_mvm_d0i3_enable_tx(mvm, NULL);
#endif
	ret = iwl_mvm_update_quotas(mvm, true, NULL);
	if (ret)
		IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n",
			ret);

	/* allow transport/FW low power modes */
	iwl_mvm_unref(mvm, IWL_MVM_REF_UCODE_DOWN);

	/*
	 * If we have TDLS peers, remove them. We don't know the last seqno/PN
	 * of packets the FW sent out, so we must reconnect.
	 */
	iwl_mvm_teardown_tdls_peers(mvm);

	mutex_unlock(&mvm->mutex);
}

static void iwl_mvm_resume_complete(struct iwl_mvm *mvm)
{
	if (iwl_mvm_is_d0i3_supported(mvm) &&
	    iwl_mvm_enter_d0i3_on_suspend(mvm))
		WARN_ONCE(!wait_event_timeout(mvm->d0i3_exit_waitq,
					      !test_bit(IWL_MVM_STATUS_IN_D0I3,
							&mvm->status),
					      HZ),
			  "D0i3 exit on resume timed out\n");
}

static void
iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw,
			      enum ieee80211_reconfig_type reconfig_type)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	switch (reconfig_type) {
	case IEEE80211_RECONFIG_TYPE_RESTART:
		iwl_mvm_restart_complete(mvm);
		break;
	case IEEE80211_RECONFIG_TYPE_SUSPEND:
		iwl_mvm_resume_complete(mvm);
		break;
	}
}

void __iwl_mvm_mac_stop(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	/* firmware counters are obviously reset now, but we shouldn't
	 * partially track so also clear the fw_reset_accu counters.
	 */
	memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats));

	/* async_handlers_wk is now blocked */

	/*
	 * The work item could be running or queued if the
	 * ROC time event stops just as we get here.
	 */
	flush_work(&mvm->roc_done_wk);

	iwl_mvm_stop_device(mvm);

	iwl_mvm_async_handlers_purge(mvm);
	/* async_handlers_list is empty and will stay empty: HW is stopped */

	/* the fw is stopped, the aux sta is dead: clean up driver state */
	iwl_mvm_del_aux_sta(mvm);

	/*
	 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the
	 * hw (as restart_complete() won't be called in this case) and mac80211
	 * won't execute the restart.
	 * But make sure to cleanup interfaces that have gone down before/during
	 * HW restart was requested.
	 */
	if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED,
			       &mvm->status))
		ieee80211_iterate_interfaces(mvm->hw, 0,
					     iwl_mvm_cleanup_iterator, mvm);

	/* We shouldn't have any UIDs still set.  Loop over all the UIDs to
	 * make sure there's nothing left there and warn if any is found.
	 */
	if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) {
		int i;

		for (i = 0; i < mvm->max_scans; i++) {
			if (WARN_ONCE(mvm->scan_uid_status[i],
				      "UMAC scan UID %d status was not cleaned\n",
				      i))
				mvm->scan_uid_status[i] = 0;
		}
	}
}

static void iwl_mvm_mac_stop(struct ieee80211_hw *hw)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	flush_work(&mvm->d0i3_exit_work);
	flush_work(&mvm->async_handlers_wk);
	flush_work(&mvm->add_stream_wk);

	/*
	 * Lock and clear the firmware running bit here already, so that
	 * new commands coming in elsewhere, e.g. from debugfs, will not
	 * be able to proceed. This is important here because one of those
	 * debugfs files causes the firmware dump to be triggered, and if we
	 * don't stop debugfs accesses before canceling that it could be
	 * retriggered after we flush it but before we've cleared the bit.
	 */
	clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status);

	iwl_fw_cancel_dump(&mvm->fwrt);
	cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork);
	cancel_delayed_work_sync(&mvm->scan_timeout_dwork);
	iwl_fw_free_dump_desc(&mvm->fwrt);

	mutex_lock(&mvm->mutex);
	__iwl_mvm_mac_stop(mvm);
	mutex_unlock(&mvm->mutex);

	/*
	 * The worker might have been waiting for the mutex, let it run and
	 * discover that its list is now empty.
	 */
	cancel_work_sync(&mvm->async_handlers_wk);
}

static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm)
{
	u16 i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < NUM_PHY_CTX; i++)
		if (!mvm->phy_ctxts[i].ref)
			return &mvm->phy_ctxts[i];

	IWL_ERR(mvm, "No available PHY context\n");
	return NULL;
}

static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
				s16 tx_power)
{
	int len;
	union {
		struct iwl_dev_tx_power_cmd v5;
		struct iwl_dev_tx_power_cmd_v4 v4;
	} cmd = {
		.v5.v3.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC),
		.v5.v3.mac_context_id =
			cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id),
		.v5.v3.pwr_restriction = cpu_to_le16(8 * tx_power),
	};

	if (tx_power == IWL_DEFAULT_MAX_TX_POWER)
		cmd.v5.v3.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER);

	if (fw_has_api(&mvm->fw->ucode_capa,
		       IWL_UCODE_TLV_API_REDUCE_TX_POWER))
		len = sizeof(cmd.v5);
	else if (fw_has_capa(&mvm->fw->ucode_capa,
			     IWL_UCODE_TLV_CAPA_TX_POWER_ACK))
		len = sizeof(cmd.v4);
	else
		len = sizeof(cmd.v4.v3);

	return iwl_mvm_send_cmd_pdu(mvm, REDUCE_TX_POWER_CMD, 0, len, &cmd);
}

static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	mvmvif->mvm = mvm;
	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);

	/*
	 * make sure D0i3 exit is completed, otherwise a target access
	 * during tx queue configuration could be done when still in
	 * D0i3 state.
	 */
	ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_ADD_IF);
	if (ret)
		return ret;

	/*
	 * Not much to do here. The stack will not allow interface
	 * types or combinations that we didn't advertise, so we
	 * don't really have to check the types.
	 */

	mutex_lock(&mvm->mutex);

	/* make sure that beacon statistics don't go backwards with FW reset */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		mvmvif->beacon_stats.accu_num_beacons +=
			mvmvif->beacon_stats.num_beacons;

	/* Allocate resources for the MAC context, and add it to the fw */
	ret = iwl_mvm_mac_ctxt_init(mvm, vif);
	if (ret)
		goto out_unlock;

	rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif);

	/* Counting number of interfaces is needed for legacy PM */
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count++;

	/*
	 * The AP binding flow can be done only after the beacon
	 * template is configured (which happens only in the mac80211
	 * start_ap() flow), and adding the broadcast station can happen
	 * only after the binding.
	 * In addition, since modifying the MAC before adding a bcast
	 * station is not allowed by the FW, delay the adding of MAC context to
	 * the point where we can also add the bcast station.
	 * In short: there's not much we can do at this point, other than
	 * allocating resources :)
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
		ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
		if (ret) {
			IWL_ERR(mvm, "Failed to allocate bcast sta\n");
			goto out_release;
		}

		/*
		 * The only queue for this station is the mcast queue,
		 * which shouldn't be in TFD mask anyway
		 */
		ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta,
					       0, vif->type,
					       IWL_STA_MULTICAST);
		if (ret)
			goto out_release;

		iwl_mvm_vif_dbgfs_register(mvm, vif);
		goto out_unlock;
	}

	mvmvif->features |= hw->netdev_features;

	ret = iwl_mvm_mac_ctxt_add(mvm, vif);
	if (ret)
		goto out_release;

	ret = iwl_mvm_power_update_mac(mvm);
	if (ret)
		goto out_remove_mac;

	/* beacon filtering */
	ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0);
	if (ret)
		goto out_remove_mac;

	if (!mvm->bf_allowed_vif &&
	    vif->type == NL80211_IFTYPE_STATION && !vif->p2p) {
		mvm->bf_allowed_vif = mvmvif;
		vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER |
				     IEEE80211_VIF_SUPPORTS_CQM_RSSI;
	}

	/*
	 * P2P_DEVICE interface does not have a channel context assigned to it,
	 * so a dedicated PHY context is allocated to it and the corresponding
	 * MAC context is bound to it at this stage.
	 */
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {

		mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm);
		if (!mvmvif->phy_ctxt) {
			ret = -ENOSPC;
			goto out_free_bf;
		}

		iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt);
		ret = iwl_mvm_binding_add_vif(mvm, vif);
		if (ret)
			goto out_unref_phy;

		ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif);
		if (ret)
			goto out_unbind;

		/* Save a pointer to p2p device vif, so it can later be used to
		 * update the p2p device MAC when a GO is started/stopped */
		mvm->p2p_device_vif = vif;
	}

	iwl_mvm_tcm_add_vif(mvm, vif);

	if (vif->type == NL80211_IFTYPE_MONITOR)
		mvm->monitor_on = true;

	iwl_mvm_vif_dbgfs_register(mvm, vif);
	goto out_unlock;

 out_unbind:
	iwl_mvm_binding_remove_vif(mvm, vif);
 out_unref_phy:
	iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
 out_free_bf:
	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}
 out_remove_mac:
	mvmvif->phy_ctxt = NULL;
	iwl_mvm_mac_ctxt_remove(mvm, vif);
 out_release:
	if (vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;
 out_unlock:
	mutex_unlock(&mvm->mutex);

	iwl_mvm_unref(mvm, IWL_MVM_REF_ADD_IF);

	return ret;
}

static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm,
					struct ieee80211_vif *vif)
{
	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		/*
		 * Flush the ROC worker which will flush the OFFCHANNEL queue.
		 * We assume here that all the packets sent to the OFFCHANNEL
		 * queue are sent in ROC session.
		 */
		flush_work(&mvm->roc_done_wk);
	}
}

static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw,
					 struct ieee80211_vif *vif)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_probe_resp_data *probe_data;

	iwl_mvm_prepare_mac_removal(mvm, vif);

	if (!(vif->type == NL80211_IFTYPE_AP ||
	      vif->type == NL80211_IFTYPE_ADHOC))
		iwl_mvm_tcm_rm_vif(mvm, vif);

	mutex_lock(&mvm->mutex);

	probe_data = rcu_dereference_protected(mvmvif->probe_resp_data,
					       lockdep_is_held(&mvm->mutex));
	RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL);
	if (probe_data)
		kfree_rcu(probe_data, rcu_head);

	if (mvm->bf_allowed_vif == mvmvif) {
		mvm->bf_allowed_vif = NULL;
		vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER |
				       IEEE80211_VIF_SUPPORTS_CQM_RSSI);
	}

	iwl_mvm_vif_dbgfs_clean(mvm, vif);

	/*
	 * For AP/GO interfaces, the tear down of the resources allocated to
	 * the interface is handled as part of the stop_ap flow.
	 */
	if (vif->type == NL80211_IFTYPE_AP ||
	    vif->type == NL80211_IFTYPE_ADHOC) {
#ifdef CONFIG_NL80211_TESTMODE
		if (vif == mvm->noa_vif) {
			mvm->noa_vif = NULL;
			mvm->noa_duration = 0;
		}
#endif
		iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta);
		iwl_mvm_dealloc_bcast_sta(mvm, vif);
		goto out_release;
	}

	if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
		mvm->p2p_device_vif = NULL;
		iwl_mvm_rm_p2p_bcast_sta(mvm, vif);
		iwl_mvm_binding_remove_vif(mvm, vif);
		iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt);
		mvmvif->phy_ctxt = NULL;
	}

	if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE)
		mvm->vif_count--;

	iwl_mvm_power_update_mac(mvm);
	iwl_mvm_mac_ctxt_remove(mvm, vif);

	RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL);

	if (vif->type == NL80211_IFTYPE_MONITOR)
		mvm->monitor_on = false;

out_release:
	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed)
{
	return 0;
}

struct iwl_mvm_mc_iter_data {
	struct iwl_mvm *mvm;
	int port_id;
};

static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac,
				      struct ieee80211_vif *vif)
{
	struct iwl_mvm_mc_iter_data *data = _data;
	struct iwl_mvm *mvm = data->mvm;
	struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd;
	struct iwl_host_cmd hcmd = {
		.id = MCAST_FILTER_CMD,
		.flags = CMD_ASYNC,
		.dataflags[0] = IWL_HCMD_DFL_NOCOPY,
	};
	int ret, len;

	/* if we don't have free ports, mcast frames will be dropped */
	if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM))
		return;

	if (vif->type != NL80211_IFTYPE_STATION ||
	    !vif->bss_conf.assoc)
		return;

	cmd->port_id = data->port_id++;
	memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN);
	len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4);

	hcmd.len[0] = len;
	hcmd.data[0] = cmd;

	ret = iwl_mvm_send_cmd(mvm, &hcmd);
	if (ret)
		IWL_ERR(mvm, "mcast filter cmd error. "
			"ret=%d\n", ret);
}

static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm)
{
	struct iwl_mvm_mc_iter_data iter_data = {
		.mvm = mvm,
	};

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(!mvm->mcast_filter_cmd))
		return;

	ieee80211_iterate_active_interfaces_atomic(
		mvm->hw, IEEE80211_IFACE_ITER_NORMAL,
		iwl_mvm_mc_iface_iterator, &iter_data);
}

static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw,
				     struct netdev_hw_addr_list *mc_list)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd;
	struct netdev_hw_addr *addr;
	int addr_count;
	bool pass_all;
	int len;

	addr_count = netdev_hw_addr_list_count(mc_list);
	pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES ||
		   IWL_MVM_FW_MCAST_FILTER_PASS_ALL;
	if (pass_all)
		addr_count = 0;

	len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4);
	cmd = kzalloc(len, GFP_ATOMIC);
	if (!cmd)
		return 0;

	if (pass_all) {
		cmd->pass_all = 1;
		return (u64)(unsigned long)cmd;
	}

	netdev_hw_addr_list_for_each(addr, mc_list) {
		IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n",
				   cmd->count, addr->addr);
		memcpy(&cmd->addr_list[cmd->count * ETH_ALEN],
		       addr->addr, ETH_ALEN);
		cmd->count++;
	}

	return (u64)(unsigned long)cmd;
}

static void iwl_mvm_configure_filter(struct ieee80211_hw *hw,
				     unsigned int changed_flags,
				     unsigned int *total_flags,
				     u64 multicast)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);
	struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast;

	mutex_lock(&mvm->mutex);

	/* replace previous configuration */
	kfree(mvm->mcast_filter_cmd);
	mvm->mcast_filter_cmd = cmd;

	if (!cmd)
		goto out;

	if (changed_flags & FIF_ALLMULTI)
		cmd->pass_all = !!(*total_flags & FIF_ALLMULTI);

	if (cmd->pass_all)
		cmd->count = 0;

	iwl_mvm_recalc_multicast(mvm);
out:
	mutex_unlock(&mvm->mutex);
	*total_flags = 0;
}

static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw,
					struct ieee80211_vif *vif,
					unsigned int filter_flags,
					unsigned int changed_flags)
{
	struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw);

	/* We support only filter for probe requests */
	if (!(changed_flags & FIF_PROBE_REQ))
		return;

	/* Supported only for p2p client interfaces */
	if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc ||
	    !vif->p2p)
		return;

	mutex_lock(&mvm->mutex);
	iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL);
	mutex_unlock(&mvm->mutex);
}

#ifdef CONFIG_IWLWIFI_BCAST_FILTERING
struct iwl_bcast_iter_data {
	struct iwl_mvm *mvm;
	struct iwl_bcast_filter_cmd *cmd;
	u8 current_filter;
};

static void
iwl_mvm_set_bcast_filter(struct ieee80211_vif *vif,
			 const struct iwl_fw_bcast_filter *in_filter,
			 struct iwl_fw_bcast_filter *out_filter)
{
	struct iwl_fw_bcast_filter_attr *attr;
	int i;

	memcpy(out_filter, in_filter, sizeof(*out_filter));

	for (i = 0; i < ARRAY_SIZE(out_filter->attrs); i++) {
		attr = &out_filter->attrs[i];

		if (!attr->mask)
			break;

		switch (attr->reserved1) {
		case cpu_to_le16(BC_FILTER_MAGIC_IP):
(vif->bss_conf.arp_addr_cnt != 1) { 1870 attr->mask = 0; 1871 continue; 1872 } 1873 1874 attr->val = vif->bss_conf.arp_addr_list[0]; 1875 break; 1876 case cpu_to_le16(BC_FILTER_MAGIC_MAC): 1877 attr->val = *(__be32 *)&vif->addr[2]; 1878 break; 1879 default: 1880 break; 1881 } 1882 attr->reserved1 = 0; 1883 out_filter->num_attrs++; 1884 } 1885 } 1886 1887 static void iwl_mvm_bcast_filter_iterator(void *_data, u8 *mac, 1888 struct ieee80211_vif *vif) 1889 { 1890 struct iwl_bcast_iter_data *data = _data; 1891 struct iwl_mvm *mvm = data->mvm; 1892 struct iwl_bcast_filter_cmd *cmd = data->cmd; 1893 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1894 struct iwl_fw_bcast_mac *bcast_mac; 1895 int i; 1896 1897 if (WARN_ON(mvmvif->id >= ARRAY_SIZE(cmd->macs))) 1898 return; 1899 1900 bcast_mac = &cmd->macs[mvmvif->id]; 1901 1902 /* 1903 * enable filtering only for associated stations, but not for P2P 1904 * Clients 1905 */ 1906 if (vif->type != NL80211_IFTYPE_STATION || vif->p2p || 1907 !vif->bss_conf.assoc) 1908 return; 1909 1910 bcast_mac->default_discard = 1; 1911 1912 /* copy all configured filters */ 1913 for (i = 0; mvm->bcast_filters[i].attrs[0].mask; i++) { 1914 /* 1915 * Make sure we don't exceed our filters limit. 1916 * if there is still a valid filter to be configured, 1917 * be on the safe side and just allow bcast for this mac. 1918 */ 1919 if (WARN_ON_ONCE(data->current_filter >= 1920 ARRAY_SIZE(cmd->filters))) { 1921 bcast_mac->default_discard = 0; 1922 bcast_mac->attached_filters = 0; 1923 break; 1924 } 1925 1926 iwl_mvm_set_bcast_filter(vif, 1927 &mvm->bcast_filters[i], 1928 &cmd->filters[data->current_filter]); 1929 1930 /* skip current filter if it contains no attributes */ 1931 if (!cmd->filters[data->current_filter].num_attrs) 1932 continue; 1933 1934 /* attach the filter to current mac */ 1935 bcast_mac->attached_filters |= 1936 cpu_to_le16(BIT(data->current_filter)); 1937 1938 data->current_filter++; 1939 } 1940 } 1941 1942 bool iwl_mvm_bcast_filter_build_cmd(struct iwl_mvm *mvm, 1943 struct iwl_bcast_filter_cmd *cmd) 1944 { 1945 struct iwl_bcast_iter_data iter_data = { 1946 .mvm = mvm, 1947 .cmd = cmd, 1948 }; 1949 1950 if (IWL_MVM_FW_BCAST_FILTER_PASS_ALL) 1951 return false; 1952 1953 memset(cmd, 0, sizeof(*cmd)); 1954 cmd->max_bcast_filters = ARRAY_SIZE(cmd->filters); 1955 cmd->max_macs = ARRAY_SIZE(cmd->macs); 1956 1957 #ifdef CONFIG_IWLWIFI_DEBUGFS 1958 /* use debugfs filters/macs if override is configured */ 1959 if (mvm->dbgfs_bcast_filtering.override) { 1960 memcpy(cmd->filters, &mvm->dbgfs_bcast_filtering.cmd.filters, 1961 sizeof(cmd->filters)); 1962 memcpy(cmd->macs, &mvm->dbgfs_bcast_filtering.cmd.macs, 1963 sizeof(cmd->macs)); 1964 return true; 1965 } 1966 #endif 1967 1968 /* if no filters are configured, do nothing */ 1969 if (!mvm->bcast_filters) 1970 return false; 1971 1972 /* configure and attach these filters for each associated sta vif */ 1973 ieee80211_iterate_active_interfaces( 1974 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1975 iwl_mvm_bcast_filter_iterator, &iter_data); 1976 1977 return true; 1978 } 1979 1980 static int iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 1981 { 1982 struct iwl_bcast_filter_cmd cmd; 1983 1984 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_BCAST_FILTERING)) 1985 return 0; 1986 1987 if (!iwl_mvm_bcast_filter_build_cmd(mvm, &cmd)) 1988 return 0; 1989 1990 return iwl_mvm_send_cmd_pdu(mvm, BCAST_FILTER_CMD, 0, 1991 sizeof(cmd), &cmd); 1992 } 1993 #else 1994 static inline int 
iwl_mvm_configure_bcast_filter(struct iwl_mvm *mvm) 1995 { 1996 return 0; 1997 } 1998 #endif 1999 2000 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 2001 struct ieee80211_vif *vif) 2002 { 2003 struct iwl_mu_group_mgmt_cmd cmd = {}; 2004 2005 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 2006 WLAN_MEMBERSHIP_LEN); 2007 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 2008 WLAN_USER_POSITION_LEN); 2009 2010 return iwl_mvm_send_cmd_pdu(mvm, 2011 WIDE_ID(DATA_PATH_GROUP, 2012 UPDATE_MU_GROUPS_CMD), 2013 0, sizeof(cmd), &cmd); 2014 } 2015 2016 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 2017 struct ieee80211_vif *vif) 2018 { 2019 if (vif->mu_mimo_owner) { 2020 struct iwl_mu_group_mgmt_notif *notif = _data; 2021 2022 /* 2023 * MU-MIMO Group Id action frame is little endian. We treat 2024 * the data received from firmware as if it came from the 2025 * action frame, so no conversion is needed. 2026 */ 2027 ieee80211_update_mu_groups(vif, 2028 (u8 *)¬if->membership_status, 2029 (u8 *)¬if->user_position); 2030 } 2031 } 2032 2033 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 2034 struct iwl_rx_cmd_buffer *rxb) 2035 { 2036 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2037 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 2038 2039 ieee80211_iterate_active_interfaces_atomic( 2040 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 2041 iwl_mvm_mu_mimo_iface_iterator, notif); 2042 } 2043 2044 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) 2045 { 2046 u8 byte_num = ppe_pos_bit / 8; 2047 u8 bit_num = ppe_pos_bit % 8; 2048 u8 residue_bits; 2049 u8 res; 2050 2051 if (bit_num <= 5) 2052 return (ppe[byte_num] >> bit_num) & 2053 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); 2054 2055 /* 2056 * If bit_num > 5, we have to combine bits with next byte. 2057 * Calculate how many bits we need to take from current byte (called 2058 * here "residue_bits"), and add them to bits from next byte. 
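 * For example, with ppe_pos_bit = 22: byte_num = 2 and bit_num = 6, so
 * residue_bits = 2; bits 6-7 of ppe[2] become the two low bits of the
 * returned value and bit 0 of ppe[3] supplies the remaining (third) bit.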
2059 */ 2060 2061 residue_bits = 8 - bit_num; 2062 2063 res = (ppe[byte_num + 1] & 2064 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << 2065 residue_bits; 2066 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); 2067 2068 return res; 2069 } 2070 2071 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 2072 struct ieee80211_vif *vif, u8 sta_id) 2073 { 2074 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2075 struct iwl_he_sta_context_cmd sta_ctxt_cmd = { 2076 .sta_id = sta_id, 2077 .tid_limit = IWL_MAX_TID_COUNT, 2078 .bss_color = vif->bss_conf.bss_color, 2079 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, 2080 .frame_time_rts_th = 2081 cpu_to_le16(vif->bss_conf.frame_time_rts_th), 2082 }; 2083 struct ieee80211_sta *sta; 2084 u32 flags; 2085 int i; 2086 2087 rcu_read_lock(); 2088 2089 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2090 if (IS_ERR(sta)) { 2091 rcu_read_unlock(); 2092 WARN(1, "Can't find STA to configure HE\n"); 2093 return; 2094 } 2095 2096 if (!sta->he_cap.has_he) { 2097 rcu_read_unlock(); 2098 return; 2099 } 2100 2101 flags = 0; 2102 2103 /* HTC flags */ 2104 if (sta->he_cap.he_cap_elem.mac_cap_info[0] & 2105 IEEE80211_HE_MAC_CAP0_HTC_HE) 2106 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); 2107 if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & 2108 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || 2109 (sta->he_cap.he_cap_elem.mac_cap_info[2] & 2110 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { 2111 u8 link_adap = 2112 ((sta->he_cap.he_cap_elem.mac_cap_info[2] & 2113 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + 2114 (sta->he_cap.he_cap_elem.mac_cap_info[1] & 2115 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); 2116 2117 if (link_adap == 2) 2118 sta_ctxt_cmd.htc_flags |= 2119 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); 2120 else if (link_adap == 3) 2121 sta_ctxt_cmd.htc_flags |= 2122 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); 2123 } 2124 if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 2125 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); 2126 if (sta->he_cap.he_cap_elem.mac_cap_info[3] & 2127 IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 2128 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); 2129 if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 2130 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); 2131 2132 /* 2133 * Initialize the PPE thresholds to "None" (7), as described in Table 2134 * 9-262ac of 80211.ax/D3.0. 2135 */ 2136 memset(&sta_ctxt_cmd.pkt_ext, 7, sizeof(sta_ctxt_cmd.pkt_ext)); 2137 2138 /* If PPE Thresholds exist, parse them into a FW-familiar format. 
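 * The field starts with a 7-bit header (3-bit NSS count and 4-bit RU index
 * bitmap) and is followed by pairs of 3-bit PPET16/PPET8 values for each
 * NSS/RU combination, which is why ppe_pos_bit starts at 7 below.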
*/ 2139 if (sta->he_cap.he_cap_elem.phy_cap_info[6] & 2140 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2141 u8 nss = (sta->he_cap.ppe_thres[0] & 2142 IEEE80211_PPE_THRES_NSS_MASK) + 1; 2143 u8 ru_index_bitmap = 2144 (sta->he_cap.ppe_thres[0] & 2145 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> 2146 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; 2147 u8 *ppe = &sta->he_cap.ppe_thres[0]; 2148 u8 ppe_pos_bit = 7; /* Starting after PPE header */ 2149 2150 /* 2151 * FW currently supports only nss == MAX_HE_SUPP_NSS 2152 * 2153 * If nss > MAX: we can ignore values we don't support 2154 * If nss < MAX: we can set zeros in other streams 2155 */ 2156 if (nss > MAX_HE_SUPP_NSS) { 2157 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, 2158 MAX_HE_SUPP_NSS); 2159 nss = MAX_HE_SUPP_NSS; 2160 } 2161 2162 for (i = 0; i < nss; i++) { 2163 u8 ru_index_tmp = ru_index_bitmap << 1; 2164 u8 bw; 2165 2166 for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { 2167 ru_index_tmp >>= 1; 2168 if (!(ru_index_tmp & 1)) 2169 continue; 2170 2171 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = 2172 iwl_mvm_he_get_ppe_val(ppe, 2173 ppe_pos_bit); 2174 ppe_pos_bit += 2175 IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2176 sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = 2177 iwl_mvm_he_get_ppe_val(ppe, 2178 ppe_pos_bit); 2179 ppe_pos_bit += 2180 IEEE80211_PPE_THRES_INFO_PPET_SIZE; 2181 } 2182 } 2183 2184 flags |= STA_CTXT_HE_PACKET_EXT; 2185 } 2186 rcu_read_unlock(); 2187 2188 /* Mark MU EDCA as enabled, unless none detected on some AC */ 2189 flags |= STA_CTXT_HE_MU_EDCA_CW; 2190 for (i = 0; i < AC_NUM; i++) { 2191 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = 2192 &mvmvif->queue_params[i].mu_edca_param_rec; 2193 2194 if (!mvmvif->queue_params[i].mu_edca) { 2195 flags &= ~STA_CTXT_HE_MU_EDCA_CW; 2196 break; 2197 } 2198 2199 sta_ctxt_cmd.trig_based_txf[i].cwmin = 2200 cpu_to_le16(mu_edca->ecw_min_max & 0xf); 2201 sta_ctxt_cmd.trig_based_txf[i].cwmax = 2202 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); 2203 sta_ctxt_cmd.trig_based_txf[i].aifsn = 2204 cpu_to_le16(mu_edca->aifsn); 2205 sta_ctxt_cmd.trig_based_txf[i].mu_time = 2206 cpu_to_le16(mu_edca->mu_edca_timer); 2207 } 2208 2209 if (vif->bss_conf.multi_sta_back_32bit) 2210 flags |= STA_CTXT_HE_32BIT_BA_BITMAP; 2211 2212 if (vif->bss_conf.ack_enabled) 2213 flags |= STA_CTXT_HE_ACK_ENABLED; 2214 2215 if (vif->bss_conf.uora_exists) { 2216 flags |= STA_CTXT_HE_TRIG_RND_ALLOC; 2217 2218 sta_ctxt_cmd.rand_alloc_ecwmin = 2219 vif->bss_conf.uora_ocw_range & 0x7; 2220 sta_ctxt_cmd.rand_alloc_ecwmax = 2221 (vif->bss_conf.uora_ocw_range >> 3) & 0x7; 2222 } 2223 2224 /* TODO: support Multi BSSID IE */ 2225 2226 sta_ctxt_cmd.flags = cpu_to_le32(flags); 2227 2228 if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, 2229 DATA_PATH_GROUP, 0), 2230 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd)) 2231 IWL_ERR(mvm, "Failed to config FW to work HE!\n"); 2232 } 2233 2234 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2235 struct ieee80211_vif *vif, 2236 struct ieee80211_bss_conf *bss_conf, 2237 u32 changes) 2238 { 2239 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2240 int ret; 2241 2242 /* 2243 * Re-calculate the tsf id, as the master-slave relations depend on the 2244 * beacon interval, which was not known when the station interface was 2245 * added. 
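 * (The AP flow does the same in iwl_mvm_start_ap_ibss(), for the same
 * reason.)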
2246 */ 2247 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { 2248 if (vif->bss_conf.he_support && 2249 !iwlwifi_mod_params.disable_11ax) 2250 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2251 2252 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2253 } 2254 2255 /* Update MU EDCA params */ 2256 if (changes & BSS_CHANGED_QOS && mvmvif->associated && 2257 bss_conf->assoc && vif->bss_conf.he_support && 2258 !iwlwifi_mod_params.disable_11ax) 2259 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2260 2261 /* 2262 * If we're not associated yet, take the (new) BSSID before associating 2263 * so the firmware knows. If we're already associated, then use the old 2264 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 2265 * branch for disassociation below. 2266 */ 2267 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 2268 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2269 2270 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 2271 if (ret) 2272 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2273 2274 /* after sending it once, adopt mac80211 data */ 2275 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2276 mvmvif->associated = bss_conf->assoc; 2277 2278 if (changes & BSS_CHANGED_ASSOC) { 2279 if (bss_conf->assoc) { 2280 /* clear statistics to get clean beacon counter */ 2281 iwl_mvm_request_statistics(mvm, true); 2282 memset(&mvmvif->beacon_stats, 0, 2283 sizeof(mvmvif->beacon_stats)); 2284 2285 /* add quota for this interface */ 2286 ret = iwl_mvm_update_quotas(mvm, true, NULL); 2287 if (ret) { 2288 IWL_ERR(mvm, "failed to update quotas\n"); 2289 return; 2290 } 2291 2292 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2293 &mvm->status)) { 2294 /* 2295 * If we're restarting then the firmware will 2296 * obviously have lost synchronisation with 2297 * the AP. It will attempt to synchronise by 2298 * itself, but we can make it more reliable by 2299 * scheduling a session protection time event. 2300 * 2301 * The firmware needs to receive a beacon to 2302 * catch up with synchronisation, use 110% of 2303 * the beacon interval. 2304 * 2305 * Set a large maximum delay to allow for more 2306 * than a single interface. 2307 */ 2308 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 2309 iwl_mvm_protect_session(mvm, vif, dur, dur, 2310 5 * dur, false); 2311 } 2312 2313 iwl_mvm_sf_update(mvm, vif, false); 2314 iwl_mvm_power_vif_assoc(mvm, vif); 2315 if (vif->p2p) { 2316 iwl_mvm_ref(mvm, IWL_MVM_REF_P2P_CLIENT); 2317 iwl_mvm_update_smps(mvm, vif, 2318 IWL_MVM_SMPS_REQ_PROT, 2319 IEEE80211_SMPS_DYNAMIC); 2320 } 2321 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2322 /* 2323 * If update fails - SF might be running in associated 2324 * mode while disassociated - which is forbidden. 2325 */ 2326 WARN_ONCE(iwl_mvm_sf_update(mvm, vif, false), 2327 "Failed to update SF upon disassociation\n"); 2328 2329 /* 2330 * If we get an assert during the connection (after the 2331 * station has been added, but before the vif is set 2332 * to associated), mac80211 will re-add the station and 2333 * then configure the vif. Since the vif is not 2334 * associated, we would remove the station here and 2335 * this would fail the recovery. 
2336 */ 2337 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2338 &mvm->status)) { 2339 /* 2340 * Remove AP station now that 2341 * the MAC is unassoc 2342 */ 2343 ret = iwl_mvm_rm_sta_id(mvm, vif, 2344 mvmvif->ap_sta_id); 2345 if (ret) 2346 IWL_ERR(mvm, 2347 "failed to remove AP station\n"); 2348 2349 if (mvm->d0i3_ap_sta_id == mvmvif->ap_sta_id) 2350 mvm->d0i3_ap_sta_id = 2351 IWL_MVM_INVALID_STA; 2352 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2353 } 2354 2355 /* remove quota for this interface */ 2356 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2357 if (ret) 2358 IWL_ERR(mvm, "failed to update quotas\n"); 2359 2360 if (vif->p2p) 2361 iwl_mvm_unref(mvm, IWL_MVM_REF_P2P_CLIENT); 2362 2363 /* this will take the cleared BSSID from bss_conf */ 2364 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2365 if (ret) 2366 IWL_ERR(mvm, 2367 "failed to update MAC %pM (clear after unassoc)\n", 2368 vif->addr); 2369 } 2370 2371 /* 2372 * The firmware tracks the MU-MIMO group on its own. 2373 * However, on HW restart we should restore this data. 2374 */ 2375 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2376 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { 2377 ret = iwl_mvm_update_mu_groups(mvm, vif); 2378 if (ret) 2379 IWL_ERR(mvm, 2380 "failed to update VHT MU_MIMO groups\n"); 2381 } 2382 2383 iwl_mvm_recalc_multicast(mvm); 2384 iwl_mvm_configure_bcast_filter(mvm); 2385 2386 /* reset rssi values */ 2387 mvmvif->bf_data.ave_beacon_signal = 0; 2388 2389 iwl_mvm_bt_coex_vif_change(mvm); 2390 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2391 IEEE80211_SMPS_AUTOMATIC); 2392 if (fw_has_capa(&mvm->fw->ucode_capa, 2393 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2394 iwl_mvm_config_scan(mvm); 2395 } 2396 2397 if (changes & BSS_CHANGED_BEACON_INFO) { 2398 /* 2399 * We received a beacon from the associated AP so 2400 * remove the session protection. 2401 */ 2402 iwl_mvm_stop_session_protection(mvm, vif); 2403 2404 iwl_mvm_sf_update(mvm, vif, false); 2405 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2406 } 2407 2408 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2409 /* 2410 * Send power command on every beacon change, 2411 * because we may have not enabled beacon abort yet. 2412 */ 2413 BSS_CHANGED_BEACON_INFO)) { 2414 ret = iwl_mvm_power_update_mac(mvm); 2415 if (ret) 2416 IWL_ERR(mvm, "failed to update power mode\n"); 2417 } 2418 2419 if (changes & BSS_CHANGED_TXPOWER) { 2420 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2421 bss_conf->txpower); 2422 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2423 } 2424 2425 if (changes & BSS_CHANGED_CQM) { 2426 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2427 /* reset cqm events tracking */ 2428 mvmvif->bf_data.last_cqm_event = 0; 2429 if (mvmvif->bf_data.bf_enabled) { 2430 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2431 if (ret) 2432 IWL_ERR(mvm, 2433 "failed to update CQM thresholds\n"); 2434 } 2435 } 2436 2437 if (changes & BSS_CHANGED_ARP_FILTER) { 2438 IWL_DEBUG_MAC80211(mvm, "arp filter changed\n"); 2439 iwl_mvm_configure_bcast_filter(mvm); 2440 } 2441 } 2442 2443 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2444 struct ieee80211_vif *vif) 2445 { 2446 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2447 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2448 int ret; 2449 2450 /* 2451 * iwl_mvm_mac_ctxt_add() might read directly from the device 2452 * (the system time), so make sure it is available. 
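 * The reference taken just below is dropped again at the end of this
 * function, whether or not starting the AP/IBSS succeeds.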
2453 */ 2454 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_START_AP); 2455 if (ret) 2456 return ret; 2457 2458 mutex_lock(&mvm->mutex); 2459 2460 /* Send the beacon template */ 2461 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2462 if (ret) 2463 goto out_unlock; 2464 2465 /* 2466 * Re-calculate the tsf id, as the master-slave relations depend on the 2467 * beacon interval, which was not known when the AP interface was added. 2468 */ 2469 if (vif->type == NL80211_IFTYPE_AP) 2470 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2471 2472 mvmvif->ap_assoc_sta_count = 0; 2473 2474 /* Add the mac context */ 2475 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2476 if (ret) 2477 goto out_unlock; 2478 2479 /* Perform the binding */ 2480 ret = iwl_mvm_binding_add_vif(mvm, vif); 2481 if (ret) 2482 goto out_remove; 2483 2484 /* 2485 * This is not very nice, but the simplest: 2486 * For older FWs adding the mcast sta before the bcast station may 2487 * cause assert 0x2b00. 2488 * This is fixed in later FW so make the order of removal depend on 2489 * the TLV 2490 */ 2491 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2492 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2493 if (ret) 2494 goto out_unbind; 2495 /* 2496 * Send the bcast station. At this stage the TBTT and DTIM time 2497 * events are added and applied to the scheduler 2498 */ 2499 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2500 if (ret) { 2501 iwl_mvm_rm_mcast_sta(mvm, vif); 2502 goto out_unbind; 2503 } 2504 } else { 2505 /* 2506 * Send the bcast station. At this stage the TBTT and DTIM time 2507 * events are added and applied to the scheduler 2508 */ 2509 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2510 if (ret) 2511 goto out_unbind; 2512 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2513 if (ret) { 2514 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2515 goto out_unbind; 2516 } 2517 } 2518 2519 /* must be set before quota calculations */ 2520 mvmvif->ap_ibss_active = true; 2521 2522 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2523 iwl_mvm_vif_set_low_latency(mvmvif, true, 2524 LOW_LATENCY_VIF_TYPE); 2525 iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); 2526 } 2527 2528 /* power updated needs to be done before quotas */ 2529 iwl_mvm_power_update_mac(mvm); 2530 2531 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2532 if (ret) 2533 goto out_quota_failed; 2534 2535 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2536 if (vif->p2p && mvm->p2p_device_vif) 2537 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2538 2539 iwl_mvm_ref(mvm, IWL_MVM_REF_AP_IBSS); 2540 2541 iwl_mvm_bt_coex_vif_change(mvm); 2542 2543 /* we don't support TDLS during DCM */ 2544 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2545 iwl_mvm_teardown_tdls_peers(mvm); 2546 2547 goto out_unlock; 2548 2549 out_quota_failed: 2550 iwl_mvm_power_update_mac(mvm); 2551 mvmvif->ap_ibss_active = false; 2552 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2553 iwl_mvm_rm_mcast_sta(mvm, vif); 2554 out_unbind: 2555 iwl_mvm_binding_remove_vif(mvm, vif); 2556 out_remove: 2557 iwl_mvm_mac_ctxt_remove(mvm, vif); 2558 out_unlock: 2559 mutex_unlock(&mvm->mutex); 2560 iwl_mvm_unref(mvm, IWL_MVM_REF_START_AP); 2561 return ret; 2562 } 2563 2564 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2565 struct ieee80211_vif *vif) 2566 { 2567 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2568 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2569 2570 iwl_mvm_prepare_mac_removal(mvm, vif); 2571 2572 mutex_lock(&mvm->mutex); 2573 2574 /* Handle AP stop while in CSA */ 2575 if 
(rcu_access_pointer(mvm->csa_vif) == vif) { 2576 iwl_mvm_remove_time_event(mvm, mvmvif, 2577 &mvmvif->time_event_data); 2578 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2579 mvmvif->csa_countdown = false; 2580 } 2581 2582 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2583 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2584 mvm->csa_tx_block_bcn_timeout = 0; 2585 } 2586 2587 mvmvif->ap_ibss_active = false; 2588 mvm->ap_last_beacon_gp2 = 0; 2589 2590 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2591 iwl_mvm_vif_set_low_latency(mvmvif, false, 2592 LOW_LATENCY_VIF_TYPE); 2593 iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); 2594 } 2595 2596 iwl_mvm_bt_coex_vif_change(mvm); 2597 2598 iwl_mvm_unref(mvm, IWL_MVM_REF_AP_IBSS); 2599 2600 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2601 if (vif->p2p && mvm->p2p_device_vif) 2602 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2603 2604 iwl_mvm_update_quotas(mvm, false, NULL); 2605 2606 /* 2607 * This is not very nice, but the simplest: 2608 * For older FWs removing the mcast sta before the bcast station may 2609 * cause assert 0x2b00. 2610 * This is fixed in later FW (which will stop beaconing when removing 2611 * bcast station). 2612 * So make the order of removal depend on the TLV 2613 */ 2614 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2615 iwl_mvm_rm_mcast_sta(mvm, vif); 2616 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2617 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2618 iwl_mvm_rm_mcast_sta(mvm, vif); 2619 iwl_mvm_binding_remove_vif(mvm, vif); 2620 2621 iwl_mvm_power_update_mac(mvm); 2622 2623 iwl_mvm_mac_ctxt_remove(mvm, vif); 2624 2625 kfree(mvmvif->ap_wep_key); 2626 mvmvif->ap_wep_key = NULL; 2627 2628 mutex_unlock(&mvm->mutex); 2629 } 2630 2631 static void 2632 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2633 struct ieee80211_vif *vif, 2634 struct ieee80211_bss_conf *bss_conf, 2635 u32 changes) 2636 { 2637 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2638 2639 /* Changes will be applied when the AP/IBSS is started */ 2640 if (!mvmvif->ap_ibss_active) 2641 return; 2642 2643 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2644 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2645 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2646 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2647 2648 /* Need to send a new beacon template to the FW */ 2649 if (changes & BSS_CHANGED_BEACON && 2650 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2651 IWL_WARN(mvm, "Failed updating beacon data\n"); 2652 2653 if (changes & BSS_CHANGED_TXPOWER) { 2654 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d\n", 2655 bss_conf->txpower); 2656 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2657 } 2658 } 2659 2660 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2661 struct ieee80211_vif *vif, 2662 struct ieee80211_bss_conf *bss_conf, 2663 u32 changes) 2664 { 2665 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2666 2667 /* 2668 * iwl_mvm_bss_info_changed_station() might call 2669 * iwl_mvm_protect_session(), which reads directly from 2670 * the device (the system time), so make sure it is available. 
2671 */ 2672 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_BSS_CHANGED)) 2673 return; 2674 2675 mutex_lock(&mvm->mutex); 2676 2677 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) 2678 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2679 2680 switch (vif->type) { 2681 case NL80211_IFTYPE_STATION: 2682 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2683 break; 2684 case NL80211_IFTYPE_AP: 2685 case NL80211_IFTYPE_ADHOC: 2686 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2687 break; 2688 case NL80211_IFTYPE_MONITOR: 2689 if (changes & BSS_CHANGED_MU_GROUPS) 2690 iwl_mvm_update_mu_groups(mvm, vif); 2691 break; 2692 default: 2693 /* shouldn't happen */ 2694 WARN_ON_ONCE(1); 2695 } 2696 2697 mutex_unlock(&mvm->mutex); 2698 iwl_mvm_unref(mvm, IWL_MVM_REF_BSS_CHANGED); 2699 } 2700 2701 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2702 struct ieee80211_vif *vif, 2703 struct ieee80211_scan_request *hw_req) 2704 { 2705 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2706 int ret; 2707 2708 if (hw_req->req.n_channels == 0 || 2709 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2710 return -EINVAL; 2711 2712 mutex_lock(&mvm->mutex); 2713 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2714 mutex_unlock(&mvm->mutex); 2715 2716 return ret; 2717 } 2718 2719 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2720 struct ieee80211_vif *vif) 2721 { 2722 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2723 2724 mutex_lock(&mvm->mutex); 2725 2726 /* Due to a race condition, it's possible that mac80211 asks 2727 * us to stop a hw_scan when it's already stopped. This can 2728 * happen, for instance, if we stopped the scan ourselves, 2729 * called ieee80211_scan_completed() and the userspace called 2730 * cancel scan scan before ieee80211_scan_work() could run. 2731 * To handle that, simply return if the scan is not running. 2732 */ 2733 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2734 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2735 2736 mutex_unlock(&mvm->mutex); 2737 } 2738 2739 static void 2740 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2741 struct ieee80211_sta *sta, u16 tids, 2742 int num_frames, 2743 enum ieee80211_frame_release_type reason, 2744 bool more_data) 2745 { 2746 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2747 2748 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2749 2750 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2751 tids, more_data, false); 2752 } 2753 2754 static void 2755 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2756 struct ieee80211_sta *sta, u16 tids, 2757 int num_frames, 2758 enum ieee80211_frame_release_type reason, 2759 bool more_data) 2760 { 2761 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2762 2763 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2764 2765 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2766 tids, more_data, true); 2767 } 2768 2769 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2770 enum sta_notify_cmd cmd, 2771 struct ieee80211_sta *sta) 2772 { 2773 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2774 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2775 unsigned long txqs = 0, tids = 0; 2776 int tid; 2777 2778 /* 2779 * If we have TVQM then we get too high queue numbers - luckily 2780 * we really shouldn't get here with that because such hardware 2781 * should have firmware supporting buffer station offload. 
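 * (txqs below is a single unsigned long used as a bitmap, so it can only
 * track queue numbers smaller than BITS_PER_LONG.)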
2782 */ 2783 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2784 return; 2785 2786 spin_lock_bh(&mvmsta->lock); 2787 for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { 2788 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2789 2790 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2791 continue; 2792 2793 __set_bit(tid_data->txq_id, &txqs); 2794 2795 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2796 continue; 2797 2798 __set_bit(tid, &tids); 2799 } 2800 2801 switch (cmd) { 2802 case STA_NOTIFY_SLEEP: 2803 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2804 ieee80211_sta_set_buffered(sta, tid, true); 2805 2806 if (txqs) 2807 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2808 /* 2809 * The fw updates the STA to be asleep. Tx packets on the Tx 2810 * queues to this station will not be transmitted. The fw will 2811 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 2812 */ 2813 break; 2814 case STA_NOTIFY_AWAKE: 2815 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2816 break; 2817 2818 if (txqs) 2819 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2820 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2821 break; 2822 default: 2823 break; 2824 } 2825 spin_unlock_bh(&mvmsta->lock); 2826 } 2827 2828 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2829 struct ieee80211_vif *vif, 2830 enum sta_notify_cmd cmd, 2831 struct ieee80211_sta *sta) 2832 { 2833 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2834 } 2835 2836 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2837 { 2838 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2839 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2840 struct ieee80211_sta *sta; 2841 struct iwl_mvm_sta *mvmsta; 2842 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2843 2844 if (WARN_ON(notif->sta_id >= ARRAY_SIZE(mvm->fw_id_to_mac_id))) 2845 return; 2846 2847 rcu_read_lock(); 2848 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2849 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2850 rcu_read_unlock(); 2851 return; 2852 } 2853 2854 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2855 2856 if (!mvmsta->vif || 2857 mvmsta->vif->type != NL80211_IFTYPE_AP) { 2858 rcu_read_unlock(); 2859 return; 2860 } 2861 2862 if (mvmsta->sleeping != sleeping) { 2863 mvmsta->sleeping = sleeping; 2864 __iwl_mvm_mac_sta_notify(mvm->hw, 2865 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 2866 sta); 2867 ieee80211_sta_ps_transition(sta, sleeping); 2868 } 2869 2870 if (sleeping) { 2871 switch (notif->type) { 2872 case IWL_MVM_PM_EVENT_AWAKE: 2873 case IWL_MVM_PM_EVENT_ASLEEP: 2874 break; 2875 case IWL_MVM_PM_EVENT_UAPSD: 2876 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 2877 break; 2878 case IWL_MVM_PM_EVENT_PS_POLL: 2879 ieee80211_sta_pspoll(sta); 2880 break; 2881 default: 2882 break; 2883 } 2884 } 2885 2886 rcu_read_unlock(); 2887 } 2888 2889 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2890 struct ieee80211_vif *vif, 2891 struct ieee80211_sta *sta) 2892 { 2893 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2894 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2895 2896 /* 2897 * This is called before mac80211 does RCU synchronisation, 2898 * so here we already invalidate our internal RCU-protected 2899 * station pointer. The rest of the code will thus no longer 2900 * be able to find the station this way, and we don't rely 2901 * on further RCU synchronisation after the sta_state() 2902 * callback deleted the station. 
2903 */ 2904 mutex_lock(&mvm->mutex); 2905 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 2906 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 2907 ERR_PTR(-ENOENT)); 2908 2909 mutex_unlock(&mvm->mutex); 2910 } 2911 2912 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2913 const u8 *bssid) 2914 { 2915 int i; 2916 2917 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2918 struct iwl_mvm_tcm_mac *mdata; 2919 2920 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; 2921 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 2922 mdata->opened_rx_ba_sessions = false; 2923 } 2924 2925 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2926 return; 2927 2928 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 2929 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2930 return; 2931 } 2932 2933 if (!vif->p2p && 2934 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 2935 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2936 return; 2937 } 2938 2939 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { 2940 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { 2941 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2942 return; 2943 } 2944 } 2945 2946 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 2947 } 2948 2949 static void 2950 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 2951 struct ieee80211_vif *vif, u8 *peer_addr, 2952 enum nl80211_tdls_operation action) 2953 { 2954 struct iwl_fw_dbg_trigger_tlv *trig; 2955 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 2956 2957 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 2958 FW_DBG_TRIGGER_TDLS); 2959 if (!trig) 2960 return; 2961 2962 tdls_trig = (void *)trig->data; 2963 2964 if (!(tdls_trig->action_bitmap & BIT(action))) 2965 return; 2966 2967 if (tdls_trig->peer_mode && 2968 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 2969 return; 2970 2971 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 2972 "TDLS event occurred, peer %pM, action %d", 2973 peer_addr, action); 2974 } 2975 2976 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 2977 struct ieee80211_vif *vif, 2978 struct ieee80211_sta *sta, 2979 enum ieee80211_sta_state old_state, 2980 enum ieee80211_sta_state new_state) 2981 { 2982 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2983 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2984 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2985 int ret; 2986 2987 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 2988 sta->addr, old_state, new_state); 2989 2990 /* this would be a mac80211 bug ... but don't crash */ 2991 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 2992 return -EINVAL; 2993 2994 /* 2995 * If we are in a STA removal flow and in DQA mode: 2996 * 2997 * This is after the sync_rcu part, so the queues have already been 2998 * flushed. No more TXs on their way in mac80211's path, and no more in 2999 * the queues. 3000 * Also, we won't be getting any new TX frames for this station. 3001 * What we might have are deferred TX frames that need to be taken care 3002 * of. 3003 * 3004 * Drop any still-queued deferred-frame before removing the STA, and 3005 * make sure the worker is no longer handling frames for this STA. 
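 * The flush_work(&mvm->add_stream_wk) just below is what guarantees the
 * worker has finished before the station is removed.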
3006 */ 3007 if (old_state == IEEE80211_STA_NONE && 3008 new_state == IEEE80211_STA_NOTEXIST) { 3009 flush_work(&mvm->add_stream_wk); 3010 3011 /* 3012 * No need to make sure deferred TX indication is off since the 3013 * worker will already remove it if it was on 3014 */ 3015 } 3016 3017 mutex_lock(&mvm->mutex); 3018 /* track whether or not the station is associated */ 3019 mvm_sta->sta_state = new_state; 3020 3021 if (old_state == IEEE80211_STA_NOTEXIST && 3022 new_state == IEEE80211_STA_NONE) { 3023 /* 3024 * Firmware bug - it'll crash if the beacon interval is less 3025 * than 16. We can't avoid connecting at all, so refuse the 3026 * station state change, this will cause mac80211 to abandon 3027 * attempts to connect to this AP, and eventually wpa_s will 3028 * blacklist the AP... 3029 */ 3030 if (vif->type == NL80211_IFTYPE_STATION && 3031 vif->bss_conf.beacon_int < 16) { 3032 IWL_ERR(mvm, 3033 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 3034 sta->addr, vif->bss_conf.beacon_int); 3035 ret = -EINVAL; 3036 goto out_unlock; 3037 } 3038 3039 if (sta->tdls && 3040 (vif->p2p || 3041 iwl_mvm_tdls_sta_count(mvm, NULL) == 3042 IWL_MVM_TDLS_STA_COUNT || 3043 iwl_mvm_phy_ctx_count(mvm) > 1)) { 3044 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 3045 ret = -EBUSY; 3046 goto out_unlock; 3047 } 3048 3049 ret = iwl_mvm_add_sta(mvm, vif, sta); 3050 if (sta->tdls && ret == 0) { 3051 iwl_mvm_recalc_tdls_state(mvm, vif, true); 3052 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3053 NL80211_TDLS_SETUP); 3054 } 3055 3056 sta->max_rc_amsdu_len = 1; 3057 } else if (old_state == IEEE80211_STA_NONE && 3058 new_state == IEEE80211_STA_AUTH) { 3059 /* 3060 * EBS may be disabled due to previous failures reported by FW. 3061 * Reset EBS status here assuming environment has been changed. 
3062 */ 3063 mvm->last_ebs_successful = true; 3064 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 3065 ret = 0; 3066 } else if (old_state == IEEE80211_STA_AUTH && 3067 new_state == IEEE80211_STA_ASSOC) { 3068 if (vif->type == NL80211_IFTYPE_AP) { 3069 mvmvif->ap_assoc_sta_count++; 3070 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3071 if (vif->bss_conf.he_support && 3072 !iwlwifi_mod_params.disable_11ax) 3073 iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); 3074 } 3075 3076 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3077 false); 3078 ret = iwl_mvm_update_sta(mvm, vif, sta); 3079 } else if (old_state == IEEE80211_STA_ASSOC && 3080 new_state == IEEE80211_STA_AUTHORIZED) { 3081 3082 /* we don't support TDLS during DCM */ 3083 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3084 iwl_mvm_teardown_tdls_peers(mvm); 3085 3086 if (sta->tdls) 3087 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3088 NL80211_TDLS_ENABLE_LINK); 3089 3090 /* enable beacon filtering */ 3091 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3092 3093 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3094 true); 3095 3096 /* if wep is used, need to set the key for the station now */ 3097 if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) 3098 ret = iwl_mvm_set_sta_key(mvm, vif, sta, 3099 mvmvif->ap_wep_key, 3100 STA_KEY_IDX_INVALID); 3101 else 3102 ret = 0; 3103 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3104 new_state == IEEE80211_STA_ASSOC) { 3105 /* disable beacon filtering */ 3106 WARN_ON(iwl_mvm_disable_beacon_filter(mvm, vif, 0)); 3107 ret = 0; 3108 } else if (old_state == IEEE80211_STA_ASSOC && 3109 new_state == IEEE80211_STA_AUTH) { 3110 if (vif->type == NL80211_IFTYPE_AP) { 3111 mvmvif->ap_assoc_sta_count--; 3112 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3113 } 3114 ret = 0; 3115 } else if (old_state == IEEE80211_STA_AUTH && 3116 new_state == IEEE80211_STA_NONE) { 3117 ret = 0; 3118 } else if (old_state == IEEE80211_STA_NONE && 3119 new_state == IEEE80211_STA_NOTEXIST) { 3120 ret = iwl_mvm_rm_sta(mvm, vif, sta); 3121 if (sta->tdls) { 3122 iwl_mvm_recalc_tdls_state(mvm, vif, false); 3123 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3124 NL80211_TDLS_DISABLE_LINK); 3125 } 3126 3127 /* Remove STA key if this is an AP using WEP */ 3128 if (vif->type == NL80211_IFTYPE_AP && mvmvif->ap_wep_key) { 3129 int rm_ret = iwl_mvm_remove_sta_key(mvm, vif, sta, 3130 mvmvif->ap_wep_key); 3131 3132 if (!ret) 3133 ret = rm_ret; 3134 } 3135 3136 } else { 3137 ret = -EIO; 3138 } 3139 out_unlock: 3140 mutex_unlock(&mvm->mutex); 3141 3142 if (sta->tdls && ret == 0) { 3143 if (old_state == IEEE80211_STA_NOTEXIST && 3144 new_state == IEEE80211_STA_NONE) 3145 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3146 else if (old_state == IEEE80211_STA_NONE && 3147 new_state == IEEE80211_STA_NOTEXIST) 3148 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3149 } 3150 3151 return ret; 3152 } 3153 3154 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3155 { 3156 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3157 3158 mvm->rts_threshold = value; 3159 3160 return 0; 3161 } 3162 3163 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 3164 struct ieee80211_vif *vif, 3165 struct ieee80211_sta *sta, u32 changed) 3166 { 3167 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3168 3169 if (vif->type == NL80211_IFTYPE_STATION && 3170 changed & IEEE80211_RC_NSS_CHANGED) 3171 iwl_mvm_sf_update(mvm, vif, false); 3172 } 3173 3174 static int iwl_mvm_mac_conf_tx(struct 
ieee80211_hw *hw, 3175 struct ieee80211_vif *vif, u16 ac, 3176 const struct ieee80211_tx_queue_params *params) 3177 { 3178 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3179 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3180 3181 mvmvif->queue_params[ac] = *params; 3182 3183 /* 3184 * No need to update right away, we'll get BSS_CHANGED_QOS 3185 * The exception is P2P_DEVICE interface which needs immediate update. 3186 */ 3187 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 3188 int ret; 3189 3190 mutex_lock(&mvm->mutex); 3191 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3192 mutex_unlock(&mvm->mutex); 3193 return ret; 3194 } 3195 return 0; 3196 } 3197 3198 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 3199 struct ieee80211_vif *vif, 3200 u16 req_duration) 3201 { 3202 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3203 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 3204 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 3205 3206 /* 3207 * iwl_mvm_protect_session() reads directly from the device 3208 * (the system time), so make sure it is available. 3209 */ 3210 if (iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PREPARE_TX)) 3211 return; 3212 3213 if (req_duration > duration) 3214 duration = req_duration; 3215 3216 mutex_lock(&mvm->mutex); 3217 /* Try really hard to protect the session and hear a beacon */ 3218 iwl_mvm_protect_session(mvm, vif, duration, min_duration, 500, false); 3219 mutex_unlock(&mvm->mutex); 3220 3221 iwl_mvm_unref(mvm, IWL_MVM_REF_PREPARE_TX); 3222 } 3223 3224 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 3225 struct ieee80211_vif *vif, 3226 struct cfg80211_sched_scan_request *req, 3227 struct ieee80211_scan_ies *ies) 3228 { 3229 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3230 3231 int ret; 3232 3233 mutex_lock(&mvm->mutex); 3234 3235 if (!vif->bss_conf.idle) { 3236 ret = -EBUSY; 3237 goto out; 3238 } 3239 3240 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 3241 3242 out: 3243 mutex_unlock(&mvm->mutex); 3244 return ret; 3245 } 3246 3247 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 3248 struct ieee80211_vif *vif) 3249 { 3250 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3251 int ret; 3252 3253 mutex_lock(&mvm->mutex); 3254 3255 /* Due to a race condition, it's possible that mac80211 asks 3256 * us to stop a sched_scan when it's already stopped. This 3257 * can happen, for instance, if we stopped the scan ourselves, 3258 * called ieee80211_sched_scan_stopped() and the userspace called 3259 * stop sched scan scan before ieee80211_sched_scan_stopped_work() 3260 * could run. To handle this, simply return if the scan is 3261 * not running. 
3262 */ 3263 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 3264 mutex_unlock(&mvm->mutex); 3265 return 0; 3266 } 3267 3268 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 3269 mutex_unlock(&mvm->mutex); 3270 iwl_mvm_wait_for_async_handlers(mvm); 3271 3272 return ret; 3273 } 3274 3275 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3276 enum set_key_cmd cmd, 3277 struct ieee80211_vif *vif, 3278 struct ieee80211_sta *sta, 3279 struct ieee80211_key_conf *key) 3280 { 3281 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3282 struct iwl_mvm_sta *mvmsta; 3283 struct iwl_mvm_key_pn *ptk_pn; 3284 int keyidx = key->keyidx; 3285 int ret; 3286 u8 key_offset; 3287 3288 if (iwlwifi_mod_params.swcrypto) { 3289 IWL_DEBUG_MAC80211(mvm, "leave - hwcrypto disabled\n"); 3290 return -EOPNOTSUPP; 3291 } 3292 3293 switch (key->cipher) { 3294 case WLAN_CIPHER_SUITE_TKIP: 3295 if (!mvm->trans->cfg->gen2) { 3296 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 3297 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3298 } else if (vif->type == NL80211_IFTYPE_STATION) { 3299 key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; 3300 } else { 3301 IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); 3302 return -EOPNOTSUPP; 3303 } 3304 break; 3305 case WLAN_CIPHER_SUITE_CCMP: 3306 case WLAN_CIPHER_SUITE_GCMP: 3307 case WLAN_CIPHER_SUITE_GCMP_256: 3308 if (!iwl_mvm_has_new_tx_api(mvm)) 3309 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3310 break; 3311 case WLAN_CIPHER_SUITE_AES_CMAC: 3312 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3313 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3314 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 3315 break; 3316 case WLAN_CIPHER_SUITE_WEP40: 3317 case WLAN_CIPHER_SUITE_WEP104: 3318 if (vif->type == NL80211_IFTYPE_AP) { 3319 struct iwl_mvm_vif *mvmvif = 3320 iwl_mvm_vif_from_mac80211(vif); 3321 3322 mvmvif->ap_wep_key = kmemdup(key, 3323 sizeof(*key) + key->keylen, 3324 GFP_KERNEL); 3325 if (!mvmvif->ap_wep_key) 3326 return -ENOMEM; 3327 } 3328 3329 if (vif->type != NL80211_IFTYPE_STATION) 3330 return 0; 3331 break; 3332 default: 3333 /* currently FW supports only one optional cipher scheme */ 3334 if (hw->n_cipher_schemes && 3335 hw->cipher_schemes->cipher == key->cipher) 3336 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3337 else 3338 return -EOPNOTSUPP; 3339 } 3340 3341 mutex_lock(&mvm->mutex); 3342 3343 switch (cmd) { 3344 case SET_KEY: 3345 if ((vif->type == NL80211_IFTYPE_ADHOC || 3346 vif->type == NL80211_IFTYPE_AP) && !sta) { 3347 /* 3348 * GTK on AP interface is a TX-only key, return 0; 3349 * on IBSS they're per-station and because we're lazy 3350 * we don't support them for RX, so do the same. 3351 * CMAC/GMAC in AP/IBSS modes must be done in software. 3352 */ 3353 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3354 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3355 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3356 ret = -EOPNOTSUPP; 3357 else 3358 ret = 0; 3359 3360 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 3361 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 3362 !iwl_mvm_has_new_tx_api(mvm)) { 3363 key->hw_key_idx = STA_KEY_IDX_INVALID; 3364 break; 3365 } 3366 } 3367 3368 /* During FW restart, in order to restore the state as it was, 3369 * don't try to reprogram keys we previously failed for. 
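 * Keys that previously failed are recognizable here because the error
 * path below left them with hw_key_idx == STA_KEY_IDX_INVALID.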
3370 */ 3371 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3372 key->hw_key_idx == STA_KEY_IDX_INVALID) { 3373 IWL_DEBUG_MAC80211(mvm, 3374 "skip invalid idx key programming during restart\n"); 3375 ret = 0; 3376 break; 3377 } 3378 3379 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3380 sta && iwl_mvm_has_new_rx_api(mvm) && 3381 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3382 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3383 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3384 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3385 struct ieee80211_key_seq seq; 3386 int tid, q; 3387 3388 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3389 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 3390 ptk_pn = kzalloc(struct_size(ptk_pn, q, 3391 mvm->trans->num_rx_queues), 3392 GFP_KERNEL); 3393 if (!ptk_pn) { 3394 ret = -ENOMEM; 3395 break; 3396 } 3397 3398 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3399 ieee80211_get_key_rx_seq(key, tid, &seq); 3400 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3401 memcpy(ptk_pn->q[q].pn[tid], 3402 seq.ccmp.pn, 3403 IEEE80211_CCMP_PN_LEN); 3404 } 3405 3406 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3407 } 3408 3409 /* in HW restart reuse the index, otherwise request a new one */ 3410 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3411 key_offset = key->hw_key_idx; 3412 else 3413 key_offset = STA_KEY_IDX_INVALID; 3414 3415 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3416 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3417 if (ret) { 3418 IWL_WARN(mvm, "set key failed\n"); 3419 /* 3420 * can't add key for RX, but we don't need it 3421 * in the device for TX so still return 0 3422 */ 3423 key->hw_key_idx = STA_KEY_IDX_INVALID; 3424 ret = 0; 3425 } 3426 3427 break; 3428 case DISABLE_KEY: 3429 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3430 ret = 0; 3431 break; 3432 } 3433 3434 if (sta && iwl_mvm_has_new_rx_api(mvm) && 3435 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3436 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3437 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3438 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3439 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3440 ptk_pn = rcu_dereference_protected( 3441 mvmsta->ptk_pn[keyidx], 3442 lockdep_is_held(&mvm->mutex)); 3443 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3444 if (ptk_pn) 3445 kfree_rcu(ptk_pn, rcu_head); 3446 } 3447 3448 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3449 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3450 break; 3451 default: 3452 ret = -EINVAL; 3453 } 3454 3455 mutex_unlock(&mvm->mutex); 3456 return ret; 3457 } 3458 3459 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3460 struct ieee80211_vif *vif, 3461 struct ieee80211_key_conf *keyconf, 3462 struct ieee80211_sta *sta, 3463 u32 iv32, u16 *phase1key) 3464 { 3465 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3466 3467 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3468 return; 3469 3470 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3471 } 3472 3473 3474 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3475 struct iwl_rx_packet *pkt, void *data) 3476 { 3477 struct iwl_mvm *mvm = 3478 container_of(notif_wait, struct iwl_mvm, notif_wait); 3479 struct iwl_hs20_roc_res *resp; 3480 int resp_len = iwl_rx_packet_payload_len(pkt); 3481 struct iwl_mvm_time_event_data *te_data = data; 3482 3483 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3484 return true; 3485 3486 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 3487 IWL_ERR(mvm, 
"Invalid HOT_SPOT_CMD response\n"); 3488 return true; 3489 } 3490 3491 resp = (void *)pkt->data; 3492 3493 IWL_DEBUG_TE(mvm, 3494 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3495 resp->status, resp->event_unique_id); 3496 3497 te_data->uid = le32_to_cpu(resp->event_unique_id); 3498 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3499 te_data->uid); 3500 3501 spin_lock_bh(&mvm->time_event_lock); 3502 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3503 spin_unlock_bh(&mvm->time_event_lock); 3504 3505 return true; 3506 } 3507 3508 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3509 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3510 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3511 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3512 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3513 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3514 struct ieee80211_channel *channel, 3515 struct ieee80211_vif *vif, 3516 int duration) 3517 { 3518 int res, time_reg = DEVICE_SYSTEM_TIME_REG; 3519 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3520 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3521 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3522 struct iwl_notification_wait wait_time_event; 3523 u32 dtim_interval = vif->bss_conf.dtim_period * 3524 vif->bss_conf.beacon_int; 3525 u32 req_dur, delay; 3526 struct iwl_hs20_roc_req aux_roc_req = { 3527 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3528 .id_and_color = 3529 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3530 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3531 }; 3532 struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, 3533 &aux_roc_req.channel_info); 3534 u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); 3535 3536 /* Set the channel info data */ 3537 iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, 3538 (channel->band == NL80211_BAND_2GHZ) ? 3539 PHY_BAND_24 : PHY_BAND_5, 3540 PHY_VHT_CHANNEL_MODE20, 3541 0); 3542 3543 /* Set the time and duration */ 3544 tail->apply_time = cpu_to_le32(iwl_read_prph(mvm->trans, time_reg)); 3545 3546 delay = AUX_ROC_MIN_DELAY; 3547 req_dur = MSEC_TO_TU(duration); 3548 3549 /* 3550 * If we are associated we want the delay time to be at least one 3551 * dtim interval so that the FW can wait until after the DTIM and 3552 * then start the time event, this will potentially allow us to 3553 * remain off-channel for the max duration. 3554 * Since we want to use almost a whole dtim interval we would also 3555 * like the delay to be for 2-3 dtim intervals, in case there are 3556 * other time events with higher priority. 
3557 */ 3558 if (vif->bss_conf.assoc) { 3559 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3560 /* We cannot remain off-channel longer than the DTIM interval */ 3561 if (dtim_interval <= req_dur) { 3562 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3563 if (req_dur <= AUX_ROC_MIN_DURATION) 3564 req_dur = dtim_interval - 3565 AUX_ROC_MIN_SAFETY_BUFFER; 3566 } 3567 } 3568 3569 tail->duration = cpu_to_le32(req_dur); 3570 tail->apply_time_max_delay = cpu_to_le32(delay); 3571 3572 IWL_DEBUG_TE(mvm, 3573 "ROC: Requesting to remain on channel %u for %ums (requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3574 channel->hw_value, req_dur, duration, delay, 3575 dtim_interval); 3576 /* Set the node address */ 3577 memcpy(tail->node_addr, vif->addr, ETH_ALEN); 3578 3579 lockdep_assert_held(&mvm->mutex); 3580 3581 spin_lock_bh(&mvm->time_event_lock); 3582 3583 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3584 spin_unlock_bh(&mvm->time_event_lock); 3585 return -EIO; 3586 } 3587 3588 te_data->vif = vif; 3589 te_data->duration = duration; 3590 te_data->id = HOT_SPOT_CMD; 3591 3592 spin_unlock_bh(&mvm->time_event_lock); 3593 3594 /* 3595 * Use a notification wait, which really just processes the 3596 * command response and doesn't wait for anything, in order 3597 * to be able to process the response and get the UID inside 3598 * the RX path. Using CMD_WANT_SKB doesn't work because it 3599 * stores the buffer and then wakes up this thread, by which 3600 * time another notification (that the time event started) 3601 * might already be processed unsuccessfully. 3602 */ 3603 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3604 time_event_response, 3605 ARRAY_SIZE(time_event_response), 3606 iwl_mvm_rx_aux_roc, te_data); 3607 3608 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, 3609 &aux_roc_req); 3610 3611 if (res) { 3612 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3613 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3614 goto out_clear_te; 3615 } 3616 3617 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3618 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3619 /* should never fail */ 3620 WARN_ON_ONCE(res); 3621 3622 if (res) { 3623 out_clear_te: 3624 spin_lock_bh(&mvm->time_event_lock); 3625 iwl_mvm_te_clear_data(mvm, te_data); 3626 spin_unlock_bh(&mvm->time_event_lock); 3627 } 3628 3629 return res; 3630 } 3631 3632 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3633 struct ieee80211_vif *vif, 3634 struct ieee80211_channel *channel, 3635 int duration, 3636 enum ieee80211_roc_type type) 3637 { 3638 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3639 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3640 struct cfg80211_chan_def chandef; 3641 struct iwl_mvm_phy_ctxt *phy_ctxt; 3642 int ret, i; 3643 3644 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3645 duration, type); 3646 3647 /* 3648 * Flush the done work, just in case it's still pending, so that 3649 * the work it does can complete and we can accept new frames. 
3650 */ 3651 flush_work(&mvm->roc_done_wk); 3652 3653 mutex_lock(&mvm->mutex); 3654 3655 switch (vif->type) { 3656 case NL80211_IFTYPE_STATION: 3657 if (fw_has_capa(&mvm->fw->ucode_capa, 3658 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3659 /* Use aux roc framework (HS20) */ 3660 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3661 vif, duration); 3662 goto out_unlock; 3663 } 3664 IWL_ERR(mvm, "hotspot not supported\n"); 3665 ret = -EINVAL; 3666 goto out_unlock; 3667 case NL80211_IFTYPE_P2P_DEVICE: 3668 /* handle below */ 3669 break; 3670 default: 3671 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 3672 ret = -EINVAL; 3673 goto out_unlock; 3674 } 3675 3676 for (i = 0; i < NUM_PHY_CTX; i++) { 3677 phy_ctxt = &mvm->phy_ctxts[i]; 3678 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3679 continue; 3680 3681 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3682 /* 3683 * Unbind the P2P_DEVICE from the current PHY context, 3684 * and if the PHY context is not used remove it. 3685 */ 3686 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3687 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3688 goto out_unlock; 3689 3690 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3691 3692 /* Bind the P2P_DEVICE to the current PHY Context */ 3693 mvmvif->phy_ctxt = phy_ctxt; 3694 3695 ret = iwl_mvm_binding_add_vif(mvm, vif); 3696 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3697 goto out_unlock; 3698 3699 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3700 goto schedule_time_event; 3701 } 3702 } 3703 3704 /* Need to update the PHY context only if the ROC channel changed */ 3705 if (channel == mvmvif->phy_ctxt->channel) 3706 goto schedule_time_event; 3707 3708 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3709 3710 /* 3711 * Change the PHY context configuration as it is currently referenced 3712 * only by the P2P Device MAC 3713 */ 3714 if (mvmvif->phy_ctxt->ref == 1) { 3715 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3716 &chandef, 1, 1); 3717 if (ret) 3718 goto out_unlock; 3719 } else { 3720 /* 3721 * The PHY context is shared with other MACs. 
Need to remove the 3722 * P2P Device from the binding, allocate a new PHY context and 3723 * create a new binding 3724 */ 3725 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3726 if (!phy_ctxt) { 3727 ret = -ENOSPC; 3728 goto out_unlock; 3729 } 3730 3731 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3732 1, 1); 3733 if (ret) { 3734 IWL_ERR(mvm, "Failed to change PHY context\n"); 3735 goto out_unlock; 3736 } 3737 3738 /* Unbind the P2P_DEVICE from the current PHY context */ 3739 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3740 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3741 goto out_unlock; 3742 3743 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3744 3745 /* Bind the P2P_DEVICE to the newly allocated PHY context */ 3746 mvmvif->phy_ctxt = phy_ctxt; 3747 3748 ret = iwl_mvm_binding_add_vif(mvm, vif); 3749 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3750 goto out_unlock; 3751 3752 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3753 } 3754 3755 schedule_time_event: 3756 /* Schedule the time events */ 3757 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 3758 3759 out_unlock: 3760 mutex_unlock(&mvm->mutex); 3761 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3762 return ret; 3763 } 3764 3765 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw) 3766 { 3767 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3768 3769 IWL_DEBUG_MAC80211(mvm, "enter\n"); 3770 3771 mutex_lock(&mvm->mutex); 3772 iwl_mvm_stop_roc(mvm); 3773 mutex_unlock(&mvm->mutex); 3774 3775 IWL_DEBUG_MAC80211(mvm, "leave\n"); 3776 return 0; 3777 } 3778 3779 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 3780 struct ieee80211_chanctx_conf *ctx) 3781 { 3782 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3783 struct iwl_mvm_phy_ctxt *phy_ctxt; 3784 int ret; 3785 3786 lockdep_assert_held(&mvm->mutex); 3787 3788 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 3789 3790 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3791 if (!phy_ctxt) { 3792 ret = -ENOSPC; 3793 goto out; 3794 } 3795 3796 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 3797 ctx->rx_chains_static, 3798 ctx->rx_chains_dynamic); 3799 if (ret) { 3800 IWL_ERR(mvm, "Failed to add PHY context\n"); 3801 goto out; 3802 } 3803 3804 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 3805 *phy_ctxt_id = phy_ctxt->id; 3806 out: 3807 return ret; 3808 } 3809 3810 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 3811 struct ieee80211_chanctx_conf *ctx) 3812 { 3813 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3814 int ret; 3815 3816 mutex_lock(&mvm->mutex); 3817 ret = __iwl_mvm_add_chanctx(mvm, ctx); 3818 mutex_unlock(&mvm->mutex); 3819 3820 return ret; 3821 } 3822 3823 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 3824 struct ieee80211_chanctx_conf *ctx) 3825 { 3826 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3827 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3828 3829 lockdep_assert_held(&mvm->mutex); 3830 3831 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 3832 } 3833 3834 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 3835 struct ieee80211_chanctx_conf *ctx) 3836 { 3837 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3838 3839 mutex_lock(&mvm->mutex); 3840 __iwl_mvm_remove_chanctx(mvm, ctx); 3841 mutex_unlock(&mvm->mutex); 3842 } 3843 3844 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 3845 struct ieee80211_chanctx_conf *ctx, 3846 u32 changed) 3847 { 3848 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3849 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3850 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3851
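/* A PHY context referenced by more than one MAC may only be updated for width, RX-chain, radar or min-width changes here; any other change (e.g. a channel change) would also affect the other vifs bound to it, so warn and bail out. */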
3852 if (WARN_ONCE((phy_ctxt->ref > 1) && 3853 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 3854 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 3855 IEEE80211_CHANCTX_CHANGE_RADAR | 3856 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 3857 "Cannot change PHY. Ref=%d, changed=0x%X\n", 3858 phy_ctxt->ref, changed)) 3859 return; 3860 3861 mutex_lock(&mvm->mutex); 3862 3863 /* we are only changing the min_width, may be a noop */ 3864 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { 3865 if (phy_ctxt->width == ctx->min_def.width) 3866 goto out_unlock; 3867 3868 /* we are just toggling between 20_NOHT and 20 */ 3869 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && 3870 ctx->min_def.width <= NL80211_CHAN_WIDTH_20) 3871 goto out_unlock; 3872 } 3873 3874 iwl_mvm_bt_coex_vif_change(mvm); 3875 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &ctx->min_def, 3876 ctx->rx_chains_static, 3877 ctx->rx_chains_dynamic); 3878 3879 out_unlock: 3880 mutex_unlock(&mvm->mutex); 3881 } 3882 3883 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 3884 struct ieee80211_vif *vif, 3885 struct ieee80211_chanctx_conf *ctx, 3886 bool switching_chanctx) 3887 { 3888 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 3889 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 3890 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3891 int ret; 3892 3893 lockdep_assert_held(&mvm->mutex); 3894 3895 mvmvif->phy_ctxt = phy_ctxt; 3896 3897 switch (vif->type) { 3898 case NL80211_IFTYPE_AP: 3899 /* only needed if we're switching chanctx (i.e. during CSA) */ 3900 if (switching_chanctx) { 3901 mvmvif->ap_ibss_active = true; 3902 break; 3903 } 3904 case NL80211_IFTYPE_ADHOC: 3905 /* 3906 * The AP binding flow is handled as part of the start_ap flow 3907 * (in bss_info_changed), similarly for IBSS. 3908 */ 3909 ret = 0; 3910 goto out; 3911 case NL80211_IFTYPE_STATION: 3912 mvmvif->csa_bcn_pending = false; 3913 break; 3914 case NL80211_IFTYPE_MONITOR: 3915 /* always disable PS when a monitor interface is active */ 3916 mvmvif->ps_disabled = true; 3917 break; 3918 default: 3919 ret = -EINVAL; 3920 goto out; 3921 } 3922 3923 ret = iwl_mvm_binding_add_vif(mvm, vif); 3924 if (ret) 3925 goto out; 3926 3927 /* 3928 * Power state must be updated before quotas, 3929 * otherwise fw will complain. 3930 */ 3931 iwl_mvm_power_update_mac(mvm); 3932 3933 /* Setting the quota at this stage is only required for monitor 3934 * interfaces. For the other types, the bss_info changed flow 3935 * will handle quota settings. 3936 */ 3937 if (vif->type == NL80211_IFTYPE_MONITOR) { 3938 mvmvif->monitor_active = true; 3939 ret = iwl_mvm_update_quotas(mvm, false, NULL); 3940 if (ret) 3941 goto out_remove_binding; 3942 3943 ret = iwl_mvm_add_snif_sta(mvm, vif); 3944 if (ret) 3945 goto out_remove_binding; 3946 3947 } 3948 3949 /* Handle binding during CSA */ 3950 if (vif->type == NL80211_IFTYPE_AP) { 3951 iwl_mvm_update_quotas(mvm, false, NULL); 3952 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3953 } 3954 3955 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 3956 u32 duration = 3 * vif->bss_conf.beacon_int; 3957 3958 /* iwl_mvm_protect_session() reads directly from the 3959 * device (the system time), so make sure it is 3960 * available. 3961 */ 3962 ret = iwl_mvm_ref_sync(mvm, IWL_MVM_REF_PROTECT_CSA); 3963 if (ret) 3964 goto out_remove_binding; 3965 3966 /* Protect the session to make sure we hear the first 3967 * beacon on the new channel. 
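* The csa_bcn_pending flag set below is cleared again in iwl_mvm_post_channel_switch() once the switch has completed.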
3968 */ 3969 mvmvif->csa_bcn_pending = true; 3970 iwl_mvm_protect_session(mvm, vif, duration, duration, 3971 vif->bss_conf.beacon_int / 2, 3972 true); 3973 3974 iwl_mvm_unref(mvm, IWL_MVM_REF_PROTECT_CSA); 3975 3976 iwl_mvm_update_quotas(mvm, false, NULL); 3977 } 3978 3979 goto out; 3980 3981 out_remove_binding: 3982 iwl_mvm_binding_remove_vif(mvm, vif); 3983 iwl_mvm_power_update_mac(mvm); 3984 out: 3985 if (ret) 3986 mvmvif->phy_ctxt = NULL; 3987 return ret; 3988 } 3989 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 3990 struct ieee80211_vif *vif, 3991 struct ieee80211_chanctx_conf *ctx) 3992 { 3993 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3994 int ret; 3995 3996 mutex_lock(&mvm->mutex); 3997 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 3998 mutex_unlock(&mvm->mutex); 3999 4000 return ret; 4001 } 4002 4003 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 4004 struct ieee80211_vif *vif, 4005 struct ieee80211_chanctx_conf *ctx, 4006 bool switching_chanctx) 4007 { 4008 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4009 struct ieee80211_vif *disabled_vif = NULL; 4010 4011 lockdep_assert_held(&mvm->mutex); 4012 4013 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 4014 4015 switch (vif->type) { 4016 case NL80211_IFTYPE_ADHOC: 4017 goto out; 4018 case NL80211_IFTYPE_MONITOR: 4019 mvmvif->monitor_active = false; 4020 mvmvif->ps_disabled = false; 4021 iwl_mvm_rm_snif_sta(mvm, vif); 4022 break; 4023 case NL80211_IFTYPE_AP: 4024 /* This part is triggered only during CSA */ 4025 if (!switching_chanctx || !mvmvif->ap_ibss_active) 4026 goto out; 4027 4028 mvmvif->csa_countdown = false; 4029 4030 /* Set CS bit on all the stations */ 4031 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 4032 4033 /* Save blocked iface, the timeout is set on the next beacon */ 4034 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 4035 4036 mvmvif->ap_ibss_active = false; 4037 break; 4038 case NL80211_IFTYPE_STATION: 4039 if (!switching_chanctx) 4040 break; 4041 4042 disabled_vif = vif; 4043 4044 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 4045 break; 4046 default: 4047 break; 4048 } 4049 4050 iwl_mvm_update_quotas(mvm, false, disabled_vif); 4051 iwl_mvm_binding_remove_vif(mvm, vif); 4052 4053 out: 4054 mvmvif->phy_ctxt = NULL; 4055 iwl_mvm_power_update_mac(mvm); 4056 } 4057 4058 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 4059 struct ieee80211_vif *vif, 4060 struct ieee80211_chanctx_conf *ctx) 4061 { 4062 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4063 4064 mutex_lock(&mvm->mutex); 4065 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 4066 mutex_unlock(&mvm->mutex); 4067 } 4068 4069 static int 4070 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 4071 struct ieee80211_vif_chanctx_switch *vifs) 4072 { 4073 int ret; 4074 4075 mutex_lock(&mvm->mutex); 4076 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4077 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 4078 4079 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 4080 if (ret) { 4081 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 4082 goto out_reassign; 4083 } 4084 4085 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4086 true); 4087 if (ret) { 4088 IWL_ERR(mvm, 4089 "failed to assign new_ctx during channel switch\n"); 4090 goto out_remove; 4091 } 4092 4093 /* we don't support TDLS during DCM - can be caused by channel switch */ 4094 if (iwl_mvm_phy_ctx_count(mvm) > 1) 4095 
iwl_mvm_teardown_tdls_peers(mvm); 4096 4097 goto out; 4098 4099 out_remove: 4100 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 4101 4102 out_reassign: 4103 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 4104 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 4105 goto out_restart; 4106 } 4107 4108 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4109 true)) { 4110 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4111 goto out_restart; 4112 } 4113 4114 goto out; 4115 4116 out_restart: 4117 /* things keep failing, better restart the hw */ 4118 iwl_mvm_nic_restart(mvm, false); 4119 4120 out: 4121 mutex_unlock(&mvm->mutex); 4122 4123 return ret; 4124 } 4125 4126 static int 4127 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 4128 struct ieee80211_vif_chanctx_switch *vifs) 4129 { 4130 int ret; 4131 4132 mutex_lock(&mvm->mutex); 4133 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4134 4135 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4136 true); 4137 if (ret) { 4138 IWL_ERR(mvm, 4139 "failed to assign new_ctx during channel switch\n"); 4140 goto out_reassign; 4141 } 4142 4143 goto out; 4144 4145 out_reassign: 4146 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4147 true)) { 4148 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4149 goto out_restart; 4150 } 4151 4152 goto out; 4153 4154 out_restart: 4155 /* things keep failing, better restart the hw */ 4156 iwl_mvm_nic_restart(mvm, false); 4157 4158 out: 4159 mutex_unlock(&mvm->mutex); 4160 4161 return ret; 4162 } 4163 4164 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 4165 struct ieee80211_vif_chanctx_switch *vifs, 4166 int n_vifs, 4167 enum ieee80211_chanctx_switch_mode mode) 4168 { 4169 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4170 int ret; 4171 4172 /* we only support a single-vif right now */ 4173 if (n_vifs > 1) 4174 return -EOPNOTSUPP; 4175 4176 switch (mode) { 4177 case CHANCTX_SWMODE_SWAP_CONTEXTS: 4178 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 4179 break; 4180 case CHANCTX_SWMODE_REASSIGN_VIF: 4181 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 4182 break; 4183 default: 4184 ret = -EOPNOTSUPP; 4185 break; 4186 } 4187 4188 return ret; 4189 } 4190 4191 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 4192 { 4193 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4194 4195 return mvm->ibss_manager; 4196 } 4197 4198 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 4199 struct ieee80211_sta *sta, 4200 bool set) 4201 { 4202 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4203 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 4204 4205 if (!mvm_sta || !mvm_sta->vif) { 4206 IWL_ERR(mvm, "Station is not associated to a vif\n"); 4207 return -EINVAL; 4208 } 4209 4210 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 4211 } 4212 4213 #ifdef CONFIG_NL80211_TESTMODE 4214 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 4215 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 4216 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 4217 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 4218 }; 4219 4220 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 4221 struct ieee80211_vif *vif, 4222 void *data, int len) 4223 { 4224 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 4225 int err; 4226 u32 noa_duration; 4227 4228 err = nla_parse(tb, IWL_MVM_TM_ATTR_MAX, data, len, iwl_mvm_tm_policy, 4229 NULL); 4230 if 
(err) 4231 return err; 4232 4233 if (!tb[IWL_MVM_TM_ATTR_CMD]) 4234 return -EINVAL; 4235 4236 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 4237 case IWL_MVM_TM_CMD_SET_NOA: 4238 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 4239 !vif->bss_conf.enable_beacon || 4240 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 4241 return -EINVAL; 4242 4243 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 4244 if (noa_duration >= vif->bss_conf.beacon_int) 4245 return -EINVAL; 4246 4247 mvm->noa_duration = noa_duration; 4248 mvm->noa_vif = vif; 4249 4250 return iwl_mvm_update_quotas(mvm, true, NULL); 4251 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 4252 /* must be associated client vif - ignore authorized */ 4253 if (!vif || vif->type != NL80211_IFTYPE_STATION || 4254 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || 4255 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 4256 return -EINVAL; 4257 4258 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 4259 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4260 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4261 } 4262 4263 return -EOPNOTSUPP; 4264 } 4265 4266 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 4267 struct ieee80211_vif *vif, 4268 void *data, int len) 4269 { 4270 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4271 int err; 4272 4273 mutex_lock(&mvm->mutex); 4274 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 4275 mutex_unlock(&mvm->mutex); 4276 4277 return err; 4278 } 4279 #endif 4280 4281 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 4282 struct ieee80211_vif *vif, 4283 struct ieee80211_channel_switch *chsw) 4284 { 4285 /* By implementing this operation, we prevent mac80211 from 4286 * starting its own channel switch timer, so that we can call 4287 * ieee80211_chswitch_done() ourselves at the right time 4288 * (which is when the absence time event starts). 4289 */ 4290 4291 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), 4292 "dummy channel switch op\n"); 4293 } 4294 4295 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, 4296 struct ieee80211_vif *vif, 4297 struct ieee80211_channel_switch *chsw) 4298 { 4299 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4300 struct ieee80211_vif *csa_vif; 4301 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4302 u32 apply_time; 4303 int ret; 4304 4305 mutex_lock(&mvm->mutex); 4306 4307 mvmvif->csa_failed = false; 4308 4309 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", 4310 chsw->chandef.center_freq1); 4311 4312 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, 4313 ieee80211_vif_to_wdev(vif), 4314 FW_DBG_TRIGGER_CHANNEL_SWITCH); 4315 4316 switch (vif->type) { 4317 case NL80211_IFTYPE_AP: 4318 csa_vif = 4319 rcu_dereference_protected(mvm->csa_vif, 4320 lockdep_is_held(&mvm->mutex)); 4321 if (WARN_ONCE(csa_vif && csa_vif->csa_active, 4322 "Another CSA is already in progress")) { 4323 ret = -EBUSY; 4324 goto out_unlock; 4325 } 4326 4327 /* we still didn't unblock tx. 
prevent a new CS meanwhile */ 4328 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, 4329 lockdep_is_held(&mvm->mutex))) { 4330 ret = -EBUSY; 4331 goto out_unlock; 4332 } 4333 4334 rcu_assign_pointer(mvm->csa_vif, vif); 4335 4336 if (WARN_ONCE(mvmvif->csa_countdown, 4337 "Previous CSA countdown didn't complete")) { 4338 ret = -EBUSY; 4339 goto out_unlock; 4340 } 4341 4342 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; 4343 4344 break; 4345 case NL80211_IFTYPE_STATION: 4346 /* Schedule the time event to a bit before beacon 1, 4347 * to make sure we're in the new channel when the 4348 * GO/AP arrives. In case count <= 1, immediately schedule the 4349 * TE (this might result in some packet loss or connection 4350 * loss). 4351 */ 4352 if (chsw->count <= 1) 4353 apply_time = 0; 4354 else 4355 apply_time = chsw->device_timestamp + 4356 ((vif->bss_conf.beacon_int * (chsw->count - 1) - 4357 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); 4358 4359 if (chsw->block_tx) 4360 iwl_mvm_csa_client_absent(mvm, vif); 4361 4362 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, 4363 apply_time); 4364 if (mvmvif->bf_data.bf_enabled) { 4365 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4366 if (ret) 4367 goto out_unlock; 4368 } 4369 4370 break; 4371 default: 4372 break; 4373 } 4374 4375 mvmvif->ps_disabled = true; 4376 4377 ret = iwl_mvm_power_update_ps(mvm); 4378 if (ret) 4379 goto out_unlock; 4380 4381 /* we won't be on this channel any longer */ 4382 iwl_mvm_teardown_tdls_peers(mvm); 4383 4384 out_unlock: 4385 mutex_unlock(&mvm->mutex); 4386 4387 return ret; 4388 } 4389 4390 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, 4391 struct ieee80211_vif *vif) 4392 { 4393 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4394 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4395 int ret; 4396 4397 mutex_lock(&mvm->mutex); 4398 4399 if (mvmvif->csa_failed) { 4400 mvmvif->csa_failed = false; 4401 ret = -EIO; 4402 goto out_unlock; 4403 } 4404 4405 if (vif->type == NL80211_IFTYPE_STATION) { 4406 struct iwl_mvm_sta *mvmsta; 4407 4408 mvmvif->csa_bcn_pending = false; 4409 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, 4410 mvmvif->ap_sta_id); 4411 4412 if (WARN_ON(!mvmsta)) { 4413 ret = -EIO; 4414 goto out_unlock; 4415 } 4416 4417 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 4418 4419 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 4420 4421 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4422 if (ret) 4423 goto out_unlock; 4424 4425 iwl_mvm_stop_session_protection(mvm, vif); 4426 } 4427 4428 mvmvif->ps_disabled = false; 4429 4430 ret = iwl_mvm_power_update_ps(mvm); 4431 4432 out_unlock: 4433 mutex_unlock(&mvm->mutex); 4434 4435 return ret; 4436 } 4437 4438 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) 4439 { 4440 int i; 4441 4442 if (!iwl_mvm_has_new_tx_api(mvm)) { 4443 if (drop) { 4444 mutex_lock(&mvm->mutex); 4445 iwl_mvm_flush_tx_path(mvm, 4446 iwl_mvm_flushable_queues(mvm) & queues, 0); 4447 mutex_unlock(&mvm->mutex); 4448 } else { 4449 iwl_trans_wait_tx_queues_empty(mvm->trans, queues); 4450 } 4451 return; 4452 } 4453 4454 mutex_lock(&mvm->mutex); 4455 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 4456 struct ieee80211_sta *sta; 4457 4458 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4459 lockdep_is_held(&mvm->mutex)); 4460 if (IS_ERR_OR_NULL(sta)) 4461 continue; 4462 4463 if (drop) 4464 iwl_mvm_flush_sta_tids(mvm, i, 0xFF, 0); 4465 else 4466 iwl_mvm_wait_sta_queues_empty(mvm, 4467
iwl_mvm_sta_from_mac80211(sta)); 4468 } 4469 mutex_unlock(&mvm->mutex); 4470 } 4471 4472 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 4473 struct ieee80211_vif *vif, u32 queues, bool drop) 4474 { 4475 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4476 struct iwl_mvm_vif *mvmvif; 4477 struct iwl_mvm_sta *mvmsta; 4478 struct ieee80211_sta *sta; 4479 int i; 4480 u32 msk = 0; 4481 4482 if (!vif) { 4483 iwl_mvm_flush_no_vif(mvm, queues, drop); 4484 return; 4485 } 4486 4487 if (vif->type != NL80211_IFTYPE_STATION) 4488 return; 4489 4490 /* Make sure we're done with the deferred traffic before flushing */ 4491 flush_work(&mvm->add_stream_wk); 4492 4493 mutex_lock(&mvm->mutex); 4494 mvmvif = iwl_mvm_vif_from_mac80211(vif); 4495 4496 /* flush the AP-station and all TDLS peers */ 4497 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 4498 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4499 lockdep_is_held(&mvm->mutex)); 4500 if (IS_ERR_OR_NULL(sta)) 4501 continue; 4502 4503 mvmsta = iwl_mvm_sta_from_mac80211(sta); 4504 if (mvmsta->vif != vif) 4505 continue; 4506 4507 /* make sure only TDLS peers or the AP are flushed */ 4508 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); 4509 4510 if (drop) { 4511 if (iwl_mvm_flush_sta(mvm, mvmsta, false, 0)) 4512 IWL_ERR(mvm, "flush request fail\n"); 4513 } else { 4514 msk |= mvmsta->tfd_queue_msk; 4515 if (iwl_mvm_has_new_tx_api(mvm)) 4516 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); 4517 } 4518 } 4519 4520 mutex_unlock(&mvm->mutex); 4521 4522 /* this can take a while, and we may need/want other operations 4523 * to succeed while doing this, so do it without the mutex held 4524 */ 4525 if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 4526 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 4527 } 4528 4529 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 4530 struct survey_info *survey) 4531 { 4532 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4533 int ret; 4534 4535 memset(survey, 0, sizeof(*survey)); 4536 4537 /* only support global statistics right now */ 4538 if (idx != 0) 4539 return -ENOENT; 4540 4541 if (!fw_has_capa(&mvm->fw->ucode_capa, 4542 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 4543 return -ENOENT; 4544 4545 mutex_lock(&mvm->mutex); 4546 4547 if (iwl_mvm_firmware_running(mvm)) { 4548 ret = iwl_mvm_request_statistics(mvm, false); 4549 if (ret) 4550 goto out; 4551 } 4552 4553 survey->filled = SURVEY_INFO_TIME | 4554 SURVEY_INFO_TIME_RX | 4555 SURVEY_INFO_TIME_TX | 4556 SURVEY_INFO_TIME_SCAN; 4557 survey->time = mvm->accu_radio_stats.on_time_rf + 4558 mvm->radio_stats.on_time_rf; 4559 do_div(survey->time, USEC_PER_MSEC); 4560 4561 survey->time_rx = mvm->accu_radio_stats.rx_time + 4562 mvm->radio_stats.rx_time; 4563 do_div(survey->time_rx, USEC_PER_MSEC); 4564 4565 survey->time_tx = mvm->accu_radio_stats.tx_time + 4566 mvm->radio_stats.tx_time; 4567 do_div(survey->time_tx, USEC_PER_MSEC); 4568 4569 survey->time_scan = mvm->accu_radio_stats.on_time_scan + 4570 mvm->radio_stats.on_time_scan; 4571 do_div(survey->time_scan, USEC_PER_MSEC); 4572 4573 ret = 0; 4574 out: 4575 mutex_unlock(&mvm->mutex); 4576 return ret; 4577 } 4578 4579 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, 4580 struct ieee80211_vif *vif, 4581 struct ieee80211_sta *sta, 4582 struct station_info *sinfo) 4583 { 4584 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4585 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4586 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 4587 4588 if (mvmsta->avg_energy) { 4589 
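/* a value of 0 means the firmware has not reported an average yet */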
sinfo->signal_avg = mvmsta->avg_energy; 4590 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 4591 } 4592 4593 /* if beacon filtering isn't on mac80211 does it anyway */ 4594 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 4595 return; 4596 4597 if (!vif->bss_conf.assoc) 4598 return; 4599 4600 mutex_lock(&mvm->mutex); 4601 4602 if (mvmvif->ap_sta_id != mvmsta->sta_id) 4603 goto unlock; 4604 4605 if (iwl_mvm_request_statistics(mvm, false)) 4606 goto unlock; 4607 4608 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + 4609 mvmvif->beacon_stats.accu_num_beacons; 4610 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); 4611 if (mvmvif->beacon_stats.avg_signal) { 4612 /* firmware only reports a value after RXing a few beacons */ 4613 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; 4614 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 4615 } 4616 unlock: 4617 mutex_unlock(&mvm->mutex); 4618 } 4619 4620 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, 4621 struct ieee80211_vif *vif, 4622 const struct ieee80211_event *event) 4623 { 4624 #define CHECK_MLME_TRIGGER(_cnt, _fmt...) \ 4625 do { \ 4626 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ 4627 break; \ 4628 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ 4629 } while (0) 4630 4631 struct iwl_fw_dbg_trigger_tlv *trig; 4632 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 4633 4634 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 4635 FW_DBG_TRIGGER_MLME); 4636 if (!trig) 4637 return; 4638 4639 trig_mlme = (void *)trig->data; 4640 4641 if (event->u.mlme.data == ASSOC_EVENT) { 4642 if (event->u.mlme.status == MLME_DENIED) 4643 CHECK_MLME_TRIGGER(stop_assoc_denied, 4644 "DENIED ASSOC: reason %d", 4645 event->u.mlme.reason); 4646 else if (event->u.mlme.status == MLME_TIMEOUT) 4647 CHECK_MLME_TRIGGER(stop_assoc_timeout, 4648 "ASSOC TIMEOUT"); 4649 } else if (event->u.mlme.data == AUTH_EVENT) { 4650 if (event->u.mlme.status == MLME_DENIED) 4651 CHECK_MLME_TRIGGER(stop_auth_denied, 4652 "DENIED AUTH: reason %d", 4653 event->u.mlme.reason); 4654 else if (event->u.mlme.status == MLME_TIMEOUT) 4655 CHECK_MLME_TRIGGER(stop_auth_timeout, 4656 "AUTH TIMEOUT"); 4657 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { 4658 CHECK_MLME_TRIGGER(stop_rx_deauth, 4659 "DEAUTH RX %d", event->u.mlme.reason); 4660 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { 4661 CHECK_MLME_TRIGGER(stop_tx_deauth, 4662 "DEAUTH TX %d", event->u.mlme.reason); 4663 } 4664 #undef CHECK_MLME_TRIGGER 4665 } 4666 4667 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, 4668 struct ieee80211_vif *vif, 4669 const struct ieee80211_event *event) 4670 { 4671 struct iwl_fw_dbg_trigger_tlv *trig; 4672 struct iwl_fw_dbg_trigger_ba *ba_trig; 4673 4674 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 4675 FW_DBG_TRIGGER_BA); 4676 if (!trig) 4677 return; 4678 4679 ba_trig = (void *)trig->data; 4680 4681 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) 4682 return; 4683 4684 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 4685 "BAR received from %pM, tid %d, ssn %d", 4686 event->u.ba.sta->addr, event->u.ba.tid, 4687 event->u.ba.ssn); 4688 } 4689 4690 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, 4691 struct ieee80211_vif *vif, 4692 const struct ieee80211_event *event) 4693 { 4694 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4695 4696 switch (event->type) { 4697 case MLME_EVENT: 4698 iwl_mvm_event_mlme_callback(mvm, vif, event); 4699 break; 4700 case BAR_RX_EVENT: 
4701 iwl_mvm_event_bar_rx_callback(mvm, vif, event); 4702 break; 4703 case BA_FRAME_TIMEOUT: 4704 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, 4705 event->u.ba.tid); 4706 break; 4707 default: 4708 break; 4709 } 4710 } 4711 4712 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, 4713 struct iwl_mvm_internal_rxq_notif *notif, 4714 u32 size) 4715 { 4716 u32 qmask = BIT(mvm->trans->num_rx_queues) - 1; 4717 int ret; 4718 4719 lockdep_assert_held(&mvm->mutex); 4720 4721 if (!iwl_mvm_has_new_rx_api(mvm)) 4722 return; 4723 4724 notif->cookie = mvm->queue_sync_cookie; 4725 4726 if (notif->sync) 4727 atomic_set(&mvm->queue_sync_counter, 4728 mvm->trans->num_rx_queues); 4729 4730 ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); 4731 if (ret) { 4732 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 4733 goto out; 4734 } 4735 4736 if (notif->sync) { 4737 ret = wait_event_timeout(mvm->rx_sync_waitq, 4738 atomic_read(&mvm->queue_sync_counter) == 0 || 4739 iwl_mvm_is_radio_killed(mvm), 4740 HZ); 4741 WARN_ON_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm)); 4742 } 4743 4744 out: 4745 atomic_set(&mvm->queue_sync_counter, 0); 4746 mvm->queue_sync_cookie++; 4747 } 4748 4749 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) 4750 { 4751 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4752 struct iwl_mvm_internal_rxq_notif data = { 4753 .type = IWL_MVM_RXQ_EMPTY, 4754 .sync = 1, 4755 }; 4756 4757 mutex_lock(&mvm->mutex); 4758 iwl_mvm_sync_rx_queues_internal(mvm, &data, sizeof(data)); 4759 mutex_unlock(&mvm->mutex); 4760 } 4761 4762 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) 4763 { 4764 u8 protocol = ip_hdr(skb)->protocol; 4765 4766 if (!IS_ENABLED(CONFIG_INET)) 4767 return false; 4768 4769 return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; 4770 } 4771 4772 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, 4773 struct sk_buff *head, 4774 struct sk_buff *skb) 4775 { 4776 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4777 4778 /* For now don't aggregate IPv6 in AMSDU */ 4779 if (skb->protocol != htons(ETH_P_IP)) 4780 return false; 4781 4782 if (!iwl_mvm_is_csum_supported(mvm)) 4783 return true; 4784 4785 return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); 4786 } 4787 4788 const struct ieee80211_ops iwl_mvm_hw_ops = { 4789 .tx = iwl_mvm_mac_tx, 4790 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, 4791 .ampdu_action = iwl_mvm_mac_ampdu_action, 4792 .start = iwl_mvm_mac_start, 4793 .reconfig_complete = iwl_mvm_mac_reconfig_complete, 4794 .stop = iwl_mvm_mac_stop, 4795 .add_interface = iwl_mvm_mac_add_interface, 4796 .remove_interface = iwl_mvm_mac_remove_interface, 4797 .config = iwl_mvm_mac_config, 4798 .prepare_multicast = iwl_mvm_prepare_multicast, 4799 .configure_filter = iwl_mvm_configure_filter, 4800 .config_iface_filter = iwl_mvm_config_iface_filter, 4801 .bss_info_changed = iwl_mvm_bss_info_changed, 4802 .hw_scan = iwl_mvm_mac_hw_scan, 4803 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, 4804 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, 4805 .sta_state = iwl_mvm_mac_sta_state, 4806 .sta_notify = iwl_mvm_mac_sta_notify, 4807 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 4808 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, 4809 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 4810 .sta_rc_update = iwl_mvm_sta_rc_update, 4811 .conf_tx = iwl_mvm_mac_conf_tx, 4812 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 4813 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, 4814 
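/* mac80211 may call .flush with a NULL vif; iwl_mvm_mac_flush() then hands off to iwl_mvm_flush_no_vif() */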
.flush = iwl_mvm_mac_flush, 4815 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 4816 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 4817 .set_key = iwl_mvm_mac_set_key, 4818 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 4819 .remain_on_channel = iwl_mvm_roc, 4820 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 4821 .add_chanctx = iwl_mvm_add_chanctx, 4822 .remove_chanctx = iwl_mvm_remove_chanctx, 4823 .change_chanctx = iwl_mvm_change_chanctx, 4824 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 4825 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 4826 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, 4827 4828 .start_ap = iwl_mvm_start_ap_ibss, 4829 .stop_ap = iwl_mvm_stop_ap_ibss, 4830 .join_ibss = iwl_mvm_start_ap_ibss, 4831 .leave_ibss = iwl_mvm_stop_ap_ibss, 4832 4833 .tx_last_beacon = iwl_mvm_tx_last_beacon, 4834 4835 .set_tim = iwl_mvm_set_tim, 4836 4837 .channel_switch = iwl_mvm_channel_switch, 4838 .pre_channel_switch = iwl_mvm_pre_channel_switch, 4839 .post_channel_switch = iwl_mvm_post_channel_switch, 4840 4841 .tdls_channel_switch = iwl_mvm_tdls_channel_switch, 4842 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, 4843 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, 4844 4845 .event_callback = iwl_mvm_mac_event_callback, 4846 4847 .sync_rx_queues = iwl_mvm_sync_rx_queues, 4848 4849 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 4850 4851 #ifdef CONFIG_PM_SLEEP 4852 /* look at d3.c */ 4853 .suspend = iwl_mvm_suspend, 4854 .resume = iwl_mvm_resume, 4855 .set_wakeup = iwl_mvm_set_wakeup, 4856 .set_rekey_data = iwl_mvm_set_rekey_data, 4857 #if IS_ENABLED(CONFIG_IPV6) 4858 .ipv6_addr_change = iwl_mvm_ipv6_addr_change, 4859 #endif 4860 .set_default_unicast_key = iwl_mvm_set_default_unicast_key, 4861 #endif 4862 .get_survey = iwl_mvm_mac_get_survey, 4863 .sta_statistics = iwl_mvm_mac_sta_statistics, 4864 .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, 4865 #ifdef CONFIG_IWLWIFI_DEBUGFS 4866 .sta_add_debugfs = iwl_mvm_sta_add_debugfs, 4867 #endif 4868 }; 4869
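/*
 * Illustrative sketch (not part of the driver): the ops table above is what
 * mac80211 consumes when the op-mode allocates and registers the hw. In this
 * driver that happens in ops.c and mac80211.c; the snippet below only shows
 * the generic mac80211 pattern, and the local names used here (hw, priv_size)
 * are hypothetical.
 *
 *	struct ieee80211_hw *hw =
 *		ieee80211_alloc_hw(priv_size, &iwl_mvm_hw_ops);
 *	if (!hw)
 *		return NULL;
 *	// ... fill in hw->wiphy capabilities before registering ...
 *	if (ieee80211_register_hw(hw))
 *		ieee80211_free_hw(hw);
 */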