1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause 2 /* 3 * Copyright (C) 2012-2014, 2018-2022 Intel Corporation 4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH 5 * Copyright (C) 2016-2017 Intel Deutschland GmbH 6 */ 7 #include <linux/kernel.h> 8 #include <linux/slab.h> 9 #include <linux/skbuff.h> 10 #include <linux/netdevice.h> 11 #include <linux/etherdevice.h> 12 #include <linux/ip.h> 13 #include <linux/if_arp.h> 14 #include <linux/time.h> 15 #if defined(__FreeBSD__) 16 #include <linux/math64.h> 17 #endif 18 #include <net/mac80211.h> 19 #include <net/ieee80211_radiotap.h> 20 #include <net/tcp.h> 21 #if defined(__FreeBSD__) 22 #include <linux/udp.h> 23 #endif 24 25 #include "iwl-drv.h" 26 #include "iwl-op-mode.h" 27 #include "iwl-io.h" 28 #include "mvm.h" 29 #include "sta.h" 30 #include "time-event.h" 31 #include "iwl-eeprom-parse.h" 32 #include "iwl-phy-db.h" 33 #ifdef CONFIG_NL80211_TESTMODE 34 #include "testmode.h" 35 #endif 36 #include "fw/error-dump.h" 37 #include "iwl-prph.h" 38 #include "iwl-nvm-parse.h" 39 40 static const struct ieee80211_iface_limit iwl_mvm_limits[] = { 41 { 42 .max = 1, 43 .types = BIT(NL80211_IFTYPE_STATION), 44 }, 45 { 46 .max = 1, 47 .types = BIT(NL80211_IFTYPE_AP) | 48 BIT(NL80211_IFTYPE_P2P_CLIENT) | 49 BIT(NL80211_IFTYPE_P2P_GO), 50 }, 51 { 52 .max = 1, 53 .types = BIT(NL80211_IFTYPE_P2P_DEVICE), 54 }, 55 }; 56 57 static const struct ieee80211_iface_combination iwl_mvm_iface_combinations[] = { 58 { 59 .num_different_channels = 2, 60 .max_interfaces = 3, 61 .limits = iwl_mvm_limits, 62 .n_limits = ARRAY_SIZE(iwl_mvm_limits), 63 }, 64 }; 65 66 static const struct cfg80211_pmsr_capabilities iwl_mvm_pmsr_capa = { 67 .max_peers = IWL_MVM_TOF_MAX_APS, 68 .report_ap_tsf = 1, 69 .randomize_mac_addr = 1, 70 71 .ftm = { 72 .supported = 1, 73 .asap = 1, 74 .non_asap = 1, 75 .request_lci = 1, 76 .request_civicloc = 1, 77 .trigger_based = 1, 78 .non_trigger_based = 1, 79 .max_bursts_exponent = -1, /* all supported */ 80 .max_ftms_per_burst = 0, /* no limits */ 81 .bandwidths = BIT(NL80211_CHAN_WIDTH_20_NOHT) | 82 BIT(NL80211_CHAN_WIDTH_20) | 83 BIT(NL80211_CHAN_WIDTH_40) | 84 BIT(NL80211_CHAN_WIDTH_80) | 85 BIT(NL80211_CHAN_WIDTH_160), 86 .preambles = BIT(NL80211_PREAMBLE_LEGACY) | 87 BIT(NL80211_PREAMBLE_HT) | 88 BIT(NL80211_PREAMBLE_VHT) | 89 BIT(NL80211_PREAMBLE_HE), 90 }, 91 }; 92 93 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 94 enum set_key_cmd cmd, 95 struct ieee80211_vif *vif, 96 struct ieee80211_sta *sta, 97 struct ieee80211_key_conf *key); 98 99 static void iwl_mvm_reset_phy_ctxts(struct iwl_mvm *mvm) 100 { 101 int i; 102 103 memset(mvm->phy_ctxts, 0, sizeof(mvm->phy_ctxts)); 104 for (i = 0; i < NUM_PHY_CTX; i++) { 105 mvm->phy_ctxts[i].id = i; 106 mvm->phy_ctxts[i].ref = 0; 107 } 108 } 109 110 struct ieee80211_regdomain *iwl_mvm_get_regdomain(struct wiphy *wiphy, 111 const char *alpha2, 112 enum iwl_mcc_source src_id, 113 bool *changed) 114 { 115 struct ieee80211_regdomain *regd = NULL; 116 struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy); 117 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 118 struct iwl_mcc_update_resp *resp; 119 u8 resp_ver; 120 121 IWL_DEBUG_LAR(mvm, "Getting regdomain data for %s from FW\n", alpha2); 122 123 lockdep_assert_held(&mvm->mutex); 124 125 resp = iwl_mvm_update_mcc(mvm, alpha2, src_id); 126 if (IS_ERR_OR_NULL(resp)) { 127 IWL_DEBUG_LAR(mvm, "Could not get update from FW %d\n", 128 PTR_ERR_OR_ZERO(resp)); 129 resp = NULL; 130 goto out; 131 } 132 133 if (changed) { 134 u32 status = 
le32_to_cpu(resp->status); 135 136 *changed = (status == MCC_RESP_NEW_CHAN_PROFILE || 137 status == MCC_RESP_ILLEGAL); 138 } 139 resp_ver = iwl_fw_lookup_notif_ver(mvm->fw, IWL_ALWAYS_LONG_GROUP, 140 MCC_UPDATE_CMD, 0); 141 IWL_DEBUG_LAR(mvm, "MCC update response version: %d\n", resp_ver); 142 143 regd = iwl_parse_nvm_mcc_info(mvm->trans->dev, mvm->cfg, 144 __le32_to_cpu(resp->n_channels), 145 resp->channels, 146 __le16_to_cpu(resp->mcc), 147 __le16_to_cpu(resp->geo_info), 148 __le16_to_cpu(resp->cap), resp_ver); 149 /* Store the return source id */ 150 src_id = resp->source_id; 151 if (IS_ERR_OR_NULL(regd)) { 152 IWL_DEBUG_LAR(mvm, "Could not get parse update from FW %d\n", 153 PTR_ERR_OR_ZERO(regd)); 154 goto out; 155 } 156 157 IWL_DEBUG_LAR(mvm, "setting alpha2 from FW to %s (0x%x, 0x%x) src=%d\n", 158 regd->alpha2, regd->alpha2[0], regd->alpha2[1], src_id); 159 mvm->lar_regdom_set = true; 160 mvm->mcc_src = src_id; 161 162 iwl_mei_set_country_code(__le16_to_cpu(resp->mcc)); 163 164 out: 165 kfree(resp); 166 return regd; 167 } 168 169 void iwl_mvm_update_changed_regdom(struct iwl_mvm *mvm) 170 { 171 bool changed; 172 struct ieee80211_regdomain *regd; 173 174 if (!iwl_mvm_is_lar_supported(mvm)) 175 return; 176 177 regd = iwl_mvm_get_current_regdomain(mvm, &changed); 178 if (!IS_ERR_OR_NULL(regd)) { 179 /* only update the regulatory core if changed */ 180 if (changed) 181 regulatory_set_wiphy_regd(mvm->hw->wiphy, regd); 182 183 kfree(regd); 184 } 185 } 186 187 struct ieee80211_regdomain *iwl_mvm_get_current_regdomain(struct iwl_mvm *mvm, 188 bool *changed) 189 { 190 return iwl_mvm_get_regdomain(mvm->hw->wiphy, "ZZ", 191 iwl_mvm_is_wifi_mcc_supported(mvm) ? 192 MCC_SOURCE_GET_CURRENT : 193 MCC_SOURCE_OLD_FW, changed); 194 } 195 196 int iwl_mvm_init_fw_regd(struct iwl_mvm *mvm) 197 { 198 enum iwl_mcc_source used_src; 199 struct ieee80211_regdomain *regd; 200 int ret; 201 bool changed; 202 const struct ieee80211_regdomain *r = 203 wiphy_dereference(mvm->hw->wiphy, mvm->hw->wiphy->regd); 204 205 if (!r) 206 return -ENOENT; 207 208 /* save the last source in case we overwrite it below */ 209 used_src = mvm->mcc_src; 210 if (iwl_mvm_is_wifi_mcc_supported(mvm)) { 211 /* Notify the firmware we support wifi location updates */ 212 regd = iwl_mvm_get_current_regdomain(mvm, NULL); 213 if (!IS_ERR_OR_NULL(regd)) 214 kfree(regd); 215 } 216 217 /* Now set our last stored MCC and source */ 218 regd = iwl_mvm_get_regdomain(mvm->hw->wiphy, r->alpha2, used_src, 219 &changed); 220 if (IS_ERR_OR_NULL(regd)) 221 return -EIO; 222 223 /* update cfg80211 if the regdomain was changed */ 224 if (changed) 225 ret = regulatory_set_wiphy_regd_sync(mvm->hw->wiphy, regd); 226 else 227 ret = 0; 228 229 kfree(regd); 230 return ret; 231 } 232 233 static const u8 he_if_types_ext_capa_sta[] = { 234 [0] = WLAN_EXT_CAPA1_EXT_CHANNEL_SWITCHING, 235 [2] = WLAN_EXT_CAPA3_MULTI_BSSID_SUPPORT, 236 [7] = WLAN_EXT_CAPA8_OPMODE_NOTIF, 237 }; 238 239 static const struct wiphy_iftype_ext_capab he_iftypes_ext_capa[] = { 240 { 241 .iftype = NL80211_IFTYPE_STATION, 242 .extended_capabilities = he_if_types_ext_capa_sta, 243 .extended_capabilities_mask = he_if_types_ext_capa_sta, 244 .extended_capabilities_len = sizeof(he_if_types_ext_capa_sta), 245 }, 246 }; 247 248 static int 249 iwl_mvm_op_get_antenna(struct ieee80211_hw *hw, u32 *tx_ant, u32 *rx_ant) 250 { 251 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 252 *tx_ant = iwl_mvm_get_valid_tx_ant(mvm); 253 *rx_ant = iwl_mvm_get_valid_rx_ant(mvm); 254 return 0; 255 } 256 257 int 
iwl_mvm_mac_setup_register(struct iwl_mvm *mvm) 258 { 259 struct ieee80211_hw *hw = mvm->hw; 260 int num_mac, ret, i; 261 static const u32 mvm_ciphers[] = { 262 WLAN_CIPHER_SUITE_WEP40, 263 WLAN_CIPHER_SUITE_WEP104, 264 WLAN_CIPHER_SUITE_TKIP, 265 WLAN_CIPHER_SUITE_CCMP, 266 }; 267 #ifdef CONFIG_PM_SLEEP 268 bool unified = fw_has_capa(&mvm->fw->ucode_capa, 269 IWL_UCODE_TLV_CAPA_CNSLDTD_D3_D0_IMG); 270 #endif 271 272 /* Tell mac80211 our characteristics */ 273 ieee80211_hw_set(hw, SIGNAL_DBM); 274 ieee80211_hw_set(hw, SPECTRUM_MGMT); 275 ieee80211_hw_set(hw, REPORTS_TX_ACK_STATUS); 276 ieee80211_hw_set(hw, WANT_MONITOR_VIF); 277 ieee80211_hw_set(hw, SUPPORTS_PS); 278 ieee80211_hw_set(hw, SUPPORTS_DYNAMIC_PS); 279 ieee80211_hw_set(hw, AMPDU_AGGREGATION); 280 ieee80211_hw_set(hw, TIMING_BEACON_ONLY); 281 ieee80211_hw_set(hw, CONNECTION_MONITOR); 282 ieee80211_hw_set(hw, CHANCTX_STA_CSA); 283 ieee80211_hw_set(hw, SUPPORT_FAST_XMIT); 284 ieee80211_hw_set(hw, SUPPORTS_CLONED_SKBS); 285 ieee80211_hw_set(hw, SUPPORTS_AMSDU_IN_AMPDU); 286 ieee80211_hw_set(hw, NEEDS_UNIQUE_STA_ADDR); 287 ieee80211_hw_set(hw, DEAUTH_NEED_MGD_TX_PREP); 288 ieee80211_hw_set(hw, SUPPORTS_VHT_EXT_NSS_BW); 289 ieee80211_hw_set(hw, BUFF_MMPDU_TXQ); 290 ieee80211_hw_set(hw, STA_MMPDU_TXQ); 291 /* 292 * On older devices, enabling TX A-MSDU occasionally leads to 293 * something getting messed up, the command read from the FIFO 294 * gets out of sync and isn't a TX command, so that we have an 295 * assert EDC. 296 * 297 * It's not clear where the bug is, but since we didn't used to 298 * support A-MSDU until moving the mac80211 iTXQs, just leave it 299 * for older devices. We also don't see this issue on any newer 300 * devices. 301 */ 302 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_9000) 303 ieee80211_hw_set(hw, TX_AMSDU); 304 ieee80211_hw_set(hw, TX_FRAG_LIST); 305 306 if (iwl_mvm_has_tlc_offload(mvm)) { 307 ieee80211_hw_set(hw, TX_AMPDU_SETUP_IN_HW); 308 ieee80211_hw_set(hw, HAS_RATE_CONTROL); 309 } 310 311 if (iwl_mvm_has_new_rx_api(mvm)) 312 ieee80211_hw_set(hw, SUPPORTS_REORDERING_BUFFER); 313 314 if (fw_has_capa(&mvm->fw->ucode_capa, 315 IWL_UCODE_TLV_CAPA_STA_PM_NOTIF)) { 316 ieee80211_hw_set(hw, AP_LINK_PS); 317 } else if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) { 318 /* 319 * we absolutely need this for the new TX API since that comes 320 * with many more queues than the current code can deal with 321 * for station powersave 322 */ 323 return -EINVAL; 324 } 325 326 if (mvm->trans->num_rx_queues > 1) 327 ieee80211_hw_set(hw, USES_RSS); 328 329 if (mvm->trans->max_skb_frags) 330 hw->netdev_features = NETIF_F_HIGHDMA | NETIF_F_SG; 331 332 hw->queues = IEEE80211_NUM_ACS; 333 hw->offchannel_tx_hw_queue = IWL_MVM_OFFCHANNEL_QUEUE; 334 hw->radiotap_mcs_details |= IEEE80211_RADIOTAP_MCS_HAVE_FEC | 335 IEEE80211_RADIOTAP_MCS_HAVE_STBC; 336 hw->radiotap_vht_details |= IEEE80211_RADIOTAP_VHT_KNOWN_STBC | 337 IEEE80211_RADIOTAP_VHT_KNOWN_BEAMFORMED; 338 339 hw->radiotap_timestamp.units_pos = 340 IEEE80211_RADIOTAP_TIMESTAMP_UNIT_US | 341 IEEE80211_RADIOTAP_TIMESTAMP_SPOS_PLCP_SIG_ACQ; 342 /* this is the case for CCK frames, it's better (only 8) for OFDM */ 343 hw->radiotap_timestamp.accuracy = 22; 344 345 if (!iwl_mvm_has_tlc_offload(mvm)) 346 hw->rate_control_algorithm = RS_NAME; 347 348 hw->uapsd_queues = IWL_MVM_UAPSD_QUEUES; 349 hw->uapsd_max_sp_len = IWL_UAPSD_MAX_SP; 350 hw->max_tx_fragments = mvm->trans->max_skb_frags; 351 352 BUILD_BUG_ON(ARRAY_SIZE(mvm->ciphers) < ARRAY_SIZE(mvm_ciphers) + 6); 353 
memcpy(mvm->ciphers, mvm_ciphers, sizeof(mvm_ciphers)); 354 hw->wiphy->n_cipher_suites = ARRAY_SIZE(mvm_ciphers); 355 hw->wiphy->cipher_suites = mvm->ciphers; 356 357 if (iwl_mvm_has_new_rx_api(mvm)) { 358 mvm->ciphers[hw->wiphy->n_cipher_suites] = 359 WLAN_CIPHER_SUITE_GCMP; 360 hw->wiphy->n_cipher_suites++; 361 mvm->ciphers[hw->wiphy->n_cipher_suites] = 362 WLAN_CIPHER_SUITE_GCMP_256; 363 hw->wiphy->n_cipher_suites++; 364 } 365 366 if (iwlwifi_mod_params.swcrypto) 367 IWL_ERR(mvm, 368 "iwlmvm doesn't allow to disable HW crypto, check swcrypto module parameter\n"); 369 if (!iwlwifi_mod_params.bt_coex_active) 370 IWL_ERR(mvm, 371 "iwlmvm doesn't allow to disable BT Coex, check bt_coex_active module parameter\n"); 372 373 ieee80211_hw_set(hw, MFP_CAPABLE); 374 mvm->ciphers[hw->wiphy->n_cipher_suites] = WLAN_CIPHER_SUITE_AES_CMAC; 375 hw->wiphy->n_cipher_suites++; 376 if (iwl_mvm_has_new_rx_api(mvm)) { 377 mvm->ciphers[hw->wiphy->n_cipher_suites] = 378 WLAN_CIPHER_SUITE_BIP_GMAC_128; 379 hw->wiphy->n_cipher_suites++; 380 mvm->ciphers[hw->wiphy->n_cipher_suites] = 381 WLAN_CIPHER_SUITE_BIP_GMAC_256; 382 hw->wiphy->n_cipher_suites++; 383 } 384 385 if (fw_has_capa(&mvm->fw->ucode_capa, 386 IWL_UCODE_TLV_CAPA_FTM_CALIBRATED)) { 387 wiphy_ext_feature_set(hw->wiphy, 388 NL80211_EXT_FEATURE_ENABLE_FTM_RESPONDER); 389 hw->wiphy->pmsr_capa = &iwl_mvm_pmsr_capa; 390 } 391 392 if (fw_has_capa(&mvm->fw->ucode_capa, 393 IWL_UCODE_TLV_CAPA_BIGTK_SUPPORT)) 394 wiphy_ext_feature_set(hw->wiphy, 395 NL80211_EXT_FEATURE_BEACON_PROTECTION_CLIENT); 396 397 ieee80211_hw_set(hw, SINGLE_SCAN_ON_ALL_BANDS); 398 hw->wiphy->features |= 399 NL80211_FEATURE_SCHED_SCAN_RANDOM_MAC_ADDR | 400 NL80211_FEATURE_SCAN_RANDOM_MAC_ADDR | 401 NL80211_FEATURE_ND_RANDOM_MAC_ADDR; 402 403 hw->sta_data_size = sizeof(struct iwl_mvm_sta); 404 hw->vif_data_size = sizeof(struct iwl_mvm_vif); 405 hw->chanctx_data_size = sizeof(u16); 406 hw->txq_data_size = sizeof(struct iwl_mvm_txq); 407 408 hw->wiphy->interface_modes = BIT(NL80211_IFTYPE_STATION) | 409 BIT(NL80211_IFTYPE_P2P_CLIENT) | 410 BIT(NL80211_IFTYPE_AP) | 411 BIT(NL80211_IFTYPE_P2P_GO) | 412 BIT(NL80211_IFTYPE_P2P_DEVICE) | 413 BIT(NL80211_IFTYPE_ADHOC); 414 415 hw->wiphy->flags |= WIPHY_FLAG_IBSS_RSN; 416 wiphy_ext_feature_set(hw->wiphy, NL80211_EXT_FEATURE_VHT_IBSS); 417 418 /* The new Tx API does not allow passing the key or keyid of an MPDU to 419 the hw, preventing us from controlling which key(id) to use per MPDU. 420 Until that's fixed we can't use Extended Key ID for the newer cards.
421 */ 422 if (!iwl_mvm_has_new_tx_api(mvm)) 423 wiphy_ext_feature_set(hw->wiphy, 424 NL80211_EXT_FEATURE_EXT_KEY_ID); 425 hw->wiphy->features |= NL80211_FEATURE_HT_IBSS; 426 427 hw->wiphy->regulatory_flags |= REGULATORY_ENABLE_RELAX_NO_IR; 428 if (iwl_mvm_is_lar_supported(mvm)) 429 hw->wiphy->regulatory_flags |= REGULATORY_WIPHY_SELF_MANAGED; 430 else 431 hw->wiphy->regulatory_flags |= REGULATORY_CUSTOM_REG | 432 REGULATORY_DISABLE_BEACON_HINTS; 433 434 hw->wiphy->flags |= WIPHY_FLAG_AP_UAPSD; 435 hw->wiphy->flags |= WIPHY_FLAG_HAS_CHANNEL_SWITCH; 436 hw->wiphy->flags |= WIPHY_FLAG_SPLIT_SCAN_6GHZ; 437 438 hw->wiphy->iface_combinations = iwl_mvm_iface_combinations; 439 hw->wiphy->n_iface_combinations = 440 ARRAY_SIZE(iwl_mvm_iface_combinations); 441 442 hw->wiphy->max_remain_on_channel_duration = 10000; 443 hw->max_listen_interval = IWL_CONN_MAX_LISTEN_INTERVAL; 444 445 /* Extract MAC address */ 446 memcpy(mvm->addresses[0].addr, mvm->nvm_data->hw_addr, ETH_ALEN); 447 hw->wiphy->addresses = mvm->addresses; 448 hw->wiphy->n_addresses = 1; 449 450 /* Extract additional MAC addresses if available */ 451 num_mac = (mvm->nvm_data->n_hw_addrs > 1) ? 452 min(IWL_MVM_MAX_ADDRESSES, mvm->nvm_data->n_hw_addrs) : 1; 453 454 for (i = 1; i < num_mac; i++) { 455 memcpy(mvm->addresses[i].addr, mvm->addresses[i-1].addr, 456 ETH_ALEN); 457 mvm->addresses[i].addr[5]++; 458 hw->wiphy->n_addresses++; 459 } 460 461 iwl_mvm_reset_phy_ctxts(mvm); 462 463 hw->wiphy->max_scan_ie_len = iwl_mvm_max_scan_ie_len(mvm); 464 465 hw->wiphy->max_scan_ssids = PROBE_OPTION_MAX; 466 467 BUILD_BUG_ON(IWL_MVM_SCAN_STOPPING_MASK & IWL_MVM_SCAN_MASK); 468 BUILD_BUG_ON(IWL_MVM_MAX_UMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK) || 469 IWL_MVM_MAX_LMAC_SCANS > HWEIGHT32(IWL_MVM_SCAN_MASK)); 470 471 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 472 mvm->max_scans = IWL_MVM_MAX_UMAC_SCANS; 473 else 474 mvm->max_scans = IWL_MVM_MAX_LMAC_SCANS; 475 476 if (mvm->nvm_data->bands[NL80211_BAND_2GHZ].n_channels) 477 hw->wiphy->bands[NL80211_BAND_2GHZ] = 478 &mvm->nvm_data->bands[NL80211_BAND_2GHZ]; 479 if (mvm->nvm_data->bands[NL80211_BAND_5GHZ].n_channels) { 480 hw->wiphy->bands[NL80211_BAND_5GHZ] = 481 &mvm->nvm_data->bands[NL80211_BAND_5GHZ]; 482 483 if (fw_has_capa(&mvm->fw->ucode_capa, 484 IWL_UCODE_TLV_CAPA_BEAMFORMER) && 485 fw_has_api(&mvm->fw->ucode_capa, 486 IWL_UCODE_TLV_API_LQ_SS_PARAMS)) 487 hw->wiphy->bands[NL80211_BAND_5GHZ]->vht_cap.cap |= 488 IEEE80211_VHT_CAP_SU_BEAMFORMER_CAPABLE; 489 } 490 if (fw_has_capa(&mvm->fw->ucode_capa, 491 IWL_UCODE_TLV_CAPA_PSC_CHAN_SUPPORT) && 492 mvm->nvm_data->bands[NL80211_BAND_6GHZ].n_channels) 493 hw->wiphy->bands[NL80211_BAND_6GHZ] = 494 &mvm->nvm_data->bands[NL80211_BAND_6GHZ]; 495 496 hw->wiphy->hw_version = mvm->trans->hw_id; 497 498 if (iwlmvm_mod_params.power_scheme != IWL_POWER_SCHEME_CAM) 499 hw->wiphy->flags |= WIPHY_FLAG_PS_ON_BY_DEFAULT; 500 else 501 hw->wiphy->flags &= ~WIPHY_FLAG_PS_ON_BY_DEFAULT; 502 503 hw->wiphy->max_sched_scan_reqs = 1; 504 hw->wiphy->max_sched_scan_ssids = PROBE_OPTION_MAX; 505 hw->wiphy->max_match_sets = iwl_umac_scan_get_max_profiles(mvm->fw); 506 /* we create the 802.11 header and zero length SSID IE. 
*/ 507 hw->wiphy->max_sched_scan_ie_len = 508 SCAN_OFFLOAD_PROBE_REQ_SIZE - 24 - 2; 509 hw->wiphy->max_sched_scan_plans = IWL_MAX_SCHED_SCAN_PLANS; 510 hw->wiphy->max_sched_scan_plan_interval = U16_MAX; 511 512 /* 513 * the firmware uses u8 for num of iterations, but 0xff is saved for 514 * infinite loop, so the maximum number of iterations is actually 254. 515 */ 516 hw->wiphy->max_sched_scan_plan_iterations = 254; 517 518 hw->wiphy->features |= NL80211_FEATURE_P2P_GO_CTWIN | 519 NL80211_FEATURE_LOW_PRIORITY_SCAN | 520 NL80211_FEATURE_P2P_GO_OPPPS | 521 NL80211_FEATURE_AP_MODE_CHAN_WIDTH_CHANGE | 522 NL80211_FEATURE_DYNAMIC_SMPS | 523 NL80211_FEATURE_STATIC_SMPS | 524 NL80211_FEATURE_SUPPORTS_WMM_ADMISSION; 525 526 if (fw_has_capa(&mvm->fw->ucode_capa, 527 IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT)) 528 hw->wiphy->features |= NL80211_FEATURE_TX_POWER_INSERTION; 529 if (fw_has_capa(&mvm->fw->ucode_capa, 530 IWL_UCODE_TLV_CAPA_QUIET_PERIOD_SUPPORT)) 531 hw->wiphy->features |= NL80211_FEATURE_QUIET; 532 533 if (fw_has_capa(&mvm->fw->ucode_capa, 534 IWL_UCODE_TLV_CAPA_DS_PARAM_SET_IE_SUPPORT)) 535 hw->wiphy->features |= 536 NL80211_FEATURE_DS_PARAM_SET_IE_IN_PROBES; 537 538 if (fw_has_capa(&mvm->fw->ucode_capa, 539 IWL_UCODE_TLV_CAPA_WFA_TPC_REP_IE_SUPPORT)) 540 hw->wiphy->features |= NL80211_FEATURE_WFA_TPC_IE_IN_PROBES; 541 542 if (iwl_fw_lookup_cmd_ver(mvm->fw, WOWLAN_KEK_KCK_MATERIAL, 543 IWL_FW_CMD_VER_UNKNOWN) == 3) 544 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_EXT_KEK_KCK; 545 546 if (fw_has_api(&mvm->fw->ucode_capa, 547 IWL_UCODE_TLV_API_SCAN_TSF_REPORT)) { 548 wiphy_ext_feature_set(hw->wiphy, 549 NL80211_EXT_FEATURE_SCAN_START_TIME); 550 wiphy_ext_feature_set(hw->wiphy, 551 NL80211_EXT_FEATURE_BSS_PARENT_TSF); 552 } 553 554 if (iwl_mvm_is_oce_supported(mvm)) { 555 u8 scan_ver = iwl_fw_lookup_cmd_ver(mvm->fw, SCAN_REQ_UMAC, 0); 556 557 wiphy_ext_feature_set(hw->wiphy, 558 NL80211_EXT_FEATURE_ACCEPT_BCAST_PROBE_RESP); 559 wiphy_ext_feature_set(hw->wiphy, 560 NL80211_EXT_FEATURE_FILS_MAX_CHANNEL_TIME); 561 wiphy_ext_feature_set(hw->wiphy, 562 NL80211_EXT_FEATURE_OCE_PROBE_REQ_HIGH_TX_RATE); 563 564 /* Old firmware also supports probe deferral and suppression */ 565 if (scan_ver < 15) 566 wiphy_ext_feature_set(hw->wiphy, 567 NL80211_EXT_FEATURE_OCE_PROBE_REQ_DEFERRAL_SUPPRESSION); 568 } 569 570 if (mvm->nvm_data->sku_cap_11ax_enable && 571 !iwlwifi_mod_params.disable_11ax) { 572 hw->wiphy->iftype_ext_capab = he_iftypes_ext_capa; 573 hw->wiphy->num_iftype_ext_capab = 574 ARRAY_SIZE(he_iftypes_ext_capa); 575 576 ieee80211_hw_set(hw, SUPPORTS_MULTI_BSSID); 577 ieee80211_hw_set(hw, SUPPORTS_ONLY_HE_MULTI_BSSID); 578 } 579 580 mvm->rts_threshold = IEEE80211_MAX_RTS_THRESHOLD; 581 582 #ifdef CONFIG_PM_SLEEP 583 if ((unified || mvm->fw->img[IWL_UCODE_WOWLAN].num_sec) && 584 mvm->trans->ops->d3_suspend && 585 mvm->trans->ops->d3_resume && 586 device_can_wakeup(mvm->trans->dev)) { 587 mvm->wowlan.flags |= WIPHY_WOWLAN_MAGIC_PKT | 588 WIPHY_WOWLAN_DISCONNECT | 589 WIPHY_WOWLAN_EAP_IDENTITY_REQ | 590 WIPHY_WOWLAN_RFKILL_RELEASE | 591 WIPHY_WOWLAN_NET_DETECT; 592 mvm->wowlan.flags |= WIPHY_WOWLAN_SUPPORTS_GTK_REKEY | 593 WIPHY_WOWLAN_GTK_REKEY_FAILURE | 594 WIPHY_WOWLAN_4WAY_HANDSHAKE; 595 596 mvm->wowlan.n_patterns = IWL_WOWLAN_MAX_PATTERNS; 597 mvm->wowlan.pattern_min_len = IWL_WOWLAN_MIN_PATTERN_LEN; 598 mvm->wowlan.pattern_max_len = IWL_WOWLAN_MAX_PATTERN_LEN; 599 mvm->wowlan.max_nd_match_sets = 600 iwl_umac_scan_get_max_profiles(mvm->fw); 601 hw->wiphy->wowlan = &mvm->wowlan; 602 } 603 #endif 
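/*
 * Remaining registration steps: LED class device setup, TDLS and TDLS
 * channel-switch capability advertisement, checksum-offload netdev
 * features, MU-MIMO sniffer / protected-TWT extended features, vendor
 * commands and valid-antenna reporting, and finally
 * ieee80211_register_hw() (LEDs are torn down again if that fails).
 */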
604 605 ret = iwl_mvm_leds_init(mvm); 606 if (ret) 607 return ret; 608 609 if (fw_has_capa(&mvm->fw->ucode_capa, 610 IWL_UCODE_TLV_CAPA_TDLS_SUPPORT)) { 611 IWL_DEBUG_TDLS(mvm, "TDLS supported\n"); 612 hw->wiphy->flags |= WIPHY_FLAG_SUPPORTS_TDLS; 613 ieee80211_hw_set(hw, TDLS_WIDER_BW); 614 } 615 616 if (fw_has_capa(&mvm->fw->ucode_capa, 617 IWL_UCODE_TLV_CAPA_TDLS_CHANNEL_SWITCH)) { 618 IWL_DEBUG_TDLS(mvm, "TDLS channel switch supported\n"); 619 hw->wiphy->features |= NL80211_FEATURE_TDLS_CHANNEL_SWITCH; 620 } 621 622 hw->netdev_features |= mvm->cfg->features; 623 if (!iwl_mvm_is_csum_supported(mvm)) 624 hw->netdev_features &= ~IWL_CSUM_NETIF_FLAGS_MASK; 625 626 if (mvm->cfg->vht_mu_mimo_supported) 627 wiphy_ext_feature_set(hw->wiphy, 628 NL80211_EXT_FEATURE_MU_MIMO_AIR_SNIFFER); 629 630 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_PROTECTED_TWT)) 631 wiphy_ext_feature_set(hw->wiphy, 632 NL80211_EXT_FEATURE_PROTECTED_TWT); 633 634 iwl_mvm_vendor_cmds_register(mvm); 635 636 hw->wiphy->available_antennas_tx = iwl_mvm_get_valid_tx_ant(mvm); 637 hw->wiphy->available_antennas_rx = iwl_mvm_get_valid_rx_ant(mvm); 638 639 ret = ieee80211_register_hw(mvm->hw); 640 if (ret) { 641 iwl_mvm_leds_exit(mvm); 642 } 643 644 return ret; 645 } 646 647 static void iwl_mvm_tx_skb(struct iwl_mvm *mvm, struct sk_buff *skb, 648 struct ieee80211_sta *sta) 649 { 650 if (likely(sta)) { 651 if (likely(iwl_mvm_tx_skb_sta(mvm, skb, sta) == 0)) 652 return; 653 } else { 654 if (likely(iwl_mvm_tx_skb_non_sta(mvm, skb) == 0)) 655 return; 656 } 657 658 ieee80211_free_txskb(mvm->hw, skb); 659 } 660 661 static void iwl_mvm_mac_tx(struct ieee80211_hw *hw, 662 struct ieee80211_tx_control *control, 663 struct sk_buff *skb) 664 { 665 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 666 struct ieee80211_sta *sta = control->sta; 667 struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb); 668 struct ieee80211_hdr *hdr = (void *)skb->data; 669 bool offchannel = IEEE80211_SKB_CB(skb)->flags & 670 IEEE80211_TX_CTL_TX_OFFCHAN; 671 672 if (iwl_mvm_is_radio_killed(mvm)) { 673 IWL_DEBUG_DROP(mvm, "Dropping - RF/CT KILL\n"); 674 goto drop; 675 } 676 677 if (offchannel && 678 !test_bit(IWL_MVM_STATUS_ROC_RUNNING, &mvm->status) && 679 !test_bit(IWL_MVM_STATUS_ROC_AUX_RUNNING, &mvm->status)) 680 goto drop; 681 682 /* 683 * bufferable MMPDUs or MMPDUs on STA interfaces come via TXQs 684 * so we treat the others as broadcast 685 */ 686 if (ieee80211_is_mgmt(hdr->frame_control)) 687 sta = NULL; 688 689 /* If there is no sta, and it's not offchannel - send through AP */ 690 if (!sta && info->control.vif->type == NL80211_IFTYPE_STATION && 691 !offchannel) { 692 struct iwl_mvm_vif *mvmvif = 693 iwl_mvm_vif_from_mac80211(info->control.vif); 694 u8 ap_sta_id = READ_ONCE(mvmvif->ap_sta_id); 695 696 if (ap_sta_id < mvm->fw->ucode_capa.num_stations) { 697 /* mac80211 holds rcu read lock */ 698 sta = rcu_dereference(mvm->fw_id_to_mac_id[ap_sta_id]); 699 if (IS_ERR_OR_NULL(sta)) 700 goto drop; 701 } 702 } 703 704 iwl_mvm_tx_skb(mvm, skb, sta); 705 return; 706 drop: 707 ieee80211_free_txskb(hw, skb); 708 } 709 710 void iwl_mvm_mac_itxq_xmit(struct ieee80211_hw *hw, struct ieee80211_txq *txq) 711 { 712 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 713 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 714 struct sk_buff *skb = NULL; 715 716 /* 717 * No need for threads to be pending here, they can leave the first 718 * taker all the work. 
719 * 720 * mvmtxq->tx_request logic: 721 * 722 * If 0, no one is currently TXing, set to 1 to indicate current thread 723 * will now start TX and other threads should quit. 724 * 725 * If 1, another thread is currently TXing, set to 2 to indicate to 726 * that thread that there was another request. Since that request may 727 * have raced with the check whether the queue is empty, the TXing 728 * thread should check the queue's status one more time before leaving. 729 * This check is done in order to not leave any TX hanging in the queue 730 * until the next TX invocation (which may not even happen). 731 * 732 * If 2, another thread is currently TXing, and it will already double 733 * check the queue, so do nothing. 734 */ 735 if (atomic_fetch_add_unless(&mvmtxq->tx_request, 1, 2)) 736 return; 737 738 rcu_read_lock(); 739 do { 740 while (likely(!mvmtxq->stopped && 741 !test_bit(IWL_MVM_STATUS_IN_D3, &mvm->status))) { 742 skb = ieee80211_tx_dequeue(hw, txq); 743 744 if (!skb) { 745 if (txq->sta) 746 IWL_DEBUG_TX(mvm, 747 "TXQ of sta %pM tid %d is now empty\n", 748 txq->sta->addr, 749 txq->tid); 750 break; 751 } 752 753 iwl_mvm_tx_skb(mvm, skb, txq->sta); 754 } 755 } while (atomic_dec_return(&mvmtxq->tx_request)); 756 rcu_read_unlock(); 757 } 758 759 static void iwl_mvm_mac_wake_tx_queue(struct ieee80211_hw *hw, 760 struct ieee80211_txq *txq) 761 { 762 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 763 struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq); 764 765 /* 766 * Please note that racing is handled very carefully here: 767 * mvmtxq->txq_id is updated during allocation, and mvmtxq->list is 768 * deleted afterwards. 769 * This means that if: 770 * mvmtxq->txq_id != INVALID_QUEUE && list_empty(&mvmtxq->list): 771 * queue is allocated and we can TX. 772 * mvmtxq->txq_id != INVALID_QUEUE && !list_empty(&mvmtxq->list): 773 * a race, should defer the frame. 774 * mvmtxq->txq_id == INVALID_QUEUE && list_empty(&mvmtxq->list): 775 * need to allocate the queue and defer the frame. 776 * mvmtxq->txq_id == INVALID_QUEUE && !list_empty(&mvmtxq->list): 777 * queue is already scheduled for allocation, no need to allocate, 778 * should defer the frame. 779 */ 780 781 /* If the queue is allocated TX and return. */ 782 if (!txq->sta || mvmtxq->txq_id != IWL_MVM_INVALID_QUEUE) { 783 /* 784 * Check that list is empty to avoid a race where txq_id is 785 * already updated, but the queue allocation work wasn't 786 * finished 787 */ 788 if (unlikely(txq->sta && !list_empty(&mvmtxq->list))) 789 return; 790 791 iwl_mvm_mac_itxq_xmit(hw, txq); 792 return; 793 } 794 795 /* The list is being deleted only after the queue is fully allocated. */ 796 if (!list_empty(&mvmtxq->list)) 797 return; 798 799 list_add_tail(&mvmtxq->list, &mvm->add_stream_txqs); 800 schedule_work(&mvm->add_stream_wk); 801 } 802 803 #define CHECK_BA_TRIGGER(_mvm, _trig, _tid_bm, _tid, _fmt...) 
\ 804 do { \ 805 if (!(le16_to_cpu(_tid_bm) & BIT(_tid))) \ 806 break; \ 807 iwl_fw_dbg_collect_trig(&(_mvm)->fwrt, _trig, _fmt); \ 808 } while (0) 809 810 static void 811 iwl_mvm_ampdu_check_trigger(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 812 struct ieee80211_sta *sta, u16 tid, u16 rx_ba_ssn, 813 enum ieee80211_ampdu_mlme_action action) 814 { 815 struct iwl_fw_dbg_trigger_tlv *trig; 816 struct iwl_fw_dbg_trigger_ba *ba_trig; 817 818 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 819 FW_DBG_TRIGGER_BA); 820 if (!trig) 821 return; 822 823 ba_trig = (void *)trig->data; 824 825 switch (action) { 826 case IEEE80211_AMPDU_TX_OPERATIONAL: { 827 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 828 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 829 830 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_start, tid, 831 "TX AGG START: MAC %pM tid %d ssn %d\n", 832 sta->addr, tid, tid_data->ssn); 833 break; 834 } 835 case IEEE80211_AMPDU_TX_STOP_CONT: 836 CHECK_BA_TRIGGER(mvm, trig, ba_trig->tx_ba_stop, tid, 837 "TX AGG STOP: MAC %pM tid %d\n", 838 sta->addr, tid); 839 break; 840 case IEEE80211_AMPDU_RX_START: 841 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_start, tid, 842 "RX AGG START: MAC %pM tid %d ssn %d\n", 843 sta->addr, tid, rx_ba_ssn); 844 break; 845 case IEEE80211_AMPDU_RX_STOP: 846 CHECK_BA_TRIGGER(mvm, trig, ba_trig->rx_ba_stop, tid, 847 "RX AGG STOP: MAC %pM tid %d\n", 848 sta->addr, tid); 849 break; 850 default: 851 break; 852 } 853 } 854 855 static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, 856 struct ieee80211_vif *vif, 857 struct ieee80211_ampdu_params *params) 858 { 859 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 860 int ret; 861 struct ieee80211_sta *sta = params->sta; 862 enum ieee80211_ampdu_mlme_action action = params->action; 863 u16 tid = params->tid; 864 u16 *ssn = ¶ms->ssn; 865 u16 buf_size = params->buf_size; 866 bool amsdu = params->amsdu; 867 u16 timeout = params->timeout; 868 869 IWL_DEBUG_HT(mvm, "A-MPDU action on addr %pM tid %d: action %d\n", 870 sta->addr, tid, action); 871 872 if (!(mvm->nvm_data->sku_cap_11n_enable)) 873 return -EACCES; 874 875 mutex_lock(&mvm->mutex); 876 877 switch (action) { 878 case IEEE80211_AMPDU_RX_START: 879 if (iwl_mvm_vif_from_mac80211(vif)->ap_sta_id == 880 iwl_mvm_sta_from_mac80211(sta)->sta_id) { 881 struct iwl_mvm_vif *mvmvif; 882 u16 macid = iwl_mvm_vif_from_mac80211(vif)->id; 883 struct iwl_mvm_tcm_mac *mdata = &mvm->tcm.data[macid]; 884 885 mdata->opened_rx_ba_sessions = true; 886 mvmvif = iwl_mvm_vif_from_mac80211(vif); 887 cancel_delayed_work(&mvmvif->uapsd_nonagg_detected_wk); 888 } 889 if (!iwl_enable_rx_ampdu()) { 890 ret = -EINVAL; 891 break; 892 } 893 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, *ssn, true, buf_size, 894 timeout); 895 break; 896 case IEEE80211_AMPDU_RX_STOP: 897 ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, 898 timeout); 899 break; 900 case IEEE80211_AMPDU_TX_START: 901 if (!iwl_enable_tx_ampdu()) { 902 ret = -EINVAL; 903 break; 904 } 905 ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, ssn); 906 break; 907 case IEEE80211_AMPDU_TX_STOP_CONT: 908 ret = iwl_mvm_sta_tx_agg_stop(mvm, vif, sta, tid); 909 break; 910 case IEEE80211_AMPDU_TX_STOP_FLUSH: 911 case IEEE80211_AMPDU_TX_STOP_FLUSH_CONT: 912 ret = iwl_mvm_sta_tx_agg_flush(mvm, vif, sta, tid); 913 break; 914 case IEEE80211_AMPDU_TX_OPERATIONAL: 915 ret = iwl_mvm_sta_tx_agg_oper(mvm, vif, sta, tid, 916 buf_size, amsdu); 917 break; 918 default: 919 WARN_ON_ONCE(1); 920 ret = -EINVAL; 921 
break; 922 } 923 924 if (!ret) { 925 u16 rx_ba_ssn = 0; 926 927 if (action == IEEE80211_AMPDU_RX_START) 928 rx_ba_ssn = *ssn; 929 930 iwl_mvm_ampdu_check_trigger(mvm, vif, sta, tid, 931 rx_ba_ssn, action); 932 } 933 mutex_unlock(&mvm->mutex); 934 935 return ret; 936 } 937 938 static void iwl_mvm_cleanup_iterator(void *data, u8 *mac, 939 struct ieee80211_vif *vif) 940 { 941 struct iwl_mvm *mvm = data; 942 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 943 944 mvmvif->uploaded = false; 945 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 946 947 spin_lock_bh(&mvm->time_event_lock); 948 iwl_mvm_te_clear_data(mvm, &mvmvif->time_event_data); 949 spin_unlock_bh(&mvm->time_event_lock); 950 951 mvmvif->phy_ctxt = NULL; 952 memset(&mvmvif->bf_data, 0, sizeof(mvmvif->bf_data)); 953 memset(&mvmvif->probe_resp_data, 0, sizeof(mvmvif->probe_resp_data)); 954 } 955 956 static void iwl_mvm_restart_cleanup(struct iwl_mvm *mvm) 957 { 958 iwl_mvm_stop_device(mvm); 959 960 mvm->cur_aid = 0; 961 962 mvm->scan_status = 0; 963 mvm->ps_disabled = false; 964 mvm->rfkill_safe_init_done = false; 965 966 /* just in case one was running */ 967 iwl_mvm_cleanup_roc_te(mvm); 968 ieee80211_remain_on_channel_expired(mvm->hw); 969 970 iwl_mvm_ftm_restart(mvm); 971 972 /* 973 * cleanup all interfaces, even inactive ones, as some might have 974 * gone down during the HW restart 975 */ 976 ieee80211_iterate_interfaces(mvm->hw, 0, iwl_mvm_cleanup_iterator, mvm); 977 978 mvm->p2p_device_vif = NULL; 979 980 iwl_mvm_reset_phy_ctxts(mvm); 981 memset(mvm->fw_key_table, 0, sizeof(mvm->fw_key_table)); 982 memset(&mvm->last_bt_notif, 0, sizeof(mvm->last_bt_notif)); 983 memset(&mvm->last_bt_ci_cmd, 0, sizeof(mvm->last_bt_ci_cmd)); 984 985 ieee80211_wake_queues(mvm->hw); 986 987 mvm->vif_count = 0; 988 mvm->rx_ba_sessions = 0; 989 mvm->fwrt.dump.conf = FW_DBG_INVALID; 990 mvm->monitor_on = false; 991 992 /* keep statistics ticking */ 993 iwl_mvm_accu_radio_stats(mvm); 994 } 995 996 int __iwl_mvm_mac_start(struct iwl_mvm *mvm) 997 { 998 int ret; 999 1000 lockdep_assert_held(&mvm->mutex); 1001 1002 ret = iwl_mvm_mei_get_ownership(mvm); 1003 if (ret) 1004 return ret; 1005 1006 if (mvm->mei_nvm_data) { 1007 /* We got the NIC, we can now free the MEI NVM data */ 1008 kfree(mvm->mei_nvm_data); 1009 mvm->mei_nvm_data = NULL; 1010 1011 /* 1012 * We can't free the nvm_data we allocated based on the SAP 1013 * data because we registered to cfg80211 with the channels 1014 * allocated on mvm->nvm_data. Keep a pointer in temp_nvm_data 1015 * just in order to be able free it later. 1016 * NULLify nvm_data so that we will read the NVM from the 1017 * firmware this time. 1018 */ 1019 mvm->temp_nvm_data = mvm->nvm_data; 1020 mvm->nvm_data = NULL; 1021 } 1022 1023 if (test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status)) { 1024 /* 1025 * Now convert the HW_RESTART_REQUESTED flag to IN_HW_RESTART 1026 * so later code will - from now on - see that we're doing it. 
1027 */ 1028 set_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1029 clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status); 1030 /* Clean up some internal and mac80211 state on restart */ 1031 iwl_mvm_restart_cleanup(mvm); 1032 } 1033 ret = iwl_mvm_up(mvm); 1034 1035 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_POST_INIT, 1036 NULL); 1037 iwl_dbg_tlv_time_point(&mvm->fwrt, IWL_FW_INI_TIME_POINT_PERIODIC, 1038 NULL); 1039 1040 mvm->last_reset_or_resume_time_jiffies = jiffies; 1041 1042 if (ret && test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1043 /* Something went wrong - we need to finish some cleanup 1044 * that normally iwl_mvm_mac_restart_complete() below 1045 * would do. 1046 */ 1047 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1048 } 1049 1050 return ret; 1051 } 1052 1053 static int iwl_mvm_mac_start(struct ieee80211_hw *hw) 1054 { 1055 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1056 int ret; 1057 int retry, max_retry = 0; 1058 1059 mutex_lock(&mvm->mutex); 1060 1061 /* we are starting the mac not in error flow, and restart is enabled */ 1062 if (!test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) && 1063 iwlwifi_mod_params.fw_restart) { 1064 max_retry = IWL_MAX_INIT_RETRY; 1065 /* 1066 * This will prevent mac80211 recovery flows to trigger during 1067 * init failures 1068 */ 1069 set_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1070 } 1071 1072 for (retry = 0; retry <= max_retry; retry++) { 1073 ret = __iwl_mvm_mac_start(mvm); 1074 if (!ret) 1075 break; 1076 1077 IWL_ERR(mvm, "mac start retry %d\n", retry); 1078 } 1079 clear_bit(IWL_MVM_STATUS_STARTING, &mvm->status); 1080 1081 mutex_unlock(&mvm->mutex); 1082 1083 iwl_mvm_mei_set_sw_rfkill_state(mvm); 1084 1085 return ret; 1086 } 1087 1088 static void iwl_mvm_restart_complete(struct iwl_mvm *mvm) 1089 { 1090 int ret; 1091 1092 mutex_lock(&mvm->mutex); 1093 1094 clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status); 1095 1096 ret = iwl_mvm_update_quotas(mvm, true, NULL); 1097 if (ret) 1098 IWL_ERR(mvm, "Failed to update quotas after restart (%d)\n", 1099 ret); 1100 1101 iwl_mvm_send_recovery_cmd(mvm, ERROR_RECOVERY_END_OF_RECOVERY); 1102 1103 /* 1104 * If we have TDLS peers, remove them. We don't know the last seqno/PN 1105 * of packets the FW sent out, so we must reconnect. 1106 */ 1107 iwl_mvm_teardown_tdls_peers(mvm); 1108 1109 mutex_unlock(&mvm->mutex); 1110 } 1111 1112 static void 1113 iwl_mvm_mac_reconfig_complete(struct ieee80211_hw *hw, 1114 enum ieee80211_reconfig_type reconfig_type) 1115 { 1116 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1117 1118 switch (reconfig_type) { 1119 case IEEE80211_RECONFIG_TYPE_RESTART: 1120 iwl_mvm_restart_complete(mvm); 1121 break; 1122 case IEEE80211_RECONFIG_TYPE_SUSPEND: 1123 break; 1124 } 1125 } 1126 1127 void __iwl_mvm_mac_stop(struct iwl_mvm *mvm) 1128 { 1129 lockdep_assert_held(&mvm->mutex); 1130 1131 iwl_mvm_ftm_initiator_smooth_stop(mvm); 1132 1133 /* firmware counters are obviously reset now, but we shouldn't 1134 * partially track so also clear the fw_reset_accu counters. 
1135 */ 1136 memset(&mvm->accu_radio_stats, 0, sizeof(mvm->accu_radio_stats)); 1137 1138 /* async_handlers_wk is now blocked */ 1139 1140 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) < 12) 1141 iwl_mvm_rm_aux_sta(mvm); 1142 1143 iwl_mvm_stop_device(mvm); 1144 1145 iwl_mvm_async_handlers_purge(mvm); 1146 /* async_handlers_list is empty and will stay empty: HW is stopped */ 1147 1148 /* 1149 * Clear IN_HW_RESTART and HW_RESTART_REQUESTED flag when stopping the 1150 * hw (as restart_complete() won't be called in this case) and mac80211 1151 * won't execute the restart. 1152 * But make sure to cleanup interfaces that have gone down before/during 1153 * HW restart was requested. 1154 */ 1155 if (test_and_clear_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 1156 test_and_clear_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 1157 &mvm->status)) 1158 ieee80211_iterate_interfaces(mvm->hw, 0, 1159 iwl_mvm_cleanup_iterator, mvm); 1160 1161 /* We shouldn't have any UIDs still set. Loop over all the UIDs to 1162 * make sure there's nothing left there and warn if any is found. 1163 */ 1164 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_UMAC_SCAN)) { 1165 int i; 1166 1167 for (i = 0; i < mvm->max_scans; i++) { 1168 if (WARN_ONCE(mvm->scan_uid_status[i], 1169 "UMAC scan UID %d status was not cleaned\n", 1170 i)) 1171 mvm->scan_uid_status[i] = 0; 1172 } 1173 } 1174 } 1175 1176 static void iwl_mvm_mac_stop(struct ieee80211_hw *hw) 1177 { 1178 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1179 1180 flush_work(&mvm->async_handlers_wk); 1181 flush_work(&mvm->add_stream_wk); 1182 1183 /* 1184 * Lock and clear the firmware running bit here already, so that 1185 * new commands coming in elsewhere, e.g. from debugfs, will not 1186 * be able to proceed. This is important here because one of those 1187 * debugfs files causes the firmware dump to be triggered, and if we 1188 * don't stop debugfs accesses before canceling that it could be 1189 * retriggered after we flush it but before we've cleared the bit. 1190 */ 1191 clear_bit(IWL_MVM_STATUS_FIRMWARE_RUNNING, &mvm->status); 1192 1193 cancel_delayed_work_sync(&mvm->cs_tx_unblock_dwork); 1194 cancel_delayed_work_sync(&mvm->scan_timeout_dwork); 1195 1196 /* 1197 * The work item could be running or queued if the 1198 * ROC time event stops just as we get here. 1199 */ 1200 flush_work(&mvm->roc_done_wk); 1201 1202 iwl_mvm_mei_set_sw_rfkill_state(mvm); 1203 1204 mutex_lock(&mvm->mutex); 1205 __iwl_mvm_mac_stop(mvm); 1206 mutex_unlock(&mvm->mutex); 1207 1208 /* 1209 * The worker might have been waiting for the mutex, let it run and 1210 * discover that its list is now empty. 
1211 */ 1212 cancel_work_sync(&mvm->async_handlers_wk); 1213 } 1214 1215 static struct iwl_mvm_phy_ctxt *iwl_mvm_get_free_phy_ctxt(struct iwl_mvm *mvm) 1216 { 1217 u16 i; 1218 1219 lockdep_assert_held(&mvm->mutex); 1220 1221 for (i = 0; i < NUM_PHY_CTX; i++) 1222 if (!mvm->phy_ctxts[i].ref) 1223 return &mvm->phy_ctxts[i]; 1224 1225 IWL_ERR(mvm, "No available PHY context\n"); 1226 return NULL; 1227 } 1228 1229 static int iwl_mvm_set_tx_power(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 1230 s16 tx_power) 1231 { 1232 u32 cmd_id = REDUCE_TX_POWER_CMD; 1233 int len; 1234 struct iwl_dev_tx_power_cmd cmd = { 1235 .common.set_mode = cpu_to_le32(IWL_TX_POWER_MODE_SET_MAC), 1236 .common.mac_context_id = 1237 cpu_to_le32(iwl_mvm_vif_from_mac80211(vif)->id), 1238 .common.pwr_restriction = cpu_to_le16(8 * tx_power), 1239 }; 1240 u8 cmd_ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1241 IWL_FW_CMD_VER_UNKNOWN); 1242 1243 if (tx_power == IWL_DEFAULT_MAX_TX_POWER) 1244 cmd.common.pwr_restriction = cpu_to_le16(IWL_DEV_MAX_TX_POWER); 1245 1246 if (cmd_ver == 7) 1247 len = sizeof(cmd.v7); 1248 else if (cmd_ver == 6) 1249 len = sizeof(cmd.v6); 1250 else if (fw_has_api(&mvm->fw->ucode_capa, 1251 IWL_UCODE_TLV_API_REDUCE_TX_POWER)) 1252 len = sizeof(cmd.v5); 1253 else if (fw_has_capa(&mvm->fw->ucode_capa, 1254 IWL_UCODE_TLV_CAPA_TX_POWER_ACK)) 1255 len = sizeof(cmd.v4); 1256 else 1257 len = sizeof(cmd.v3); 1258 1259 /* all structs have the same common part, add it */ 1260 len += sizeof(cmd.common); 1261 1262 return iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, len, &cmd); 1263 } 1264 1265 static int iwl_mvm_post_channel_switch(struct ieee80211_hw *hw, 1266 struct ieee80211_vif *vif) 1267 { 1268 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1269 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1270 int ret; 1271 1272 mutex_lock(&mvm->mutex); 1273 1274 if (vif->type == NL80211_IFTYPE_STATION) { 1275 struct iwl_mvm_sta *mvmsta; 1276 1277 mvmvif->csa_bcn_pending = false; 1278 mvmsta = iwl_mvm_sta_from_staid_protected(mvm, 1279 mvmvif->ap_sta_id); 1280 1281 if (WARN_ON(!mvmsta)) { 1282 ret = -EIO; 1283 goto out_unlock; 1284 } 1285 1286 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, false); 1287 1288 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1289 1290 if (!fw_has_capa(&mvm->fw->ucode_capa, 1291 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 1292 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 1293 if (ret) 1294 goto out_unlock; 1295 1296 iwl_mvm_stop_session_protection(mvm, vif); 1297 } 1298 } 1299 1300 mvmvif->ps_disabled = false; 1301 1302 ret = iwl_mvm_power_update_ps(mvm); 1303 1304 out_unlock: 1305 if (mvmvif->csa_failed) 1306 ret = -EIO; 1307 mutex_unlock(&mvm->mutex); 1308 1309 return ret; 1310 } 1311 1312 static void iwl_mvm_abort_channel_switch(struct ieee80211_hw *hw, 1313 struct ieee80211_vif *vif) 1314 { 1315 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1316 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1317 struct iwl_chan_switch_te_cmd cmd = { 1318 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 1319 mvmvif->color)), 1320 .action = cpu_to_le32(FW_CTXT_ACTION_REMOVE), 1321 }; 1322 1323 /* 1324 * In the new flow since FW is in charge of the timing, 1325 * if driver has canceled the channel switch he will receive the 1326 * CHANNEL_SWITCH_START_NOTIF notification from FW and then cancel it 1327 */ 1328 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 1329 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 1330 return; 1331 1332 IWL_DEBUG_MAC80211(mvm, "Abort CSA on mac %d\n", mvmvif->id); 
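/*
 * Tear down the switch while holding the mvm mutex: without the
 * CHANNEL_SWITCH_CMD capability the CSA time event is removed
 * directly, otherwise the firmware is asked to remove it via
 * CHANNEL_SWITCH_TIME_EVENT_CMD with FW_CTXT_ACTION_REMOVE. Marking
 * csa_failed makes iwl_mvm_post_channel_switch() below report -EIO
 * back to mac80211.
 */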
1333 1334 mutex_lock(&mvm->mutex); 1335 if (!fw_has_capa(&mvm->fw->ucode_capa, 1336 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 1337 iwl_mvm_remove_csa_period(mvm, vif); 1338 else 1339 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 1340 WIDE_ID(MAC_CONF_GROUP, 1341 CHANNEL_SWITCH_TIME_EVENT_CMD), 1342 0, sizeof(cmd), &cmd)); 1343 mvmvif->csa_failed = true; 1344 mutex_unlock(&mvm->mutex); 1345 1346 iwl_mvm_post_channel_switch(hw, vif); 1347 } 1348 1349 static void iwl_mvm_channel_switch_disconnect_wk(struct work_struct *wk) 1350 { 1351 struct iwl_mvm_vif *mvmvif; 1352 struct ieee80211_vif *vif; 1353 1354 mvmvif = container_of(wk, struct iwl_mvm_vif, csa_work.work); 1355 vif = container_of((void *)mvmvif, struct ieee80211_vif, drv_priv); 1356 1357 /* Trigger disconnect (should clear the CSA state) */ 1358 ieee80211_chswitch_done(vif, false); 1359 } 1360 1361 static int iwl_mvm_mac_add_interface(struct ieee80211_hw *hw, 1362 struct ieee80211_vif *vif) 1363 { 1364 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1365 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1366 int ret; 1367 1368 mvmvif->mvm = mvm; 1369 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1370 1371 /* 1372 * Not much to do here. The stack will not allow interface 1373 * types or combinations that we didn't advertise, so we 1374 * don't really have to check the types. 1375 */ 1376 1377 mutex_lock(&mvm->mutex); 1378 1379 /* make sure that beacon statistics don't go backwards with FW reset */ 1380 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 1381 mvmvif->beacon_stats.accu_num_beacons += 1382 mvmvif->beacon_stats.num_beacons; 1383 1384 /* Allocate resources for the MAC context, and add it to the fw */ 1385 ret = iwl_mvm_mac_ctxt_init(mvm, vif); 1386 if (ret) 1387 goto out_unlock; 1388 1389 rcu_assign_pointer(mvm->vif_id_to_mac[mvmvif->id], vif); 1390 1391 /* Counting number of interfaces is needed for legacy PM */ 1392 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1393 mvm->vif_count++; 1394 1395 /* 1396 * The AP binding flow can be done only after the beacon 1397 * template is configured (which happens only in the mac80211 1398 * start_ap() flow), and adding the broadcast station can happen 1399 * only after the binding. 1400 * In addition, since modifying the MAC before adding a bcast 1401 * station is not allowed by the FW, delay the adding of MAC context to 1402 * the point where we can also add the bcast station. 
1403 * In short: there's not much we can do at this point, other than 1404 * allocating resources :) 1405 */ 1406 if (vif->type == NL80211_IFTYPE_AP || 1407 vif->type == NL80211_IFTYPE_ADHOC) { 1408 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1409 if (ret) { 1410 IWL_ERR(mvm, "Failed to allocate bcast sta\n"); 1411 goto out_release; 1412 } 1413 1414 /* 1415 * Only queue for this station is the mcast queue, 1416 * which shouldn't be in TFD mask anyway 1417 */ 1418 ret = iwl_mvm_allocate_int_sta(mvm, &mvmvif->mcast_sta, 1419 0, vif->type, 1420 IWL_STA_MULTICAST); 1421 if (ret) 1422 goto out_release; 1423 1424 iwl_mvm_vif_dbgfs_register(mvm, vif); 1425 goto out_unlock; 1426 } 1427 1428 mvmvif->features |= hw->netdev_features; 1429 1430 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 1431 if (ret) 1432 goto out_release; 1433 1434 ret = iwl_mvm_power_update_mac(mvm); 1435 if (ret) 1436 goto out_remove_mac; 1437 1438 /* beacon filtering */ 1439 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 1440 if (ret) 1441 goto out_remove_mac; 1442 1443 if (!mvm->bf_allowed_vif && 1444 vif->type == NL80211_IFTYPE_STATION && !vif->p2p) { 1445 mvm->bf_allowed_vif = mvmvif; 1446 vif->driver_flags |= IEEE80211_VIF_BEACON_FILTER | 1447 IEEE80211_VIF_SUPPORTS_CQM_RSSI; 1448 } 1449 1450 /* 1451 * P2P_DEVICE interface does not have a channel context assigned to it, 1452 * so a dedicated PHY context is allocated to it and the corresponding 1453 * MAC context is bound to it at this stage. 1454 */ 1455 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1456 1457 mvmvif->phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 1458 if (!mvmvif->phy_ctxt) { 1459 ret = -ENOSPC; 1460 goto out_free_bf; 1461 } 1462 1463 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 1464 ret = iwl_mvm_binding_add_vif(mvm, vif); 1465 if (ret) 1466 goto out_unref_phy; 1467 1468 ret = iwl_mvm_add_p2p_bcast_sta(mvm, vif); 1469 if (ret) 1470 goto out_unbind; 1471 1472 /* Save a pointer to p2p device vif, so it can later be used to 1473 * update the p2p device MAC when a GO is started/stopped */ 1474 mvm->p2p_device_vif = vif; 1475 } 1476 1477 iwl_mvm_tcm_add_vif(mvm, vif); 1478 INIT_DELAYED_WORK(&mvmvif->csa_work, 1479 iwl_mvm_channel_switch_disconnect_wk); 1480 1481 if (vif->type == NL80211_IFTYPE_MONITOR) 1482 mvm->monitor_on = true; 1483 1484 iwl_mvm_vif_dbgfs_register(mvm, vif); 1485 1486 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 1487 vif->type == NL80211_IFTYPE_STATION && !vif->p2p && 1488 !mvm->csme_vif && mvm->mei_registered) { 1489 iwl_mei_set_nic_info(vif->addr, mvm->nvm_data->hw_addr); 1490 iwl_mei_set_netdev(ieee80211_vif_to_wdev(vif)->netdev); 1491 mvm->csme_vif = vif; 1492 } 1493 1494 goto out_unlock; 1495 1496 out_unbind: 1497 iwl_mvm_binding_remove_vif(mvm, vif); 1498 out_unref_phy: 1499 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1500 out_free_bf: 1501 if (mvm->bf_allowed_vif == mvmvif) { 1502 mvm->bf_allowed_vif = NULL; 1503 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1504 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1505 } 1506 out_remove_mac: 1507 mvmvif->phy_ctxt = NULL; 1508 iwl_mvm_mac_ctxt_remove(mvm, vif); 1509 out_release: 1510 if (vif->type != NL80211_IFTYPE_P2P_DEVICE) 1511 mvm->vif_count--; 1512 out_unlock: 1513 mutex_unlock(&mvm->mutex); 1514 1515 return ret; 1516 } 1517 1518 static void iwl_mvm_prepare_mac_removal(struct iwl_mvm *mvm, 1519 struct ieee80211_vif *vif) 1520 { 1521 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1522 /* 1523 * Flush the ROC worker which will flush the OFFCHANNEL queue. 
1524 * We assume here that all the packets sent to the OFFCHANNEL 1525 * queue are sent in ROC session. 1526 */ 1527 flush_work(&mvm->roc_done_wk); 1528 } 1529 } 1530 1531 static void iwl_mvm_mac_remove_interface(struct ieee80211_hw *hw, 1532 struct ieee80211_vif *vif) 1533 { 1534 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1535 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1536 struct iwl_probe_resp_data *probe_data; 1537 1538 iwl_mvm_prepare_mac_removal(mvm, vif); 1539 1540 if (!(vif->type == NL80211_IFTYPE_AP || 1541 vif->type == NL80211_IFTYPE_ADHOC)) 1542 iwl_mvm_tcm_rm_vif(mvm, vif); 1543 1544 mutex_lock(&mvm->mutex); 1545 1546 if (vif == mvm->csme_vif) { 1547 iwl_mei_set_netdev(NULL); 1548 mvm->csme_vif = NULL; 1549 } 1550 1551 probe_data = rcu_dereference_protected(mvmvif->probe_resp_data, 1552 lockdep_is_held(&mvm->mutex)); 1553 RCU_INIT_POINTER(mvmvif->probe_resp_data, NULL); 1554 if (probe_data) 1555 kfree_rcu(probe_data, rcu_head); 1556 1557 if (mvm->bf_allowed_vif == mvmvif) { 1558 mvm->bf_allowed_vif = NULL; 1559 vif->driver_flags &= ~(IEEE80211_VIF_BEACON_FILTER | 1560 IEEE80211_VIF_SUPPORTS_CQM_RSSI); 1561 } 1562 1563 if (vif->bss_conf.ftm_responder) 1564 memset(&mvm->ftm_resp_stats, 0, sizeof(mvm->ftm_resp_stats)); 1565 1566 iwl_mvm_vif_dbgfs_clean(mvm, vif); 1567 1568 /* 1569 * For AP/GO interface, the tear down of the resources allocated to the 1570 * interface is be handled as part of the stop_ap flow. 1571 */ 1572 if (vif->type == NL80211_IFTYPE_AP || 1573 vif->type == NL80211_IFTYPE_ADHOC) { 1574 #ifdef CONFIG_NL80211_TESTMODE 1575 if (vif == mvm->noa_vif) { 1576 mvm->noa_vif = NULL; 1577 mvm->noa_duration = 0; 1578 } 1579 #endif 1580 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->mcast_sta); 1581 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1582 goto out_release; 1583 } 1584 1585 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1586 mvm->p2p_device_vif = NULL; 1587 iwl_mvm_rm_p2p_bcast_sta(mvm, vif); 1588 iwl_mvm_binding_remove_vif(mvm, vif); 1589 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 1590 mvmvif->phy_ctxt = NULL; 1591 } 1592 1593 if (mvm->vif_count && vif->type != NL80211_IFTYPE_P2P_DEVICE) 1594 mvm->vif_count--; 1595 1596 iwl_mvm_power_update_mac(mvm); 1597 iwl_mvm_mac_ctxt_remove(mvm, vif); 1598 1599 RCU_INIT_POINTER(mvm->vif_id_to_mac[mvmvif->id], NULL); 1600 1601 if (vif->type == NL80211_IFTYPE_MONITOR) 1602 mvm->monitor_on = false; 1603 1604 out_release: 1605 mutex_unlock(&mvm->mutex); 1606 } 1607 1608 static int iwl_mvm_mac_config(struct ieee80211_hw *hw, u32 changed) 1609 { 1610 return 0; 1611 } 1612 1613 struct iwl_mvm_mc_iter_data { 1614 struct iwl_mvm *mvm; 1615 int port_id; 1616 }; 1617 1618 static void iwl_mvm_mc_iface_iterator(void *_data, u8 *mac, 1619 struct ieee80211_vif *vif) 1620 { 1621 struct iwl_mvm_mc_iter_data *data = _data; 1622 struct iwl_mvm *mvm = data->mvm; 1623 struct iwl_mcast_filter_cmd *cmd = mvm->mcast_filter_cmd; 1624 struct iwl_host_cmd hcmd = { 1625 .id = MCAST_FILTER_CMD, 1626 .flags = CMD_ASYNC, 1627 .dataflags[0] = IWL_HCMD_DFL_NOCOPY, 1628 }; 1629 int ret, len; 1630 1631 /* if we don't have free ports, mcast frames will be dropped */ 1632 if (WARN_ON_ONCE(data->port_id >= MAX_PORT_ID_NUM)) 1633 return; 1634 1635 if (vif->type != NL80211_IFTYPE_STATION || 1636 !vif->bss_conf.assoc) 1637 return; 1638 1639 cmd->port_id = data->port_id++; 1640 memcpy(cmd->bssid, vif->bss_conf.bssid, ETH_ALEN); 1641 len = roundup(sizeof(*cmd) + cmd->count * ETH_ALEN, 4); 1642 1643 hcmd.len[0] = len; 1644 hcmd.data[0] = cmd; 1645 1646 ret = 
iwl_mvm_send_cmd(mvm, &hcmd); 1647 if (ret) 1648 IWL_ERR(mvm, "mcast filter cmd error. ret=%d\n", ret); 1649 } 1650 1651 static void iwl_mvm_recalc_multicast(struct iwl_mvm *mvm) 1652 { 1653 struct iwl_mvm_mc_iter_data iter_data = { 1654 .mvm = mvm, 1655 }; 1656 int ret; 1657 1658 lockdep_assert_held(&mvm->mutex); 1659 1660 if (WARN_ON_ONCE(!mvm->mcast_filter_cmd)) 1661 return; 1662 1663 ieee80211_iterate_active_interfaces_atomic( 1664 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1665 iwl_mvm_mc_iface_iterator, &iter_data); 1666 1667 /* 1668 * Send a (synchronous) echo command so that we wait for the 1669 * multiple asynchronous MCAST_FILTER_CMD commands sent by 1670 * the interface iterator. Otherwise, we might get here over 1671 * and over again (by userspace just sending a lot of these) 1672 * and the CPU can send them faster than the firmware can 1673 * process them. 1674 * Note that the CPU is still faster - but with this we'll 1675 * actually send fewer commands overall because the CPU will 1676 * not schedule the work in mac80211 as frequently if it's 1677 * still running when rescheduled (possibly multiple times). 1678 */ 1679 ret = iwl_mvm_send_cmd_pdu(mvm, ECHO_CMD, 0, 0, NULL); 1680 if (ret) 1681 IWL_ERR(mvm, "Failed to synchronize multicast groups update\n"); 1682 } 1683 1684 static u64 iwl_mvm_prepare_multicast(struct ieee80211_hw *hw, 1685 struct netdev_hw_addr_list *mc_list) 1686 { 1687 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1688 struct iwl_mcast_filter_cmd *cmd; 1689 struct netdev_hw_addr *addr; 1690 int addr_count; 1691 bool pass_all; 1692 int len; 1693 1694 addr_count = netdev_hw_addr_list_count(mc_list); 1695 pass_all = addr_count > MAX_MCAST_FILTERING_ADDRESSES || 1696 IWL_MVM_FW_MCAST_FILTER_PASS_ALL; 1697 if (pass_all) 1698 addr_count = 0; 1699 1700 len = roundup(sizeof(*cmd) + addr_count * ETH_ALEN, 4); 1701 cmd = kzalloc(len, GFP_ATOMIC); 1702 if (!cmd) 1703 return 0; 1704 1705 if (pass_all) { 1706 cmd->pass_all = 1; 1707 #if defined(__linux__) 1708 return (u64)(unsigned long)cmd; 1709 #elif defined(__FreeBSD__) 1710 return (u64)(uintptr_t)cmd; 1711 #endif 1712 } 1713 1714 netdev_hw_addr_list_for_each(addr, mc_list) { 1715 #if defined(__linux__) 1716 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %pM\n", 1717 cmd->count, addr->addr); 1718 #elif defined(__FreeBSD__) 1719 IWL_DEBUG_MAC80211(mvm, "mcast addr (%d): %6D\n", 1720 cmd->count, addr->addr, ":"); 1721 #endif 1722 memcpy(&cmd->addr_list[cmd->count * ETH_ALEN], 1723 addr->addr, ETH_ALEN); 1724 cmd->count++; 1725 } 1726 1727 #if defined(__linux__) 1728 return (u64)(unsigned long)cmd; 1729 #elif defined(__FreeBSD__) 1730 return (u64)(uintptr_t)cmd; 1731 #endif 1732 } 1733 1734 static void iwl_mvm_configure_filter(struct ieee80211_hw *hw, 1735 unsigned int changed_flags, 1736 unsigned int *total_flags, 1737 u64 multicast) 1738 { 1739 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1740 #if defined(__linux__) 1741 struct iwl_mcast_filter_cmd *cmd = (void *)(unsigned long)multicast; 1742 #elif defined(__FreeBSD__) 1743 struct iwl_mcast_filter_cmd *cmd = (void *)(uintptr_t)multicast; 1744 #endif 1745 1746 mutex_lock(&mvm->mutex); 1747 1748 /* replace previous configuration */ 1749 kfree(mvm->mcast_filter_cmd); 1750 mvm->mcast_filter_cmd = cmd; 1751 1752 if (!cmd) 1753 goto out; 1754 1755 if (changed_flags & FIF_ALLMULTI) 1756 cmd->pass_all = !!(*total_flags & FIF_ALLMULTI); 1757 1758 if (cmd->pass_all) 1759 cmd->count = 0; 1760 1761 iwl_mvm_recalc_multicast(mvm); 1762 out: 1763 mutex_unlock(&mvm->mutex); 1764
*total_flags = 0; 1765 } 1766 1767 static void iwl_mvm_config_iface_filter(struct ieee80211_hw *hw, 1768 struct ieee80211_vif *vif, 1769 unsigned int filter_flags, 1770 unsigned int changed_flags) 1771 { 1772 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 1773 1774 /* We support only filter for probe requests */ 1775 if (!(changed_flags & FIF_PROBE_REQ)) 1776 return; 1777 1778 /* Supported only for p2p client interfaces */ 1779 if (vif->type != NL80211_IFTYPE_STATION || !vif->bss_conf.assoc || 1780 !vif->p2p) 1781 return; 1782 1783 mutex_lock(&mvm->mutex); 1784 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 1785 mutex_unlock(&mvm->mutex); 1786 } 1787 1788 static int iwl_mvm_update_mu_groups(struct iwl_mvm *mvm, 1789 struct ieee80211_vif *vif) 1790 { 1791 struct iwl_mu_group_mgmt_cmd cmd = {}; 1792 1793 memcpy(cmd.membership_status, vif->bss_conf.mu_group.membership, 1794 WLAN_MEMBERSHIP_LEN); 1795 memcpy(cmd.user_position, vif->bss_conf.mu_group.position, 1796 WLAN_USER_POSITION_LEN); 1797 1798 return iwl_mvm_send_cmd_pdu(mvm, 1799 WIDE_ID(DATA_PATH_GROUP, 1800 UPDATE_MU_GROUPS_CMD), 1801 0, sizeof(cmd), &cmd); 1802 } 1803 1804 static void iwl_mvm_mu_mimo_iface_iterator(void *_data, u8 *mac, 1805 struct ieee80211_vif *vif) 1806 { 1807 if (vif->mu_mimo_owner) { 1808 struct iwl_mu_group_mgmt_notif *notif = _data; 1809 1810 /* 1811 * MU-MIMO Group Id action frame is little endian. We treat 1812 * the data received from firmware as if it came from the 1813 * action frame, so no conversion is needed. 1814 */ 1815 ieee80211_update_mu_groups(vif, 1816 (u8 *)¬if->membership_status, 1817 (u8 *)¬if->user_position); 1818 } 1819 } 1820 1821 void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, 1822 struct iwl_rx_cmd_buffer *rxb) 1823 { 1824 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1825 struct iwl_mu_group_mgmt_notif *notif = (void *)pkt->data; 1826 1827 ieee80211_iterate_active_interfaces_atomic( 1828 mvm->hw, IEEE80211_IFACE_ITER_NORMAL, 1829 iwl_mvm_mu_mimo_iface_iterator, notif); 1830 } 1831 1832 static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) 1833 { 1834 u8 byte_num = ppe_pos_bit / 8; 1835 u8 bit_num = ppe_pos_bit % 8; 1836 u8 residue_bits; 1837 u8 res; 1838 1839 if (bit_num <= 5) 1840 return (ppe[byte_num] >> bit_num) & 1841 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); 1842 1843 /* 1844 * If bit_num > 5, we have to combine bits with next byte. 1845 * Calculate how many bits we need to take from current byte (called 1846 * here "residue_bits"), and add them to bits from next byte. 
1847 */ 1848 1849 residue_bits = 8 - bit_num; 1850 1851 res = (ppe[byte_num + 1] & 1852 (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << 1853 residue_bits; 1854 res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); 1855 1856 return res; 1857 } 1858 1859 static void iwl_mvm_parse_ppe(struct iwl_mvm *mvm, 1860 struct iwl_he_pkt_ext_v2 *pkt_ext, u8 nss, 1861 u8 ru_index_bitmap, u8 *ppe, u8 ppe_pos_bit) 1862 { 1863 int i; 1864 1865 /* 1866 * FW currently supports only nss == MAX_HE_SUPP_NSS 1867 * 1868 * If nss > MAX: we can ignore values we don't support 1869 * If nss < MAX: we can set zeros in other streams 1870 */ 1871 if (nss > MAX_HE_SUPP_NSS) { 1872 IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, 1873 MAX_HE_SUPP_NSS); 1874 nss = MAX_HE_SUPP_NSS; 1875 } 1876 1877 for (i = 0; i < nss; i++) { 1878 u8 ru_index_tmp = ru_index_bitmap << 1; 1879 u8 low_th = IWL_HE_PKT_EXT_NONE, high_th = IWL_HE_PKT_EXT_NONE; 1880 u8 bw; 1881 1882 for (bw = 0; 1883 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1884 bw++) { 1885 ru_index_tmp >>= 1; 1886 1887 if (!(ru_index_tmp & 1)) 1888 continue; 1889 1890 high_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1891 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1892 low_th = iwl_mvm_he_get_ppe_val(ppe, ppe_pos_bit); 1893 ppe_pos_bit += IEEE80211_PPE_THRES_INFO_PPET_SIZE; 1894 1895 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1896 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1897 } 1898 } 1899 } 1900 1901 static void iwl_mvm_set_pkt_ext_from_he_ppe(struct iwl_mvm *mvm, 1902 struct ieee80211_sta *sta, 1903 struct iwl_he_pkt_ext_v2 *pkt_ext) 1904 { 1905 u8 nss = (sta->deflink.he_cap.ppe_thres[0] & IEEE80211_PPE_THRES_NSS_MASK) + 1; 1906 u8 *ppe = &sta->deflink.he_cap.ppe_thres[0]; 1907 u8 ru_index_bitmap = 1908 u8_get_bits(*ppe, 1909 IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK); 1910 /* Starting after PPE header */ 1911 u8 ppe_pos_bit = IEEE80211_HE_PPE_THRES_INFO_HEADER_SIZE; 1912 1913 iwl_mvm_parse_ppe(mvm, pkt_ext, nss, ru_index_bitmap, ppe, ppe_pos_bit); 1914 } 1915 1916 static void iwl_mvm_set_pkt_ext_from_nominal_padding(struct iwl_he_pkt_ext_v2 *pkt_ext, 1917 u8 nominal_padding, 1918 u32 *flags) 1919 { 1920 int low_th = -1; 1921 int high_th = -1; 1922 int i; 1923 1924 switch (nominal_padding) { 1925 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_0US: 1926 low_th = IWL_HE_PKT_EXT_NONE; 1927 high_th = IWL_HE_PKT_EXT_NONE; 1928 break; 1929 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_8US: 1930 low_th = IWL_HE_PKT_EXT_BPSK; 1931 high_th = IWL_HE_PKT_EXT_NONE; 1932 break; 1933 case IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_16US: 1934 low_th = IWL_HE_PKT_EXT_NONE; 1935 high_th = IWL_HE_PKT_EXT_BPSK; 1936 break; 1937 } 1938 1939 /* Set the PPE thresholds accordingly */ 1940 if (low_th >= 0 && high_th >= 0) { 1941 for (i = 0; i < MAX_HE_SUPP_NSS; i++) { 1942 u8 bw; 1943 1944 for (bw = 0; 1945 bw < ARRAY_SIZE(pkt_ext->pkt_ext_qam_th[i]); 1946 bw++) { 1947 pkt_ext->pkt_ext_qam_th[i][bw][0] = low_th; 1948 pkt_ext->pkt_ext_qam_th[i][bw][1] = high_th; 1949 } 1950 } 1951 1952 *flags |= STA_CTXT_HE_PACKET_EXT; 1953 } 1954 } 1955 1956 static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, 1957 struct ieee80211_vif *vif, u8 sta_id) 1958 { 1959 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1960 struct iwl_he_sta_context_cmd_v3 sta_ctxt_cmd = { 1961 .sta_id = sta_id, 1962 .tid_limit = IWL_MAX_TID_COUNT, 1963 .bss_color = vif->bss_conf.he_bss_color.color, 1964 .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, 1965 
.frame_time_rts_th = 1966 cpu_to_le16(vif->bss_conf.frame_time_rts_th), 1967 }; 1968 struct iwl_he_sta_context_cmd_v2 sta_ctxt_cmd_v2 = {}; 1969 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, STA_HE_CTXT_CMD); 1970 u8 ver = iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 2); 1971 int size; 1972 struct ieee80211_sta *sta; 1973 u32 flags; 1974 int i; 1975 const struct ieee80211_sta_he_cap *own_he_cap = NULL; 1976 struct ieee80211_chanctx_conf *chanctx_conf; 1977 const struct ieee80211_supported_band *sband; 1978 void *cmd; 1979 1980 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_MBSSID_HE)) 1981 ver = 1; 1982 1983 switch (ver) { 1984 case 1: 1985 /* same layout as v2 except some data at the end */ 1986 cmd = &sta_ctxt_cmd_v2; 1987 size = sizeof(struct iwl_he_sta_context_cmd_v1); 1988 break; 1989 case 2: 1990 cmd = &sta_ctxt_cmd_v2; 1991 size = sizeof(struct iwl_he_sta_context_cmd_v2); 1992 break; 1993 case 3: 1994 cmd = &sta_ctxt_cmd; 1995 size = sizeof(struct iwl_he_sta_context_cmd_v3); 1996 break; 1997 default: 1998 IWL_ERR(mvm, "bad STA_HE_CTXT_CMD version %d\n", ver); 1999 return; 2000 } 2001 2002 rcu_read_lock(); 2003 2004 chanctx_conf = rcu_dereference(vif->chanctx_conf); 2005 if (WARN_ON(!chanctx_conf)) { 2006 rcu_read_unlock(); 2007 return; 2008 } 2009 2010 sband = mvm->hw->wiphy->bands[chanctx_conf->def.chan->band]; 2011 own_he_cap = ieee80211_get_he_iftype_cap(sband, 2012 ieee80211_vif_type_p2p(vif)); 2013 2014 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); 2015 if (IS_ERR_OR_NULL(sta)) { 2016 rcu_read_unlock(); 2017 WARN(1, "Can't find STA to configure HE\n"); 2018 return; 2019 } 2020 2021 if (!sta->deflink.he_cap.has_he) { 2022 rcu_read_unlock(); 2023 return; 2024 } 2025 2026 flags = 0; 2027 2028 /* Block 26-tone RU OFDMA transmissions */ 2029 if (mvmvif->he_ru_2mhz_block) 2030 flags |= STA_CTXT_HE_RU_2MHZ_BLOCK; 2031 2032 /* HTC flags */ 2033 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[0] & 2034 IEEE80211_HE_MAC_CAP0_HTC_HE) 2035 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); 2036 if ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2037 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || 2038 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2039 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { 2040 u8 link_adap = 2041 ((sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2042 IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + 2043 (sta->deflink.he_cap.he_cap_elem.mac_cap_info[1] & 2044 IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); 2045 2046 if (link_adap == 2) 2047 sta_ctxt_cmd.htc_flags |= 2048 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); 2049 else if (link_adap == 3) 2050 sta_ctxt_cmd.htc_flags |= 2051 cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); 2052 } 2053 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) 2054 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); 2055 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[3] & 2056 IEEE80211_HE_MAC_CAP3_OMI_CONTROL) 2057 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); 2058 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) 2059 sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); 2060 2061 /* 2062 * Initialize the PPE thresholds to "None" (7), as described in Table 2063 * 9-262ac of 80211.ax/D3.0. 2064 */ 2065 memset(&sta_ctxt_cmd.pkt_ext, IWL_HE_PKT_EXT_NONE, 2066 sizeof(sta_ctxt_cmd.pkt_ext)); 2067 2068 /* If PPE Thresholds exist, parse them into a FW-familiar format. 
*/ 2069 if (sta->deflink.he_cap.he_cap_elem.phy_cap_info[6] & 2070 IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { 2071 iwl_mvm_set_pkt_ext_from_he_ppe(mvm, sta, 2072 &sta_ctxt_cmd.pkt_ext); 2073 flags |= STA_CTXT_HE_PACKET_EXT; 2074 /* PPE Thresholds doesn't exist - set the API PPE values 2075 * according to Common Nominal Packet Padding fiels. */ 2076 } else { 2077 u8 nominal_padding = 2078 u8_get_bits(sta->deflink.he_cap.he_cap_elem.phy_cap_info[9], 2079 IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_MASK); 2080 if (nominal_padding != IEEE80211_HE_PHY_CAP9_NOMINAL_PKT_PADDING_RESERVED) 2081 iwl_mvm_set_pkt_ext_from_nominal_padding(&sta_ctxt_cmd.pkt_ext, 2082 nominal_padding, 2083 &flags); 2084 } 2085 2086 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2087 IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP) 2088 flags |= STA_CTXT_HE_32BIT_BA_BITMAP; 2089 2090 if (sta->deflink.he_cap.he_cap_elem.mac_cap_info[2] & 2091 IEEE80211_HE_MAC_CAP2_ACK_EN) 2092 flags |= STA_CTXT_HE_ACK_ENABLED; 2093 2094 rcu_read_unlock(); 2095 2096 /* Mark MU EDCA as enabled, unless none detected on some AC */ 2097 flags |= STA_CTXT_HE_MU_EDCA_CW; 2098 for (i = 0; i < IEEE80211_NUM_ACS; i++) { 2099 struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = 2100 &mvmvif->queue_params[i].mu_edca_param_rec; 2101 u8 ac = iwl_mvm_mac80211_ac_to_ucode_ac(i); 2102 2103 if (!mvmvif->queue_params[i].mu_edca) { 2104 flags &= ~STA_CTXT_HE_MU_EDCA_CW; 2105 break; 2106 } 2107 2108 sta_ctxt_cmd.trig_based_txf[ac].cwmin = 2109 cpu_to_le16(mu_edca->ecw_min_max & 0xf); 2110 sta_ctxt_cmd.trig_based_txf[ac].cwmax = 2111 cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); 2112 sta_ctxt_cmd.trig_based_txf[ac].aifsn = 2113 cpu_to_le16(mu_edca->aifsn); 2114 sta_ctxt_cmd.trig_based_txf[ac].mu_time = 2115 cpu_to_le16(mu_edca->mu_edca_timer); 2116 } 2117 2118 2119 if (vif->bss_conf.uora_exists) { 2120 flags |= STA_CTXT_HE_TRIG_RND_ALLOC; 2121 2122 sta_ctxt_cmd.rand_alloc_ecwmin = 2123 vif->bss_conf.uora_ocw_range & 0x7; 2124 sta_ctxt_cmd.rand_alloc_ecwmax = 2125 (vif->bss_conf.uora_ocw_range >> 3) & 0x7; 2126 } 2127 2128 if (own_he_cap && !(own_he_cap->he_cap_elem.mac_cap_info[2] & 2129 IEEE80211_HE_MAC_CAP2_ACK_EN)) 2130 flags |= STA_CTXT_HE_NIC_NOT_ACK_ENABLED; 2131 2132 if (vif->bss_conf.nontransmitted) { 2133 flags |= STA_CTXT_HE_REF_BSSID_VALID; 2134 ether_addr_copy(sta_ctxt_cmd.ref_bssid_addr, 2135 vif->bss_conf.transmitter_bssid); 2136 sta_ctxt_cmd.max_bssid_indicator = 2137 vif->bss_conf.bssid_indicator; 2138 sta_ctxt_cmd.bssid_index = vif->bss_conf.bssid_index; 2139 sta_ctxt_cmd.ema_ap = vif->bss_conf.ema_ap; 2140 sta_ctxt_cmd.profile_periodicity = 2141 vif->bss_conf.profile_periodicity; 2142 } 2143 2144 sta_ctxt_cmd.flags = cpu_to_le32(flags); 2145 2146 if (ver < 3) { 2147 /* fields before pkt_ext */ 2148 BUILD_BUG_ON(offsetof(typeof(sta_ctxt_cmd), pkt_ext) != 2149 offsetof(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2150 memcpy(&sta_ctxt_cmd_v2, &sta_ctxt_cmd, 2151 offsetof(typeof(sta_ctxt_cmd), pkt_ext)); 2152 2153 /* pkt_ext */ 2154 for (i = 0; 2155 i < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th); 2156 i++) { 2157 u8 bw; 2158 2159 for (bw = 0; 2160 bw < ARRAY_SIZE(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i]); 2161 bw++) { 2162 BUILD_BUG_ON(sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw]) != 2163 sizeof(sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw])); 2164 2165 memcpy(&sta_ctxt_cmd_v2.pkt_ext.pkt_ext_qam_th[i][bw], 2166 &sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw], 2167 sizeof(sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw])); 2168 } 2169 } 2170 2171 /* 
fields after pkt_ext */ 2172 BUILD_BUG_ON(sizeof(sta_ctxt_cmd) - 2173 offsetofend(typeof(sta_ctxt_cmd), pkt_ext) != 2174 sizeof(sta_ctxt_cmd_v2) - 2175 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext)); 2176 memcpy((u8 *)&sta_ctxt_cmd_v2 + 2177 offsetofend(typeof(sta_ctxt_cmd_v2), pkt_ext), 2178 (u8 *)&sta_ctxt_cmd + 2179 offsetofend(typeof(sta_ctxt_cmd), pkt_ext), 2180 sizeof(sta_ctxt_cmd) - 2181 offsetofend(typeof(sta_ctxt_cmd), pkt_ext)); 2182 sta_ctxt_cmd_v2.reserved3 = 0; 2183 } 2184 2185 if (iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0, size, cmd)) 2186 IWL_ERR(mvm, "Failed to config FW to work HE!\n"); 2187 } 2188 2189 static void iwl_mvm_protect_assoc(struct iwl_mvm *mvm, 2190 struct ieee80211_vif *vif, 2191 u32 duration_override) 2192 { 2193 u32 duration = IWL_MVM_TE_SESSION_PROTECTION_MAX_TIME_MS; 2194 u32 min_duration = IWL_MVM_TE_SESSION_PROTECTION_MIN_TIME_MS; 2195 2196 if (duration_override > duration) 2197 duration = duration_override; 2198 2199 /* Try really hard to protect the session and hear a beacon 2200 * The new session protection command allows us to protect the 2201 * session for a much longer time since the firmware will internally 2202 * create two events: a 300TU one with a very high priority that 2203 * won't be fragmented which should be enough for 99% of the cases, 2204 * and another one (which we configure here to be 900TU long) which 2205 * will have a slightly lower priority, but more importantly, can be 2206 * fragmented so that it'll allow other activities to run. 2207 */ 2208 if (fw_has_capa(&mvm->fw->ucode_capa, 2209 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) 2210 iwl_mvm_schedule_session_protection(mvm, vif, 900, 2211 min_duration, false); 2212 else 2213 iwl_mvm_protect_session(mvm, vif, duration, 2214 min_duration, 500, false); 2215 } 2216 2217 static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, 2218 struct ieee80211_vif *vif, 2219 struct ieee80211_bss_conf *bss_conf, 2220 u64 changes) 2221 { 2222 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2223 int ret; 2224 2225 /* 2226 * Re-calculate the tsf id, as the leader-follower relations depend 2227 * on the beacon interval, which was not known when the station 2228 * interface was added. 2229 */ 2230 if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { 2231 if (vif->bss_conf.he_support && 2232 !iwlwifi_mod_params.disable_11ax) 2233 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2234 2235 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2236 } 2237 2238 /* Update MU EDCA params */ 2239 if (changes & BSS_CHANGED_QOS && mvmvif->associated && 2240 bss_conf->assoc && vif->bss_conf.he_support && 2241 !iwlwifi_mod_params.disable_11ax) 2242 iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); 2243 2244 /* 2245 * If we're not associated yet, take the (new) BSSID before associating 2246 * so the firmware knows. If we're already associated, then use the old 2247 * BSSID here, and we'll send a cleared one later in the CHANGED_ASSOC 2248 * branch for disassociation below. 
2249 */ 2250 if (changes & BSS_CHANGED_BSSID && !mvmvif->associated) 2251 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2252 2253 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, mvmvif->bssid); 2254 if (ret) 2255 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2256 2257 /* after sending it once, adopt mac80211 data */ 2258 memcpy(mvmvif->bssid, bss_conf->bssid, ETH_ALEN); 2259 mvmvif->associated = bss_conf->assoc; 2260 2261 if (changes & BSS_CHANGED_ASSOC) { 2262 if (bss_conf->assoc) { 2263 /* clear statistics to get clean beacon counter */ 2264 iwl_mvm_request_statistics(mvm, true); 2265 memset(&mvmvif->beacon_stats, 0, 2266 sizeof(mvmvif->beacon_stats)); 2267 2268 /* add quota for this interface */ 2269 ret = iwl_mvm_update_quotas(mvm, true, NULL); 2270 if (ret) { 2271 IWL_ERR(mvm, "failed to update quotas\n"); 2272 return; 2273 } 2274 2275 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2276 &mvm->status) && 2277 !fw_has_capa(&mvm->fw->ucode_capa, 2278 IWL_UCODE_TLV_CAPA_SESSION_PROT_CMD)) { 2279 /* 2280 * If we're restarting then the firmware will 2281 * obviously have lost synchronisation with 2282 * the AP. It will attempt to synchronise by 2283 * itself, but we can make it more reliable by 2284 * scheduling a session protection time event. 2285 * 2286 * The firmware needs to receive a beacon to 2287 * catch up with synchronisation, use 110% of 2288 * the beacon interval. 2289 * 2290 * Set a large maximum delay to allow for more 2291 * than a single interface. 2292 * 2293 * For new firmware versions, rely on the 2294 * firmware. This is relevant for DCM scenarios 2295 * only anyway. 2296 */ 2297 u32 dur = (11 * vif->bss_conf.beacon_int) / 10; 2298 iwl_mvm_protect_session(mvm, vif, dur, dur, 2299 5 * dur, false); 2300 } else if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2301 &mvm->status) && 2302 !vif->bss_conf.dtim_period) { 2303 /* 2304 * If we're not restarting and still haven't 2305 * heard a beacon (dtim period unknown) then 2306 * make sure we still have enough minimum time 2307 * remaining in the time event, since the auth 2308 * might actually have taken quite a while 2309 * (especially for SAE) and so the remaining 2310 * time could be small without us having heard 2311 * a beacon yet. 2312 */ 2313 iwl_mvm_protect_assoc(mvm, vif, 0); 2314 } 2315 2316 iwl_mvm_sf_update(mvm, vif, false); 2317 iwl_mvm_power_vif_assoc(mvm, vif); 2318 if (vif->p2p) { 2319 iwl_mvm_update_smps(mvm, vif, 2320 IWL_MVM_SMPS_REQ_PROT, 2321 IEEE80211_SMPS_DYNAMIC); 2322 } 2323 } else if (mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 2324 iwl_mvm_mei_host_disassociated(mvm); 2325 /* 2326 * If update fails - SF might be running in associated 2327 * mode while disassociated - which is forbidden. 2328 */ 2329 ret = iwl_mvm_sf_update(mvm, vif, false); 2330 WARN_ONCE(ret && 2331 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 2332 &mvm->status), 2333 "Failed to update SF upon disassociation\n"); 2334 2335 /* 2336 * If we get an assert during the connection (after the 2337 * station has been added, but before the vif is set 2338 * to associated), mac80211 will re-add the station and 2339 * then configure the vif. Since the vif is not 2340 * associated, we would remove the station here and 2341 * this would fail the recovery. 
2342 */ 2343 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, 2344 &mvm->status)) { 2345 /* 2346 * Remove AP station now that 2347 * the MAC is unassoc 2348 */ 2349 ret = iwl_mvm_rm_sta_id(mvm, vif, 2350 mvmvif->ap_sta_id); 2351 if (ret) 2352 IWL_ERR(mvm, 2353 "failed to remove AP station\n"); 2354 2355 mvmvif->ap_sta_id = IWL_MVM_INVALID_STA; 2356 } 2357 2358 /* remove quota for this interface */ 2359 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2360 if (ret) 2361 IWL_ERR(mvm, "failed to update quotas\n"); 2362 2363 /* this will take the cleared BSSID from bss_conf */ 2364 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 2365 if (ret) 2366 IWL_ERR(mvm, 2367 "failed to update MAC %pM (clear after unassoc)\n", 2368 vif->addr); 2369 } 2370 2371 /* 2372 * The firmware tracks the MU-MIMO group on its own. 2373 * However, on HW restart we should restore this data. 2374 */ 2375 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 2376 (changes & BSS_CHANGED_MU_GROUPS) && vif->mu_mimo_owner) { 2377 ret = iwl_mvm_update_mu_groups(mvm, vif); 2378 if (ret) 2379 IWL_ERR(mvm, 2380 "failed to update VHT MU_MIMO groups\n"); 2381 } 2382 2383 iwl_mvm_recalc_multicast(mvm); 2384 2385 /* reset rssi values */ 2386 mvmvif->bf_data.ave_beacon_signal = 0; 2387 2388 iwl_mvm_bt_coex_vif_change(mvm); 2389 iwl_mvm_update_smps(mvm, vif, IWL_MVM_SMPS_REQ_TT, 2390 IEEE80211_SMPS_AUTOMATIC); 2391 if (fw_has_capa(&mvm->fw->ucode_capa, 2392 IWL_UCODE_TLV_CAPA_UMAC_SCAN)) 2393 iwl_mvm_config_scan(mvm); 2394 } 2395 2396 if (changes & BSS_CHANGED_BEACON_INFO) { 2397 /* 2398 * We received a beacon from the associated AP so 2399 * remove the session protection. 2400 */ 2401 iwl_mvm_stop_session_protection(mvm, vif); 2402 2403 iwl_mvm_sf_update(mvm, vif, false); 2404 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 2405 } 2406 2407 if (changes & (BSS_CHANGED_PS | BSS_CHANGED_P2P_PS | BSS_CHANGED_QOS | 2408 /* 2409 * Send power command on every beacon change, 2410 * because we may have not enabled beacon abort yet. 2411 */ 2412 BSS_CHANGED_BEACON_INFO)) { 2413 ret = iwl_mvm_power_update_mac(mvm); 2414 if (ret) 2415 IWL_ERR(mvm, "failed to update power mode\n"); 2416 } 2417 2418 if (changes & BSS_CHANGED_CQM) { 2419 IWL_DEBUG_MAC80211(mvm, "cqm info_changed\n"); 2420 /* reset cqm events tracking */ 2421 mvmvif->bf_data.last_cqm_event = 0; 2422 if (mvmvif->bf_data.bf_enabled) { 2423 ret = iwl_mvm_enable_beacon_filter(mvm, vif, 0); 2424 if (ret) 2425 IWL_ERR(mvm, 2426 "failed to update CQM thresholds\n"); 2427 } 2428 } 2429 2430 if (changes & BSS_CHANGED_BANDWIDTH) 2431 iwl_mvm_apply_fw_smps_request(vif); 2432 } 2433 2434 static int iwl_mvm_start_ap_ibss(struct ieee80211_hw *hw, 2435 struct ieee80211_vif *vif) 2436 { 2437 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2438 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2439 int ret, i; 2440 2441 mutex_lock(&mvm->mutex); 2442 2443 /* Send the beacon template */ 2444 ret = iwl_mvm_mac_ctxt_beacon_changed(mvm, vif); 2445 if (ret) 2446 goto out_unlock; 2447 2448 /* 2449 * Re-calculate the tsf id, as the leader-follower relations depend on 2450 * the beacon interval, which was not known when the AP interface 2451 * was added. 
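 * (interfaces with compatible beacon timing can then share the same TSF id)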
2452 */ 2453 if (vif->type == NL80211_IFTYPE_AP) 2454 iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); 2455 2456 mvmvif->ap_assoc_sta_count = 0; 2457 2458 /* Add the mac context */ 2459 ret = iwl_mvm_mac_ctxt_add(mvm, vif); 2460 if (ret) 2461 goto out_unlock; 2462 2463 /* Perform the binding */ 2464 ret = iwl_mvm_binding_add_vif(mvm, vif); 2465 if (ret) 2466 goto out_remove; 2467 2468 /* 2469 * This is not very nice, but the simplest: 2470 * For older FWs adding the mcast sta before the bcast station may 2471 * cause assert 0x2b00. 2472 * This is fixed in later FW so make the order of removal depend on 2473 * the TLV 2474 */ 2475 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2476 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2477 if (ret) 2478 goto out_unbind; 2479 /* 2480 * Send the bcast station. At this stage the TBTT and DTIM time 2481 * events are added and applied to the scheduler 2482 */ 2483 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2484 if (ret) { 2485 iwl_mvm_rm_mcast_sta(mvm, vif); 2486 goto out_unbind; 2487 } 2488 } else { 2489 /* 2490 * Send the bcast station. At this stage the TBTT and DTIM time 2491 * events are added and applied to the scheduler 2492 */ 2493 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2494 if (ret) 2495 goto out_unbind; 2496 ret = iwl_mvm_add_mcast_sta(mvm, vif); 2497 if (ret) { 2498 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2499 goto out_unbind; 2500 } 2501 } 2502 2503 /* must be set before quota calculations */ 2504 mvmvif->ap_ibss_active = true; 2505 2506 /* send all the early keys to the device now */ 2507 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 2508 struct ieee80211_key_conf *key = mvmvif->ap_early_keys[i]; 2509 2510 if (!key) 2511 continue; 2512 2513 mvmvif->ap_early_keys[i] = NULL; 2514 2515 ret = __iwl_mvm_mac_set_key(hw, SET_KEY, vif, NULL, key); 2516 if (ret) 2517 goto out_quota_failed; 2518 } 2519 2520 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2521 iwl_mvm_vif_set_low_latency(mvmvif, true, 2522 LOW_LATENCY_VIF_TYPE); 2523 iwl_mvm_send_low_latency_cmd(mvm, true, mvmvif->id); 2524 } 2525 2526 /* power updated needs to be done before quotas */ 2527 iwl_mvm_power_update_mac(mvm); 2528 2529 ret = iwl_mvm_update_quotas(mvm, false, NULL); 2530 if (ret) 2531 goto out_quota_failed; 2532 2533 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2534 if (vif->p2p && mvm->p2p_device_vif) 2535 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2536 2537 iwl_mvm_bt_coex_vif_change(mvm); 2538 2539 /* we don't support TDLS during DCM */ 2540 if (iwl_mvm_phy_ctx_count(mvm) > 1) 2541 iwl_mvm_teardown_tdls_peers(mvm); 2542 2543 iwl_mvm_ftm_restart_responder(mvm, vif); 2544 2545 goto out_unlock; 2546 2547 out_quota_failed: 2548 iwl_mvm_power_update_mac(mvm); 2549 mvmvif->ap_ibss_active = false; 2550 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2551 iwl_mvm_rm_mcast_sta(mvm, vif); 2552 out_unbind: 2553 iwl_mvm_binding_remove_vif(mvm, vif); 2554 out_remove: 2555 iwl_mvm_mac_ctxt_remove(mvm, vif); 2556 out_unlock: 2557 mutex_unlock(&mvm->mutex); 2558 return ret; 2559 } 2560 2561 static void iwl_mvm_stop_ap_ibss(struct ieee80211_hw *hw, 2562 struct ieee80211_vif *vif) 2563 { 2564 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2565 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2566 2567 iwl_mvm_prepare_mac_removal(mvm, vif); 2568 2569 mutex_lock(&mvm->mutex); 2570 2571 /* Handle AP stop while in CSA */ 2572 if (rcu_access_pointer(mvm->csa_vif) == vif) { 2573 iwl_mvm_remove_time_event(mvm, mvmvif, 2574 
&mvmvif->time_event_data); 2575 RCU_INIT_POINTER(mvm->csa_vif, NULL); 2576 mvmvif->csa_countdown = false; 2577 } 2578 2579 if (rcu_access_pointer(mvm->csa_tx_blocked_vif) == vif) { 2580 RCU_INIT_POINTER(mvm->csa_tx_blocked_vif, NULL); 2581 mvm->csa_tx_block_bcn_timeout = 0; 2582 } 2583 2584 mvmvif->ap_ibss_active = false; 2585 mvm->ap_last_beacon_gp2 = 0; 2586 2587 if (vif->type == NL80211_IFTYPE_AP && !vif->p2p) { 2588 iwl_mvm_vif_set_low_latency(mvmvif, false, 2589 LOW_LATENCY_VIF_TYPE); 2590 iwl_mvm_send_low_latency_cmd(mvm, false, mvmvif->id); 2591 } 2592 2593 iwl_mvm_bt_coex_vif_change(mvm); 2594 2595 /* Need to update the P2P Device MAC (only GO, IBSS is single vif) */ 2596 if (vif->p2p && mvm->p2p_device_vif) 2597 iwl_mvm_mac_ctxt_changed(mvm, mvm->p2p_device_vif, false, NULL); 2598 2599 iwl_mvm_update_quotas(mvm, false, NULL); 2600 2601 iwl_mvm_ftm_responder_clear(mvm, vif); 2602 2603 /* 2604 * This is not very nice, but the simplest: 2605 * For older FWs removing the mcast sta before the bcast station may 2606 * cause assert 0x2b00. 2607 * This is fixed in later FW (which will stop beaconing when removing 2608 * bcast station). 2609 * So make the order of removal depend on the TLV 2610 */ 2611 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2612 iwl_mvm_rm_mcast_sta(mvm, vif); 2613 iwl_mvm_send_rm_bcast_sta(mvm, vif); 2614 if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 2615 iwl_mvm_rm_mcast_sta(mvm, vif); 2616 iwl_mvm_binding_remove_vif(mvm, vif); 2617 2618 iwl_mvm_power_update_mac(mvm); 2619 2620 iwl_mvm_mac_ctxt_remove(mvm, vif); 2621 2622 mutex_unlock(&mvm->mutex); 2623 } 2624 2625 static void 2626 iwl_mvm_bss_info_changed_ap_ibss(struct iwl_mvm *mvm, 2627 struct ieee80211_vif *vif, 2628 struct ieee80211_bss_conf *bss_conf, 2629 u64 changes) 2630 { 2631 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2632 2633 /* Changes will be applied when the AP/IBSS is started */ 2634 if (!mvmvif->ap_ibss_active) 2635 return; 2636 2637 if (changes & (BSS_CHANGED_ERP_CTS_PROT | BSS_CHANGED_HT | 2638 BSS_CHANGED_BANDWIDTH | BSS_CHANGED_QOS) && 2639 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL)) 2640 IWL_ERR(mvm, "failed to update MAC %pM\n", vif->addr); 2641 2642 /* Need to send a new beacon template to the FW */ 2643 if (changes & BSS_CHANGED_BEACON && 2644 iwl_mvm_mac_ctxt_beacon_changed(mvm, vif)) 2645 IWL_WARN(mvm, "Failed updating beacon data\n"); 2646 2647 if (changes & BSS_CHANGED_FTM_RESPONDER) { 2648 int ret = iwl_mvm_ftm_start_responder(mvm, vif); 2649 2650 if (ret) 2651 IWL_WARN(mvm, "Failed to enable FTM responder (%d)\n", 2652 ret); 2653 } 2654 2655 } 2656 2657 static void iwl_mvm_bss_info_changed(struct ieee80211_hw *hw, 2658 struct ieee80211_vif *vif, 2659 struct ieee80211_bss_conf *bss_conf, 2660 u64 changes) 2661 { 2662 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2663 2664 mutex_lock(&mvm->mutex); 2665 2666 if (changes & BSS_CHANGED_IDLE && !bss_conf->idle) 2667 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, true); 2668 2669 switch (vif->type) { 2670 case NL80211_IFTYPE_STATION: 2671 iwl_mvm_bss_info_changed_station(mvm, vif, bss_conf, changes); 2672 break; 2673 case NL80211_IFTYPE_AP: 2674 case NL80211_IFTYPE_ADHOC: 2675 iwl_mvm_bss_info_changed_ap_ibss(mvm, vif, bss_conf, changes); 2676 break; 2677 case NL80211_IFTYPE_MONITOR: 2678 if (changes & BSS_CHANGED_MU_GROUPS) 2679 iwl_mvm_update_mu_groups(mvm, vif); 2680 break; 2681 default: 2682 /* shouldn't happen */ 2683 WARN_ON_ONCE(1); 2684 } 2685 2686 if (changes & 
BSS_CHANGED_TXPOWER) { 2687 IWL_DEBUG_CALIB(mvm, "Changing TX Power to %d dBm\n", 2688 bss_conf->txpower); 2689 iwl_mvm_set_tx_power(mvm, vif, bss_conf->txpower); 2690 } 2691 2692 mutex_unlock(&mvm->mutex); 2693 } 2694 2695 static int iwl_mvm_mac_hw_scan(struct ieee80211_hw *hw, 2696 struct ieee80211_vif *vif, 2697 struct ieee80211_scan_request *hw_req) 2698 { 2699 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2700 int ret; 2701 2702 if (hw_req->req.n_channels == 0 || 2703 hw_req->req.n_channels > mvm->fw->ucode_capa.n_scan_channels) 2704 return -EINVAL; 2705 2706 mutex_lock(&mvm->mutex); 2707 ret = iwl_mvm_reg_scan_start(mvm, vif, &hw_req->req, &hw_req->ies); 2708 mutex_unlock(&mvm->mutex); 2709 2710 return ret; 2711 } 2712 2713 static void iwl_mvm_mac_cancel_hw_scan(struct ieee80211_hw *hw, 2714 struct ieee80211_vif *vif) 2715 { 2716 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2717 2718 mutex_lock(&mvm->mutex); 2719 2720 /* Due to a race condition, it's possible that mac80211 asks 2721 * us to stop a hw_scan when it's already stopped. This can 2722 * happen, for instance, if we stopped the scan ourselves, 2723 * called ieee80211_scan_completed() and the userspace called 2724 * cancel scan scan before ieee80211_scan_work() could run. 2725 * To handle that, simply return if the scan is not running. 2726 */ 2727 if (mvm->scan_status & IWL_MVM_SCAN_REGULAR) 2728 iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_REGULAR, true); 2729 2730 mutex_unlock(&mvm->mutex); 2731 } 2732 2733 static void 2734 iwl_mvm_mac_allow_buffered_frames(struct ieee80211_hw *hw, 2735 struct ieee80211_sta *sta, u16 tids, 2736 int num_frames, 2737 enum ieee80211_frame_release_type reason, 2738 bool more_data) 2739 { 2740 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2741 2742 /* Called when we need to transmit (a) frame(s) from mac80211 */ 2743 2744 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2745 tids, more_data, false); 2746 } 2747 2748 static void 2749 iwl_mvm_mac_release_buffered_frames(struct ieee80211_hw *hw, 2750 struct ieee80211_sta *sta, u16 tids, 2751 int num_frames, 2752 enum ieee80211_frame_release_type reason, 2753 bool more_data) 2754 { 2755 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2756 2757 /* Called when we need to transmit (a) frame(s) from agg or dqa queue */ 2758 2759 iwl_mvm_sta_modify_sleep_tx_count(mvm, sta, reason, num_frames, 2760 tids, more_data, true); 2761 } 2762 2763 static void __iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2764 enum sta_notify_cmd cmd, 2765 struct ieee80211_sta *sta) 2766 { 2767 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2768 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2769 unsigned long txqs = 0, tids = 0; 2770 int tid; 2771 2772 /* 2773 * If we have TVQM then we get too high queue numbers - luckily 2774 * we really shouldn't get here with that because such hardware 2775 * should have firmware supporting buffer station offload. 
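 * (with TVQM, queue numbers can exceed BITS_PER_LONG and would not fit in
 * the txqs bitmap used below)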
2776 */ 2777 if (WARN_ON(iwl_mvm_has_new_tx_api(mvm))) 2778 return; 2779 2780 spin_lock_bh(&mvmsta->lock); 2781 for (tid = 0; tid < ARRAY_SIZE(mvmsta->tid_data); tid++) { 2782 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2783 2784 if (tid_data->txq_id == IWL_MVM_INVALID_QUEUE) 2785 continue; 2786 2787 __set_bit(tid_data->txq_id, &txqs); 2788 2789 if (iwl_mvm_tid_queued(mvm, tid_data) == 0) 2790 continue; 2791 2792 __set_bit(tid, &tids); 2793 } 2794 2795 switch (cmd) { 2796 case STA_NOTIFY_SLEEP: 2797 for_each_set_bit(tid, &tids, IWL_MAX_TID_COUNT) 2798 ieee80211_sta_set_buffered(sta, tid, true); 2799 2800 if (txqs) 2801 iwl_trans_freeze_txq_timer(mvm->trans, txqs, true); 2802 /* 2803 * The fw updates the STA to be asleep. Tx packets on the Tx 2804 * queues to this station will not be transmitted. The fw will 2805 * send a Tx response with TX_STATUS_FAIL_DEST_PS. 2806 */ 2807 break; 2808 case STA_NOTIFY_AWAKE: 2809 if (WARN_ON(mvmsta->sta_id == IWL_MVM_INVALID_STA)) 2810 break; 2811 2812 if (txqs) 2813 iwl_trans_freeze_txq_timer(mvm->trans, txqs, false); 2814 iwl_mvm_sta_modify_ps_wake(mvm, sta); 2815 break; 2816 default: 2817 break; 2818 } 2819 spin_unlock_bh(&mvmsta->lock); 2820 } 2821 2822 static void iwl_mvm_mac_sta_notify(struct ieee80211_hw *hw, 2823 struct ieee80211_vif *vif, 2824 enum sta_notify_cmd cmd, 2825 struct ieee80211_sta *sta) 2826 { 2827 __iwl_mvm_mac_sta_notify(hw, cmd, sta); 2828 } 2829 2830 void iwl_mvm_sta_pm_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb) 2831 { 2832 struct iwl_rx_packet *pkt = rxb_addr(rxb); 2833 struct iwl_mvm_pm_state_notification *notif = (void *)pkt->data; 2834 struct ieee80211_sta *sta; 2835 struct iwl_mvm_sta *mvmsta; 2836 bool sleeping = (notif->type != IWL_MVM_PM_EVENT_AWAKE); 2837 2838 if (WARN_ON(notif->sta_id >= mvm->fw->ucode_capa.num_stations)) 2839 return; 2840 2841 rcu_read_lock(); 2842 sta = rcu_dereference(mvm->fw_id_to_mac_id[notif->sta_id]); 2843 if (WARN_ON(IS_ERR_OR_NULL(sta))) { 2844 rcu_read_unlock(); 2845 return; 2846 } 2847 2848 mvmsta = iwl_mvm_sta_from_mac80211(sta); 2849 2850 if (!mvmsta->vif || 2851 mvmsta->vif->type != NL80211_IFTYPE_AP) { 2852 rcu_read_unlock(); 2853 return; 2854 } 2855 2856 if (mvmsta->sleeping != sleeping) { 2857 mvmsta->sleeping = sleeping; 2858 __iwl_mvm_mac_sta_notify(mvm->hw, 2859 sleeping ? STA_NOTIFY_SLEEP : STA_NOTIFY_AWAKE, 2860 sta); 2861 ieee80211_sta_ps_transition(sta, sleeping); 2862 } 2863 2864 if (sleeping) { 2865 switch (notif->type) { 2866 case IWL_MVM_PM_EVENT_AWAKE: 2867 case IWL_MVM_PM_EVENT_ASLEEP: 2868 break; 2869 case IWL_MVM_PM_EVENT_UAPSD: 2870 ieee80211_sta_uapsd_trigger(sta, IEEE80211_NUM_TIDS); 2871 break; 2872 case IWL_MVM_PM_EVENT_PS_POLL: 2873 ieee80211_sta_pspoll(sta); 2874 break; 2875 default: 2876 break; 2877 } 2878 } 2879 2880 rcu_read_unlock(); 2881 } 2882 2883 static void iwl_mvm_sta_pre_rcu_remove(struct ieee80211_hw *hw, 2884 struct ieee80211_vif *vif, 2885 struct ieee80211_sta *sta) 2886 { 2887 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 2888 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2889 2890 /* 2891 * This is called before mac80211 does RCU synchronisation, 2892 * so here we already invalidate our internal RCU-protected 2893 * station pointer. The rest of the code will thus no longer 2894 * be able to find the station this way, and we don't rely 2895 * on further RCU synchronisation after the sta_state() 2896 * callback deleted the station. 
*/ 2898 mutex_lock(&mvm->mutex); 2899 if (sta == rcu_access_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id])) 2900 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 2901 ERR_PTR(-ENOENT)); 2902 2903 mutex_unlock(&mvm->mutex); 2904 } 2905 2906 static void iwl_mvm_check_uapsd(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2907 const u8 *bssid) 2908 { 2909 int i; 2910 2911 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2912 struct iwl_mvm_tcm_mac *mdata; 2913 2914 mdata = &mvm->tcm.data[iwl_mvm_vif_from_mac80211(vif)->id]; 2915 ewma_rate_init(&mdata->uapsd_nonagg_detect.rate); 2916 mdata->opened_rx_ba_sessions = false; 2917 } 2918 2919 if (!(mvm->fw->ucode_capa.flags & IWL_UCODE_TLV_FLAGS_UAPSD_SUPPORT)) 2920 return; 2921 2922 if (vif->p2p && !iwl_mvm_is_p2p_scm_uapsd_supported(mvm)) { 2923 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2924 return; 2925 } 2926 2927 if (!vif->p2p && 2928 (iwlwifi_mod_params.uapsd_disable & IWL_DISABLE_UAPSD_BSS)) { 2929 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2930 return; 2931 } 2932 2933 for (i = 0; i < IWL_MVM_UAPSD_NOAGG_LIST_LEN; i++) { 2934 if (ether_addr_equal(mvm->uapsd_noagg_bssids[i].addr, bssid)) { 2935 vif->driver_flags &= ~IEEE80211_VIF_SUPPORTS_UAPSD; 2936 return; 2937 } 2938 } 2939 2940 vif->driver_flags |= IEEE80211_VIF_SUPPORTS_UAPSD; 2941 } 2942 2943 static void 2944 iwl_mvm_tdls_check_trigger(struct iwl_mvm *mvm, 2945 struct ieee80211_vif *vif, u8 *peer_addr, 2946 enum nl80211_tdls_operation action) 2947 { 2948 struct iwl_fw_dbg_trigger_tlv *trig; 2949 struct iwl_fw_dbg_trigger_tdls *tdls_trig; 2950 2951 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 2952 FW_DBG_TRIGGER_TDLS); 2953 if (!trig) 2954 return; 2955 2956 tdls_trig = (void *)trig->data; 2957 2958 if (!(tdls_trig->action_bitmap & BIT(action))) 2959 return; 2960 2961 if (tdls_trig->peer_mode && 2962 memcmp(tdls_trig->peer, peer_addr, ETH_ALEN) != 0) 2963 return; 2964 2965 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 2966 "TDLS event occurred, peer %pM, action %d", 2967 peer_addr, action); 2968 } 2969 2970 struct iwl_mvm_he_obss_narrow_bw_ru_data { 2971 bool tolerated; 2972 }; 2973 2974 static void iwl_mvm_check_he_obss_narrow_bw_ru_iter(struct wiphy *wiphy, 2975 struct cfg80211_bss *bss, 2976 void *_data) 2977 { 2978 struct iwl_mvm_he_obss_narrow_bw_ru_data *data = _data; 2979 const struct cfg80211_bss_ies *ies; 2980 const struct element *elem; 2981 2982 rcu_read_lock(); 2983 ies = rcu_dereference(bss->ies); 2984 elem = cfg80211_find_elem(WLAN_EID_EXT_CAPABILITY, ies->data, 2985 ies->len); 2986 2987 if (!elem || elem->datalen < 11 || 2988 !(elem->data[10] & 2989 WLAN_EXT_CAPA10_OBSS_NARROW_BW_RU_TOLERANCE_SUPPORT)) { 2990 data->tolerated = false; 2991 } 2992 rcu_read_unlock(); 2993 } 2994 2995 static void iwl_mvm_check_he_obss_narrow_bw_ru(struct ieee80211_hw *hw, 2996 struct ieee80211_vif *vif) 2997 { 2998 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2999 struct iwl_mvm_he_obss_narrow_bw_ru_data iter_data = { 3000 .tolerated = true, 3001 }; 3002 3003 if (!(vif->bss_conf.chandef.chan->flags & IEEE80211_CHAN_RADAR)) { 3004 mvmvif->he_ru_2mhz_block = false; 3005 return; 3006 } 3007 3008 cfg80211_bss_iter(hw->wiphy, &vif->bss_conf.chandef, 3009 iwl_mvm_check_he_obss_narrow_bw_ru_iter, 3010 &iter_data); 3011 3012 /* 3013 * If there is at least one AP on radar channel that cannot 3014 * tolerate 26-tone RU UL OFDMA transmissions using HE TB PPDU.
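 * In that case, block 26-tone RU OFDMA transmissions on our side as well;
 * STA_CTXT_HE_RU_2MHZ_BLOCK is then set when configuring the HE station.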
3015 */ 3016 mvmvif->he_ru_2mhz_block = !iter_data.tolerated; 3017 } 3018 3019 static void iwl_mvm_reset_cca_40mhz_workaround(struct iwl_mvm *mvm, 3020 struct ieee80211_vif *vif) 3021 { 3022 struct ieee80211_supported_band *sband; 3023 const struct ieee80211_sta_he_cap *he_cap; 3024 3025 if (vif->type != NL80211_IFTYPE_STATION) 3026 return; 3027 3028 if (!mvm->cca_40mhz_workaround) 3029 return; 3030 3031 /* decrement and check that we reached zero */ 3032 mvm->cca_40mhz_workaround--; 3033 if (mvm->cca_40mhz_workaround) 3034 return; 3035 3036 sband = mvm->hw->wiphy->bands[NL80211_BAND_2GHZ]; 3037 3038 sband->ht_cap.cap |= IEEE80211_HT_CAP_SUP_WIDTH_20_40; 3039 3040 he_cap = ieee80211_get_he_iftype_cap(sband, 3041 ieee80211_vif_type_p2p(vif)); 3042 3043 if (he_cap) { 3044 /* we know that ours is writable */ 3045 struct ieee80211_sta_he_cap *he = (void *)(uintptr_t)he_cap; 3046 3047 he->he_cap_elem.phy_cap_info[0] |= 3048 IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G; 3049 } 3050 } 3051 3052 static void iwl_mvm_mei_host_associated(struct iwl_mvm *mvm, 3053 struct ieee80211_vif *vif, 3054 struct iwl_mvm_sta *mvm_sta) 3055 { 3056 #if IS_ENABLED(CONFIG_IWLMEI) 3057 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3058 struct iwl_mei_conn_info conn_info = { 3059 .ssid_len = vif->bss_conf.ssid_len, 3060 .channel = vif->bss_conf.chandef.chan->hw_value, 3061 }; 3062 3063 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3064 return; 3065 3066 if (!mvm->mei_registered) 3067 return; 3068 3069 switch (mvm_sta->pairwise_cipher) { 3070 case WLAN_CIPHER_SUITE_CCMP: 3071 conn_info.pairwise_cipher = IWL_MEI_CIPHER_CCMP; 3072 break; 3073 case WLAN_CIPHER_SUITE_GCMP: 3074 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP; 3075 break; 3076 case WLAN_CIPHER_SUITE_GCMP_256: 3077 conn_info.pairwise_cipher = IWL_MEI_CIPHER_GCMP_256; 3078 break; 3079 case 0: 3080 /* open profile */ 3081 break; 3082 default: 3083 /* cipher not supported, don't send anything to iwlmei */ 3084 return; 3085 } 3086 3087 switch (mvmvif->rekey_data.akm) { 3088 case WLAN_AKM_SUITE_SAE & 0xff: 3089 conn_info.auth_mode = IWL_MEI_AKM_AUTH_SAE; 3090 break; 3091 case WLAN_AKM_SUITE_PSK & 0xff: 3092 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA_PSK; 3093 break; 3094 case WLAN_AKM_SUITE_8021X & 0xff: 3095 conn_info.auth_mode = IWL_MEI_AKM_AUTH_RSNA; 3096 break; 3097 case 0: 3098 /* open profile */ 3099 conn_info.auth_mode = IWL_MEI_AKM_AUTH_OPEN; 3100 break; 3101 default: 3102 /* auth method / AKM not supported */ 3103 /* TODO: All the FT vesions of these? */ 3104 return; 3105 } 3106 3107 memcpy(conn_info.ssid, vif->bss_conf.ssid, vif->bss_conf.ssid_len); 3108 memcpy(conn_info.bssid, vif->bss_conf.bssid, ETH_ALEN); 3109 3110 /* TODO: add support for collocated AP data */ 3111 iwl_mei_host_associated(&conn_info, NULL); 3112 #endif 3113 } 3114 3115 static int iwl_mvm_mac_sta_state(struct ieee80211_hw *hw, 3116 struct ieee80211_vif *vif, 3117 struct ieee80211_sta *sta, 3118 enum ieee80211_sta_state old_state, 3119 enum ieee80211_sta_state new_state) 3120 { 3121 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3122 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3123 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3124 int ret; 3125 3126 IWL_DEBUG_MAC80211(mvm, "station %pM state change %d->%d\n", 3127 sta->addr, old_state, new_state); 3128 3129 /* this would be a mac80211 bug ... 
but don't crash */ 3130 if (WARN_ON_ONCE(!mvmvif->phy_ctxt)) 3131 return test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, &mvm->status) ? 0 : -EINVAL; 3132 3133 /* 3134 * If we are in a STA removal flow and in DQA mode: 3135 * 3136 * This is after the sync_rcu part, so the queues have already been 3137 * flushed. No more TXs on their way in mac80211's path, and no more in 3138 * the queues. 3139 * Also, we won't be getting any new TX frames for this station. 3140 * What we might have are deferred TX frames that need to be taken care 3141 * of. 3142 * 3143 * Drop any still-queued deferred-frame before removing the STA, and 3144 * make sure the worker is no longer handling frames for this STA. 3145 */ 3146 if (old_state == IEEE80211_STA_NONE && 3147 new_state == IEEE80211_STA_NOTEXIST) { 3148 flush_work(&mvm->add_stream_wk); 3149 3150 /* 3151 * No need to make sure deferred TX indication is off since the 3152 * worker will already remove it if it was on 3153 */ 3154 3155 /* 3156 * Additionally, reset the 40 MHz capability if we disconnected 3157 * from the AP now. 3158 */ 3159 iwl_mvm_reset_cca_40mhz_workaround(mvm, vif); 3160 } 3161 3162 mutex_lock(&mvm->mutex); 3163 /* track whether or not the station is associated */ 3164 mvm_sta->sta_state = new_state; 3165 3166 if (old_state == IEEE80211_STA_NOTEXIST && 3167 new_state == IEEE80211_STA_NONE) { 3168 /* 3169 * Firmware bug - it'll crash if the beacon interval is less 3170 * than 16. We can't avoid connecting at all, so refuse the 3171 * station state change, this will cause mac80211 to abandon 3172 * attempts to connect to this AP, and eventually wpa_s will 3173 * blocklist the AP... 3174 */ 3175 if (vif->type == NL80211_IFTYPE_STATION && 3176 vif->bss_conf.beacon_int < 16) { 3177 IWL_ERR(mvm, 3178 "AP %pM beacon interval is %d, refusing due to firmware bug!\n", 3179 sta->addr, vif->bss_conf.beacon_int); 3180 ret = -EINVAL; 3181 goto out_unlock; 3182 } 3183 3184 if (vif->type == NL80211_IFTYPE_STATION) 3185 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3186 3187 if (sta->tdls && 3188 (vif->p2p || 3189 iwl_mvm_tdls_sta_count(mvm, NULL) == 3190 IWL_MVM_TDLS_STA_COUNT || 3191 iwl_mvm_phy_ctx_count(mvm) > 1)) { 3192 IWL_DEBUG_MAC80211(mvm, "refusing TDLS sta\n"); 3193 ret = -EBUSY; 3194 goto out_unlock; 3195 } 3196 3197 ret = iwl_mvm_add_sta(mvm, vif, sta); 3198 if (sta->tdls && ret == 0) { 3199 iwl_mvm_recalc_tdls_state(mvm, vif, true); 3200 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3201 NL80211_TDLS_SETUP); 3202 } 3203 3204 sta->max_rc_amsdu_len = 1; 3205 } else if (old_state == IEEE80211_STA_NONE && 3206 new_state == IEEE80211_STA_AUTH) { 3207 /* 3208 * EBS may be disabled due to previous failures reported by FW. 3209 * Reset EBS status here assuming environment has been changed. 
3210 */ 3211 mvm->last_ebs_successful = true; 3212 iwl_mvm_check_uapsd(mvm, vif, sta->addr); 3213 ret = 0; 3214 } else if (old_state == IEEE80211_STA_AUTH && 3215 new_state == IEEE80211_STA_ASSOC) { 3216 if (vif->type == NL80211_IFTYPE_AP) { 3217 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3218 mvmvif->ap_assoc_sta_count++; 3219 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3220 if (vif->bss_conf.he_support && 3221 !iwlwifi_mod_params.disable_11ax) 3222 iwl_mvm_cfg_he_sta(mvm, vif, mvm_sta->sta_id); 3223 } else if (vif->type == NL80211_IFTYPE_STATION) { 3224 vif->bss_conf.he_support = sta->deflink.he_cap.has_he; 3225 3226 mvmvif->he_ru_2mhz_block = false; 3227 if (sta->deflink.he_cap.has_he) 3228 iwl_mvm_check_he_obss_narrow_bw_ru(hw, vif); 3229 3230 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3231 } 3232 3233 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3234 false); 3235 ret = iwl_mvm_update_sta(mvm, vif, sta); 3236 } else if (old_state == IEEE80211_STA_ASSOC && 3237 new_state == IEEE80211_STA_AUTHORIZED) { 3238 ret = 0; 3239 3240 /* we don't support TDLS during DCM */ 3241 if (iwl_mvm_phy_ctx_count(mvm) > 1) 3242 iwl_mvm_teardown_tdls_peers(mvm); 3243 3244 if (sta->tdls) { 3245 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3246 NL80211_TDLS_ENABLE_LINK); 3247 } else { 3248 /* enable beacon filtering */ 3249 WARN_ON(iwl_mvm_enable_beacon_filter(mvm, vif, 0)); 3250 3251 mvmvif->authorized = 1; 3252 3253 /* 3254 * Now that the station is authorized, i.e., keys were already 3255 * installed, need to indicate to the FW that 3256 * multicast data frames can be forwarded to the driver 3257 */ 3258 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3259 iwl_mvm_mei_host_associated(mvm, vif, mvm_sta); 3260 } 3261 3262 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3263 true); 3264 } else if (old_state == IEEE80211_STA_AUTHORIZED && 3265 new_state == IEEE80211_STA_ASSOC) { 3266 /* once we move into assoc state, need to update rate scale to 3267 * disable using wide bandwidth 3268 */ 3269 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3270 false); 3271 if (!sta->tdls) { 3272 /* Multicast data frames are no longer allowed */ 3273 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3274 3275 /* 3276 * Set this after the above iwl_mvm_mac_ctxt_changed() 3277 * to avoid sending high prio again for a little time. 
3278 */ 3279 mvmvif->authorized = 0; 3280 3281 /* disable beacon filtering */ 3282 ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 3283 WARN_ON(ret && 3284 !test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3285 &mvm->status)); 3286 } 3287 ret = 0; 3288 } else if (old_state == IEEE80211_STA_ASSOC && 3289 new_state == IEEE80211_STA_AUTH) { 3290 if (vif->type == NL80211_IFTYPE_AP) { 3291 mvmvif->ap_assoc_sta_count--; 3292 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3293 } else if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3294 iwl_mvm_stop_session_protection(mvm, vif); 3295 ret = 0; 3296 } else if (old_state == IEEE80211_STA_AUTH && 3297 new_state == IEEE80211_STA_NONE) { 3298 ret = 0; 3299 } else if (old_state == IEEE80211_STA_NONE && 3300 new_state == IEEE80211_STA_NOTEXIST) { 3301 if (vif->type == NL80211_IFTYPE_STATION && !sta->tdls) 3302 iwl_mvm_stop_session_protection(mvm, vif); 3303 ret = iwl_mvm_rm_sta(mvm, vif, sta); 3304 if (sta->tdls) { 3305 iwl_mvm_recalc_tdls_state(mvm, vif, false); 3306 iwl_mvm_tdls_check_trigger(mvm, vif, sta->addr, 3307 NL80211_TDLS_DISABLE_LINK); 3308 } 3309 3310 if (unlikely(ret && 3311 test_bit(IWL_MVM_STATUS_HW_RESTART_REQUESTED, 3312 &mvm->status))) 3313 ret = 0; 3314 } else { 3315 ret = -EIO; 3316 } 3317 out_unlock: 3318 mutex_unlock(&mvm->mutex); 3319 3320 if (sta->tdls && ret == 0) { 3321 if (old_state == IEEE80211_STA_NOTEXIST && 3322 new_state == IEEE80211_STA_NONE) 3323 ieee80211_reserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3324 else if (old_state == IEEE80211_STA_NONE && 3325 new_state == IEEE80211_STA_NOTEXIST) 3326 ieee80211_unreserve_tid(sta, IWL_MVM_TDLS_FW_TID); 3327 } 3328 3329 return ret; 3330 } 3331 3332 static int iwl_mvm_mac_set_rts_threshold(struct ieee80211_hw *hw, u32 value) 3333 { 3334 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3335 3336 mvm->rts_threshold = value; 3337 3338 return 0; 3339 } 3340 3341 static void iwl_mvm_sta_rc_update(struct ieee80211_hw *hw, 3342 struct ieee80211_vif *vif, 3343 struct ieee80211_sta *sta, u32 changed) 3344 { 3345 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3346 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3347 3348 if (changed & (IEEE80211_RC_BW_CHANGED | 3349 IEEE80211_RC_SUPP_RATES_CHANGED | 3350 IEEE80211_RC_NSS_CHANGED)) 3351 iwl_mvm_rs_rate_init(mvm, sta, mvmvif->phy_ctxt->channel->band, 3352 true); 3353 3354 if (vif->type == NL80211_IFTYPE_STATION && 3355 changed & IEEE80211_RC_NSS_CHANGED) 3356 iwl_mvm_sf_update(mvm, vif, false); 3357 } 3358 3359 static int iwl_mvm_mac_conf_tx(struct ieee80211_hw *hw, 3360 struct ieee80211_vif *vif, u16 ac, 3361 const struct ieee80211_tx_queue_params *params) 3362 { 3363 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3364 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3365 3366 mvmvif->queue_params[ac] = *params; 3367 3368 /* 3369 * No need to update right away, we'll get BSS_CHANGED_QOS 3370 * The exception is P2P_DEVICE interface which needs immediate update. 
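 * (a P2P Device is not part of a BSS, so no BSS_CHANGED_QOS notification
 * will follow for it)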
3371 */ 3372 if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 3373 int ret; 3374 3375 mutex_lock(&mvm->mutex); 3376 ret = iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 3377 mutex_unlock(&mvm->mutex); 3378 return ret; 3379 } 3380 return 0; 3381 } 3382 3383 static void iwl_mvm_mac_mgd_prepare_tx(struct ieee80211_hw *hw, 3384 struct ieee80211_vif *vif, 3385 struct ieee80211_prep_tx_info *info) 3386 { 3387 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3388 3389 mutex_lock(&mvm->mutex); 3390 iwl_mvm_protect_assoc(mvm, vif, info->duration); 3391 mutex_unlock(&mvm->mutex); 3392 } 3393 3394 static void iwl_mvm_mac_mgd_complete_tx(struct ieee80211_hw *hw, 3395 struct ieee80211_vif *vif, 3396 struct ieee80211_prep_tx_info *info) 3397 { 3398 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3399 3400 /* for successful cases (auth/assoc), don't cancel session protection */ 3401 if (info->success) 3402 return; 3403 3404 mutex_lock(&mvm->mutex); 3405 iwl_mvm_stop_session_protection(mvm, vif); 3406 mutex_unlock(&mvm->mutex); 3407 } 3408 3409 static int iwl_mvm_mac_sched_scan_start(struct ieee80211_hw *hw, 3410 struct ieee80211_vif *vif, 3411 struct cfg80211_sched_scan_request *req, 3412 struct ieee80211_scan_ies *ies) 3413 { 3414 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3415 3416 int ret; 3417 3418 mutex_lock(&mvm->mutex); 3419 3420 if (!vif->bss_conf.idle) { 3421 ret = -EBUSY; 3422 goto out; 3423 } 3424 3425 ret = iwl_mvm_sched_scan_start(mvm, vif, req, ies, IWL_MVM_SCAN_SCHED); 3426 3427 out: 3428 mutex_unlock(&mvm->mutex); 3429 return ret; 3430 } 3431 3432 static int iwl_mvm_mac_sched_scan_stop(struct ieee80211_hw *hw, 3433 struct ieee80211_vif *vif) 3434 { 3435 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3436 int ret; 3437 3438 mutex_lock(&mvm->mutex); 3439 3440 /* Due to a race condition, it's possible that mac80211 asks 3441 * us to stop a sched_scan when it's already stopped. This 3442 * can happen, for instance, if we stopped the scan ourselves, 3443 * called ieee80211_sched_scan_stopped() and the userspace called 3444 * stop sched scan scan before ieee80211_sched_scan_stopped_work() 3445 * could run. To handle this, simply return if the scan is 3446 * not running. 
3447 */ 3448 if (!(mvm->scan_status & IWL_MVM_SCAN_SCHED)) { 3449 mutex_unlock(&mvm->mutex); 3450 return 0; 3451 } 3452 3453 ret = iwl_mvm_scan_stop(mvm, IWL_MVM_SCAN_SCHED, false); 3454 mutex_unlock(&mvm->mutex); 3455 iwl_mvm_wait_for_async_handlers(mvm); 3456 3457 return ret; 3458 } 3459 3460 static int __iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3461 enum set_key_cmd cmd, 3462 struct ieee80211_vif *vif, 3463 struct ieee80211_sta *sta, 3464 struct ieee80211_key_conf *key) 3465 { 3466 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3467 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3468 struct iwl_mvm_sta *mvmsta = NULL; 3469 struct iwl_mvm_key_pn *ptk_pn; 3470 int keyidx = key->keyidx; 3471 int ret, i; 3472 u8 key_offset; 3473 3474 if (sta) 3475 mvmsta = iwl_mvm_sta_from_mac80211(sta); 3476 3477 switch (key->cipher) { 3478 case WLAN_CIPHER_SUITE_TKIP: 3479 if (!mvm->trans->trans_cfg->gen2) { 3480 key->flags |= IEEE80211_KEY_FLAG_GENERATE_MMIC; 3481 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3482 } else if (vif->type == NL80211_IFTYPE_STATION) { 3483 key->flags |= IEEE80211_KEY_FLAG_PUT_MIC_SPACE; 3484 } else { 3485 IWL_DEBUG_MAC80211(mvm, "Use SW encryption for TKIP\n"); 3486 return -EOPNOTSUPP; 3487 } 3488 break; 3489 case WLAN_CIPHER_SUITE_CCMP: 3490 case WLAN_CIPHER_SUITE_GCMP: 3491 case WLAN_CIPHER_SUITE_GCMP_256: 3492 if (!iwl_mvm_has_new_tx_api(mvm)) 3493 key->flags |= IEEE80211_KEY_FLAG_PUT_IV_SPACE; 3494 break; 3495 case WLAN_CIPHER_SUITE_AES_CMAC: 3496 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3497 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3498 WARN_ON_ONCE(!ieee80211_hw_check(hw, MFP_CAPABLE)); 3499 break; 3500 case WLAN_CIPHER_SUITE_WEP40: 3501 case WLAN_CIPHER_SUITE_WEP104: 3502 if (vif->type == NL80211_IFTYPE_STATION) 3503 break; 3504 if (iwl_mvm_has_new_tx_api(mvm)) 3505 return -EOPNOTSUPP; 3506 /* support HW crypto on TX */ 3507 return 0; 3508 default: 3509 return -EOPNOTSUPP; 3510 } 3511 3512 switch (cmd) { 3513 case SET_KEY: 3514 if (keyidx == 6 || keyidx == 7) 3515 rcu_assign_pointer(mvmvif->bcn_prot.keys[keyidx - 6], 3516 key); 3517 3518 if ((vif->type == NL80211_IFTYPE_ADHOC || 3519 vif->type == NL80211_IFTYPE_AP) && !sta) { 3520 /* 3521 * GTK on AP interface is a TX-only key, return 0; 3522 * on IBSS they're per-station and because we're lazy 3523 * we don't support them for RX, so do the same. 3524 * CMAC/GMAC in AP/IBSS modes must be done in software. 3525 */ 3526 if (key->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3527 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3528 key->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) { 3529 ret = -EOPNOTSUPP; 3530 break; 3531 } 3532 3533 if (key->cipher != WLAN_CIPHER_SUITE_GCMP && 3534 key->cipher != WLAN_CIPHER_SUITE_GCMP_256 && 3535 !iwl_mvm_has_new_tx_api(mvm)) { 3536 key->hw_key_idx = STA_KEY_IDX_INVALID; 3537 ret = 0; 3538 break; 3539 } 3540 3541 if (!mvmvif->ap_ibss_active) { 3542 for (i = 0; 3543 i < ARRAY_SIZE(mvmvif->ap_early_keys); 3544 i++) { 3545 if (!mvmvif->ap_early_keys[i]) { 3546 mvmvif->ap_early_keys[i] = key; 3547 break; 3548 } 3549 } 3550 3551 if (i >= ARRAY_SIZE(mvmvif->ap_early_keys)) 3552 ret = -ENOSPC; 3553 else 3554 ret = 0; 3555 3556 break; 3557 } 3558 } 3559 3560 /* During FW restart, in order to restore the state as it was, 3561 * don't try to reprogram keys we previously failed for. 
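 * (such keys were left with hw_key_idx == STA_KEY_IDX_INVALID by the
 * failure path below)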
3562 */ 3563 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3564 key->hw_key_idx == STA_KEY_IDX_INVALID) { 3565 IWL_DEBUG_MAC80211(mvm, 3566 "skip invalid idx key programming during restart\n"); 3567 ret = 0; 3568 break; 3569 } 3570 3571 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) && 3572 mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3573 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3574 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3575 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3576 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3577 struct ieee80211_key_seq seq; 3578 int tid, q; 3579 3580 WARN_ON(rcu_access_pointer(mvmsta->ptk_pn[keyidx])); 3581 ptk_pn = kzalloc(struct_size(ptk_pn, q, 3582 mvm->trans->num_rx_queues), 3583 GFP_KERNEL); 3584 if (!ptk_pn) { 3585 ret = -ENOMEM; 3586 break; 3587 } 3588 3589 for (tid = 0; tid < IWL_MAX_TID_COUNT; tid++) { 3590 ieee80211_get_key_rx_seq(key, tid, &seq); 3591 for (q = 0; q < mvm->trans->num_rx_queues; q++) 3592 memcpy(ptk_pn->q[q].pn[tid], 3593 seq.ccmp.pn, 3594 IEEE80211_CCMP_PN_LEN); 3595 } 3596 3597 rcu_assign_pointer(mvmsta->ptk_pn[keyidx], ptk_pn); 3598 } 3599 3600 /* in HW restart reuse the index, otherwise request a new one */ 3601 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 3602 key_offset = key->hw_key_idx; 3603 else 3604 key_offset = STA_KEY_IDX_INVALID; 3605 3606 if (mvmsta && key->flags & IEEE80211_KEY_FLAG_PAIRWISE) 3607 mvmsta->pairwise_cipher = key->cipher; 3608 3609 IWL_DEBUG_MAC80211(mvm, "set hwcrypto key\n"); 3610 ret = iwl_mvm_set_sta_key(mvm, vif, sta, key, key_offset); 3611 if (ret) { 3612 IWL_WARN(mvm, "set key failed\n"); 3613 key->hw_key_idx = STA_KEY_IDX_INVALID; 3614 /* 3615 * can't add key for RX, but we don't need it 3616 * in the device for TX so still return 0, 3617 * unless we have new TX API where we cannot 3618 * put key material into the TX_CMD 3619 */ 3620 if (iwl_mvm_has_new_tx_api(mvm)) 3621 ret = -EOPNOTSUPP; 3622 else 3623 ret = 0; 3624 } 3625 3626 break; 3627 case DISABLE_KEY: 3628 if (keyidx == 6 || keyidx == 7) 3629 RCU_INIT_POINTER(mvmvif->bcn_prot.keys[keyidx - 6], 3630 NULL); 3631 3632 ret = -ENOENT; 3633 for (i = 0; i < ARRAY_SIZE(mvmvif->ap_early_keys); i++) { 3634 if (mvmvif->ap_early_keys[i] == key) { 3635 mvmvif->ap_early_keys[i] = NULL; 3636 ret = 0; 3637 } 3638 } 3639 3640 /* found in pending list - don't do anything else */ 3641 if (ret == 0) 3642 break; 3643 3644 if (key->hw_key_idx == STA_KEY_IDX_INVALID) { 3645 ret = 0; 3646 break; 3647 } 3648 3649 if (mvmsta && iwl_mvm_has_new_rx_api(mvm) && 3650 key->flags & IEEE80211_KEY_FLAG_PAIRWISE && 3651 (key->cipher == WLAN_CIPHER_SUITE_CCMP || 3652 key->cipher == WLAN_CIPHER_SUITE_GCMP || 3653 key->cipher == WLAN_CIPHER_SUITE_GCMP_256)) { 3654 ptk_pn = rcu_dereference_protected( 3655 mvmsta->ptk_pn[keyidx], 3656 lockdep_is_held(&mvm->mutex)); 3657 RCU_INIT_POINTER(mvmsta->ptk_pn[keyidx], NULL); 3658 if (ptk_pn) 3659 kfree_rcu(ptk_pn, rcu_head); 3660 } 3661 3662 IWL_DEBUG_MAC80211(mvm, "disable hwcrypto key\n"); 3663 ret = iwl_mvm_remove_sta_key(mvm, vif, sta, key); 3664 break; 3665 default: 3666 ret = -EINVAL; 3667 } 3668 3669 return ret; 3670 } 3671 3672 static int iwl_mvm_mac_set_key(struct ieee80211_hw *hw, 3673 enum set_key_cmd cmd, 3674 struct ieee80211_vif *vif, 3675 struct ieee80211_sta *sta, 3676 struct ieee80211_key_conf *key) 3677 { 3678 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3679 int ret; 3680 3681 mutex_lock(&mvm->mutex); 3682 ret = __iwl_mvm_mac_set_key(hw, cmd, vif, sta, key); 3683 
mutex_unlock(&mvm->mutex); 3684 3685 return ret; 3686 } 3687 3688 static void iwl_mvm_mac_update_tkip_key(struct ieee80211_hw *hw, 3689 struct ieee80211_vif *vif, 3690 struct ieee80211_key_conf *keyconf, 3691 struct ieee80211_sta *sta, 3692 u32 iv32, u16 *phase1key) 3693 { 3694 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3695 3696 if (keyconf->hw_key_idx == STA_KEY_IDX_INVALID) 3697 return; 3698 3699 iwl_mvm_update_tkip_key(mvm, vif, keyconf, sta, iv32, phase1key); 3700 } 3701 3702 3703 static bool iwl_mvm_rx_aux_roc(struct iwl_notif_wait_data *notif_wait, 3704 struct iwl_rx_packet *pkt, void *data) 3705 { 3706 struct iwl_mvm *mvm = 3707 container_of(notif_wait, struct iwl_mvm, notif_wait); 3708 struct iwl_hs20_roc_res *resp; 3709 int resp_len = iwl_rx_packet_payload_len(pkt); 3710 struct iwl_mvm_time_event_data *te_data = data; 3711 3712 if (WARN_ON(pkt->hdr.cmd != HOT_SPOT_CMD)) 3713 return true; 3714 3715 if (WARN_ON_ONCE(resp_len != sizeof(*resp))) { 3716 IWL_ERR(mvm, "Invalid HOT_SPOT_CMD response\n"); 3717 return true; 3718 } 3719 3720 resp = (void *)pkt->data; 3721 3722 IWL_DEBUG_TE(mvm, 3723 "Aux ROC: Received response from ucode: status=%d uid=%d\n", 3724 resp->status, resp->event_unique_id); 3725 3726 te_data->uid = le32_to_cpu(resp->event_unique_id); 3727 IWL_DEBUG_TE(mvm, "TIME_EVENT_CMD response - UID = 0x%x\n", 3728 te_data->uid); 3729 3730 spin_lock_bh(&mvm->time_event_lock); 3731 list_add_tail(&te_data->list, &mvm->aux_roc_te_list); 3732 spin_unlock_bh(&mvm->time_event_lock); 3733 3734 return true; 3735 } 3736 3737 #define AUX_ROC_MIN_DURATION MSEC_TO_TU(100) 3738 #define AUX_ROC_MIN_DELAY MSEC_TO_TU(200) 3739 #define AUX_ROC_MAX_DELAY MSEC_TO_TU(600) 3740 #define AUX_ROC_SAFETY_BUFFER MSEC_TO_TU(20) 3741 #define AUX_ROC_MIN_SAFETY_BUFFER MSEC_TO_TU(10) 3742 static int iwl_mvm_send_aux_roc_cmd(struct iwl_mvm *mvm, 3743 struct ieee80211_channel *channel, 3744 struct ieee80211_vif *vif, 3745 int duration) 3746 { 3747 int res; 3748 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3749 struct iwl_mvm_time_event_data *te_data = &mvmvif->hs_time_event_data; 3750 static const u16 time_event_response[] = { HOT_SPOT_CMD }; 3751 struct iwl_notification_wait wait_time_event; 3752 u32 dtim_interval = vif->bss_conf.dtim_period * 3753 vif->bss_conf.beacon_int; 3754 u32 req_dur, delay; 3755 struct iwl_hs20_roc_req aux_roc_req = { 3756 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 3757 .id_and_color = 3758 cpu_to_le32(FW_CMD_ID_AND_COLOR(MAC_INDEX_AUX, 0)), 3759 .sta_id_and_color = cpu_to_le32(mvm->aux_sta.sta_id), 3760 }; 3761 struct iwl_hs20_roc_req_tail *tail = iwl_mvm_chan_info_cmd_tail(mvm, 3762 &aux_roc_req.channel_info); 3763 u16 len = sizeof(aux_roc_req) - iwl_mvm_chan_info_padding(mvm); 3764 3765 /* Set the channel info data */ 3766 iwl_mvm_set_chan_info(mvm, &aux_roc_req.channel_info, channel->hw_value, 3767 iwl_mvm_phy_band_from_nl80211(channel->band), 3768 PHY_VHT_CHANNEL_MODE20, 3769 0); 3770 3771 /* Set the time and duration */ 3772 tail->apply_time = cpu_to_le32(iwl_mvm_get_systime(mvm)); 3773 3774 delay = AUX_ROC_MIN_DELAY; 3775 req_dur = MSEC_TO_TU(duration); 3776 3777 /* 3778 * If we are associated we want the delay time to be at least one 3779 * dtim interval so that the FW can wait until after the DTIM and 3780 * then start the time event, this will potentially allow us to 3781 * remain off-channel for the max duration. 
3782 * Since we want to use almost a whole dtim interval we would also 3783 * like the delay to be for 2-3 dtim intervals, in case there are 3784 * other time events with higher priority. 3785 */ 3786 if (vif->bss_conf.assoc) { 3787 delay = min_t(u32, dtim_interval * 3, AUX_ROC_MAX_DELAY); 3788 /* We cannot remain off-channel longer than the DTIM interval */ 3789 if (dtim_interval <= req_dur) { 3790 req_dur = dtim_interval - AUX_ROC_SAFETY_BUFFER; 3791 if (req_dur <= AUX_ROC_MIN_DURATION) 3792 req_dur = dtim_interval - 3793 AUX_ROC_MIN_SAFETY_BUFFER; 3794 } 3795 } 3796 3797 tail->duration = cpu_to_le32(req_dur); 3798 tail->apply_time_max_delay = cpu_to_le32(delay); 3799 3800 IWL_DEBUG_TE(mvm, 3801 "ROC: Requesting to remain on channel %u for %ums\n", 3802 channel->hw_value, req_dur); 3803 IWL_DEBUG_TE(mvm, 3804 "\t(requested = %ums, max_delay = %ums, dtim_interval = %ums)\n", 3805 duration, delay, dtim_interval); 3806 3807 /* Set the node address */ 3808 memcpy(tail->node_addr, vif->addr, ETH_ALEN); 3809 3810 lockdep_assert_held(&mvm->mutex); 3811 3812 spin_lock_bh(&mvm->time_event_lock); 3813 3814 if (WARN_ON(te_data->id == HOT_SPOT_CMD)) { 3815 spin_unlock_bh(&mvm->time_event_lock); 3816 return -EIO; 3817 } 3818 3819 te_data->vif = vif; 3820 te_data->duration = duration; 3821 te_data->id = HOT_SPOT_CMD; 3822 3823 spin_unlock_bh(&mvm->time_event_lock); 3824 3825 /* 3826 * Use a notification wait, which really just processes the 3827 * command response and doesn't wait for anything, in order 3828 * to be able to process the response and get the UID inside 3829 * the RX path. Using CMD_WANT_SKB doesn't work because it 3830 * stores the buffer and then wakes up this thread, by which 3831 * time another notification (that the time event started) 3832 * might already be processed unsuccessfully. 3833 */ 3834 iwl_init_notification_wait(&mvm->notif_wait, &wait_time_event, 3835 time_event_response, 3836 ARRAY_SIZE(time_event_response), 3837 iwl_mvm_rx_aux_roc, te_data); 3838 3839 res = iwl_mvm_send_cmd_pdu(mvm, HOT_SPOT_CMD, 0, len, 3840 &aux_roc_req); 3841 3842 if (res) { 3843 IWL_ERR(mvm, "Couldn't send HOT_SPOT_CMD: %d\n", res); 3844 iwl_remove_notification(&mvm->notif_wait, &wait_time_event); 3845 goto out_clear_te; 3846 } 3847 3848 /* No need to wait for anything, so just pass 1 (0 isn't valid) */ 3849 res = iwl_wait_notification(&mvm->notif_wait, &wait_time_event, 1); 3850 /* should never fail */ 3851 WARN_ON_ONCE(res); 3852 3853 if (res) { 3854 out_clear_te: 3855 spin_lock_bh(&mvm->time_event_lock); 3856 iwl_mvm_te_clear_data(mvm, te_data); 3857 spin_unlock_bh(&mvm->time_event_lock); 3858 } 3859 3860 return res; 3861 } 3862 3863 static int iwl_mvm_roc(struct ieee80211_hw *hw, 3864 struct ieee80211_vif *vif, 3865 struct ieee80211_channel *channel, 3866 int duration, 3867 enum ieee80211_roc_type type) 3868 { 3869 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 3870 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3871 struct cfg80211_chan_def chandef; 3872 struct iwl_mvm_phy_ctxt *phy_ctxt; 3873 bool band_change_removal; 3874 int ret, i; 3875 3876 IWL_DEBUG_MAC80211(mvm, "enter (%d, %d, %d)\n", channel->hw_value, 3877 duration, type); 3878 3879 /* 3880 * Flush the done work, just in case it's still pending, so that 3881 * the work it does can complete and we can accept new frames. 
3882 */ 3883 flush_work(&mvm->roc_done_wk); 3884 3885 mutex_lock(&mvm->mutex); 3886 3887 switch (vif->type) { 3888 case NL80211_IFTYPE_STATION: 3889 if (fw_has_capa(&mvm->fw->ucode_capa, 3890 IWL_UCODE_TLV_CAPA_HOTSPOT_SUPPORT)) { 3891 /* Use aux roc framework (HS20) */ 3892 if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12) { 3893 u32 lmac_id; 3894 3895 lmac_id = iwl_mvm_get_lmac_id(mvm->fw, 3896 channel->band); 3897 ret = iwl_mvm_add_aux_sta(mvm, lmac_id); 3898 if (WARN(ret, 3899 "Failed to allocate aux station")) 3900 goto out_unlock; 3901 } 3902 ret = iwl_mvm_send_aux_roc_cmd(mvm, channel, 3903 vif, duration); 3904 goto out_unlock; 3905 } 3906 IWL_ERR(mvm, "hotspot not supported\n"); 3907 ret = -EINVAL; 3908 goto out_unlock; 3909 case NL80211_IFTYPE_P2P_DEVICE: 3910 /* handle below */ 3911 break; 3912 default: 3913 IWL_ERR(mvm, "vif isn't P2P_DEVICE: %d\n", vif->type); 3914 ret = -EINVAL; 3915 goto out_unlock; 3916 } 3917 3918 for (i = 0; i < NUM_PHY_CTX; i++) { 3919 phy_ctxt = &mvm->phy_ctxts[i]; 3920 if (phy_ctxt->ref == 0 || mvmvif->phy_ctxt == phy_ctxt) 3921 continue; 3922 3923 if (phy_ctxt->ref && channel == phy_ctxt->channel) { 3924 /* 3925 * Unbind the P2P_DEVICE from the current PHY context, 3926 * and if the PHY context is not used remove it. 3927 */ 3928 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3929 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3930 goto out_unlock; 3931 3932 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3933 3934 /* Bind the P2P_DEVICE to the current PHY Context */ 3935 mvmvif->phy_ctxt = phy_ctxt; 3936 3937 ret = iwl_mvm_binding_add_vif(mvm, vif); 3938 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 3939 goto out_unlock; 3940 3941 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 3942 goto schedule_time_event; 3943 } 3944 } 3945 3946 /* Need to update the PHY context only if the ROC channel changed */ 3947 if (channel == mvmvif->phy_ctxt->channel) 3948 goto schedule_time_event; 3949 3950 cfg80211_chandef_create(&chandef, channel, NL80211_CHAN_NO_HT); 3951 3952 /* 3953 * Check if the remain-on-channel is on a different band and that 3954 * requires context removal, see iwl_mvm_phy_ctxt_changed(). If 3955 * so, we'll need to release and then re-configure here, since we 3956 * must not remove a PHY context that's part of a binding. 3957 */ 3958 band_change_removal = 3959 fw_has_capa(&mvm->fw->ucode_capa, 3960 IWL_UCODE_TLV_CAPA_BINDING_CDB_SUPPORT) && 3961 mvmvif->phy_ctxt->channel->band != chandef.chan->band; 3962 3963 if (mvmvif->phy_ctxt->ref == 1 && !band_change_removal) { 3964 /* 3965 * Change the PHY context configuration as it is currently 3966 * referenced only by the P2P Device MAC (and we can modify it) 3967 */ 3968 ret = iwl_mvm_phy_ctxt_changed(mvm, mvmvif->phy_ctxt, 3969 &chandef, 1, 1); 3970 if (ret) 3971 goto out_unlock; 3972 } else { 3973 /* 3974 * The PHY context is shared with other MACs (or we're trying to 3975 * switch bands), so remove the P2P Device from the binding, 3976 * allocate an new PHY context and create a new binding. 
3977 */ 3978 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 3979 if (!phy_ctxt) { 3980 ret = -ENOSPC; 3981 goto out_unlock; 3982 } 3983 3984 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, &chandef, 3985 1, 1); 3986 if (ret) { 3987 IWL_ERR(mvm, "Failed to change PHY context\n"); 3988 goto out_unlock; 3989 } 3990 3991 /* Unbind the P2P_DEVICE from the current PHY context */ 3992 ret = iwl_mvm_binding_remove_vif(mvm, vif); 3993 if (WARN(ret, "Failed unbinding P2P_DEVICE\n")) 3994 goto out_unlock; 3995 3996 iwl_mvm_phy_ctxt_unref(mvm, mvmvif->phy_ctxt); 3997 3998 /* Bind the P2P_DEVICE to the new allocated PHY context */ 3999 mvmvif->phy_ctxt = phy_ctxt; 4000 4001 ret = iwl_mvm_binding_add_vif(mvm, vif); 4002 if (WARN(ret, "Failed binding P2P_DEVICE\n")) 4003 goto out_unlock; 4004 4005 iwl_mvm_phy_ctxt_ref(mvm, mvmvif->phy_ctxt); 4006 } 4007 4008 schedule_time_event: 4009 /* Schedule the time events */ 4010 ret = iwl_mvm_start_p2p_roc(mvm, vif, duration, type); 4011 4012 out_unlock: 4013 mutex_unlock(&mvm->mutex); 4014 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4015 return ret; 4016 } 4017 4018 static int iwl_mvm_cancel_roc(struct ieee80211_hw *hw, 4019 struct ieee80211_vif *vif) 4020 { 4021 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4022 4023 IWL_DEBUG_MAC80211(mvm, "enter\n"); 4024 4025 mutex_lock(&mvm->mutex); 4026 iwl_mvm_stop_roc(mvm, vif); 4027 mutex_unlock(&mvm->mutex); 4028 4029 IWL_DEBUG_MAC80211(mvm, "leave\n"); 4030 return 0; 4031 } 4032 4033 struct iwl_mvm_ftm_responder_iter_data { 4034 bool responder; 4035 struct ieee80211_chanctx_conf *ctx; 4036 }; 4037 4038 static void iwl_mvm_ftm_responder_chanctx_iter(void *_data, u8 *mac, 4039 struct ieee80211_vif *vif) 4040 { 4041 struct iwl_mvm_ftm_responder_iter_data *data = _data; 4042 4043 if (rcu_access_pointer(vif->chanctx_conf) == data->ctx && 4044 vif->type == NL80211_IFTYPE_AP && vif->bss_conf.ftmr_params) 4045 data->responder = true; 4046 } 4047 4048 static bool iwl_mvm_is_ftm_responder_chanctx(struct iwl_mvm *mvm, 4049 struct ieee80211_chanctx_conf *ctx) 4050 { 4051 struct iwl_mvm_ftm_responder_iter_data data = { 4052 .responder = false, 4053 .ctx = ctx, 4054 }; 4055 4056 ieee80211_iterate_active_interfaces_atomic(mvm->hw, 4057 IEEE80211_IFACE_ITER_NORMAL, 4058 iwl_mvm_ftm_responder_chanctx_iter, 4059 &data); 4060 return data.responder; 4061 } 4062 4063 static int __iwl_mvm_add_chanctx(struct iwl_mvm *mvm, 4064 struct ieee80211_chanctx_conf *ctx) 4065 { 4066 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4067 struct iwl_mvm_phy_ctxt *phy_ctxt; 4068 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4069 struct cfg80211_chan_def *def = responder ? 
&ctx->def : &ctx->min_def; 4070 int ret; 4071 4072 lockdep_assert_held(&mvm->mutex); 4073 4074 IWL_DEBUG_MAC80211(mvm, "Add channel context\n"); 4075 4076 phy_ctxt = iwl_mvm_get_free_phy_ctxt(mvm); 4077 if (!phy_ctxt) { 4078 ret = -ENOSPC; 4079 goto out; 4080 } 4081 4082 ret = iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4083 ctx->rx_chains_static, 4084 ctx->rx_chains_dynamic); 4085 if (ret) { 4086 IWL_ERR(mvm, "Failed to add PHY context\n"); 4087 goto out; 4088 } 4089 4090 iwl_mvm_phy_ctxt_ref(mvm, phy_ctxt); 4091 *phy_ctxt_id = phy_ctxt->id; 4092 out: 4093 return ret; 4094 } 4095 4096 static int iwl_mvm_add_chanctx(struct ieee80211_hw *hw, 4097 struct ieee80211_chanctx_conf *ctx) 4098 { 4099 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4100 int ret; 4101 4102 mutex_lock(&mvm->mutex); 4103 ret = __iwl_mvm_add_chanctx(mvm, ctx); 4104 mutex_unlock(&mvm->mutex); 4105 4106 return ret; 4107 } 4108 4109 static void __iwl_mvm_remove_chanctx(struct iwl_mvm *mvm, 4110 struct ieee80211_chanctx_conf *ctx) 4111 { 4112 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4113 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4114 4115 lockdep_assert_held(&mvm->mutex); 4116 4117 iwl_mvm_phy_ctxt_unref(mvm, phy_ctxt); 4118 } 4119 4120 static void iwl_mvm_remove_chanctx(struct ieee80211_hw *hw, 4121 struct ieee80211_chanctx_conf *ctx) 4122 { 4123 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4124 4125 mutex_lock(&mvm->mutex); 4126 __iwl_mvm_remove_chanctx(mvm, ctx); 4127 mutex_unlock(&mvm->mutex); 4128 } 4129 4130 static void iwl_mvm_change_chanctx(struct ieee80211_hw *hw, 4131 struct ieee80211_chanctx_conf *ctx, 4132 u32 changed) 4133 { 4134 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4135 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4136 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4137 bool responder = iwl_mvm_is_ftm_responder_chanctx(mvm, ctx); 4138 struct cfg80211_chan_def *def = responder ? &ctx->def : &ctx->min_def; 4139 4140 if (WARN_ONCE((phy_ctxt->ref > 1) && 4141 (changed & ~(IEEE80211_CHANCTX_CHANGE_WIDTH | 4142 IEEE80211_CHANCTX_CHANGE_RX_CHAINS | 4143 IEEE80211_CHANCTX_CHANGE_RADAR | 4144 IEEE80211_CHANCTX_CHANGE_MIN_WIDTH)), 4145 "Cannot change PHY. Ref=%d, changed=0x%X\n", 4146 phy_ctxt->ref, changed)) 4147 return; 4148 4149 mutex_lock(&mvm->mutex); 4150 4151 /* we are only changing the min_width, may be a noop */ 4152 if (changed == IEEE80211_CHANCTX_CHANGE_MIN_WIDTH) { 4153 if (phy_ctxt->width == def->width) 4154 goto out_unlock; 4155 4156 /* we are just toggling between 20_NOHT and 20 */ 4157 if (phy_ctxt->width <= NL80211_CHAN_WIDTH_20 && 4158 def->width <= NL80211_CHAN_WIDTH_20) 4159 goto out_unlock; 4160 } 4161 4162 iwl_mvm_bt_coex_vif_change(mvm); 4163 iwl_mvm_phy_ctxt_changed(mvm, phy_ctxt, def, 4164 ctx->rx_chains_static, 4165 ctx->rx_chains_dynamic); 4166 4167 out_unlock: 4168 mutex_unlock(&mvm->mutex); 4169 } 4170 4171 static int __iwl_mvm_assign_vif_chanctx(struct iwl_mvm *mvm, 4172 struct ieee80211_vif *vif, 4173 struct ieee80211_chanctx_conf *ctx, 4174 bool switching_chanctx) 4175 { 4176 u16 *phy_ctxt_id = (u16 *)ctx->drv_priv; 4177 struct iwl_mvm_phy_ctxt *phy_ctxt = &mvm->phy_ctxts[*phy_ctxt_id]; 4178 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4179 int ret; 4180 4181 lockdep_assert_held(&mvm->mutex); 4182 4183 mvmvif->phy_ctxt = phy_ctxt; 4184 4185 switch (vif->type) { 4186 case NL80211_IFTYPE_AP: 4187 /* only needed if we're switching chanctx (i.e. 
during CSA) */ 4188 if (switching_chanctx) { 4189 mvmvif->ap_ibss_active = true; 4190 break; 4191 } 4192 fallthrough; 4193 case NL80211_IFTYPE_ADHOC: 4194 /* 4195 * The AP binding flow is handled as part of the start_ap flow 4196 * (in bss_info_changed), similarly for IBSS. 4197 */ 4198 ret = 0; 4199 goto out; 4200 case NL80211_IFTYPE_STATION: 4201 mvmvif->csa_bcn_pending = false; 4202 break; 4203 case NL80211_IFTYPE_MONITOR: 4204 /* always disable PS when a monitor interface is active */ 4205 mvmvif->ps_disabled = true; 4206 break; 4207 default: 4208 ret = -EINVAL; 4209 goto out; 4210 } 4211 4212 ret = iwl_mvm_binding_add_vif(mvm, vif); 4213 if (ret) 4214 goto out; 4215 4216 /* 4217 * Power state must be updated before quotas, 4218 * otherwise fw will complain. 4219 */ 4220 iwl_mvm_power_update_mac(mvm); 4221 4222 /* Setting the quota at this stage is only required for monitor 4223 * interfaces. For the other types, the bss_info changed flow 4224 * will handle quota settings. 4225 */ 4226 if (vif->type == NL80211_IFTYPE_MONITOR) { 4227 mvmvif->monitor_active = true; 4228 ret = iwl_mvm_update_quotas(mvm, false, NULL); 4229 if (ret) 4230 goto out_remove_binding; 4231 4232 ret = iwl_mvm_add_snif_sta(mvm, vif); 4233 if (ret) 4234 goto out_remove_binding; 4235 4236 } 4237 4238 /* Handle binding during CSA */ 4239 if (vif->type == NL80211_IFTYPE_AP) { 4240 iwl_mvm_update_quotas(mvm, false, NULL); 4241 iwl_mvm_mac_ctxt_changed(mvm, vif, false, NULL); 4242 } 4243 4244 if (switching_chanctx && vif->type == NL80211_IFTYPE_STATION) { 4245 mvmvif->csa_bcn_pending = true; 4246 4247 if (!fw_has_capa(&mvm->fw->ucode_capa, 4248 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4249 u32 duration = 3 * vif->bss_conf.beacon_int; 4250 4251 /* Protect the session to make sure we hear the first 4252 * beacon on the new channel. 
4253 */ 4254 iwl_mvm_protect_session(mvm, vif, duration, duration, 4255 vif->bss_conf.beacon_int / 2, 4256 true); 4257 } 4258 4259 iwl_mvm_update_quotas(mvm, false, NULL); 4260 } 4261 4262 goto out; 4263 4264 out_remove_binding: 4265 iwl_mvm_binding_remove_vif(mvm, vif); 4266 iwl_mvm_power_update_mac(mvm); 4267 out: 4268 if (ret) 4269 mvmvif->phy_ctxt = NULL; 4270 return ret; 4271 } 4272 static int iwl_mvm_assign_vif_chanctx(struct ieee80211_hw *hw, 4273 struct ieee80211_vif *vif, 4274 struct ieee80211_chanctx_conf *ctx) 4275 { 4276 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4277 int ret; 4278 4279 mutex_lock(&mvm->mutex); 4280 ret = __iwl_mvm_assign_vif_chanctx(mvm, vif, ctx, false); 4281 mutex_unlock(&mvm->mutex); 4282 4283 return ret; 4284 } 4285 4286 static void __iwl_mvm_unassign_vif_chanctx(struct iwl_mvm *mvm, 4287 struct ieee80211_vif *vif, 4288 struct ieee80211_chanctx_conf *ctx, 4289 bool switching_chanctx) 4290 { 4291 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4292 struct ieee80211_vif *disabled_vif = NULL; 4293 4294 lockdep_assert_held(&mvm->mutex); 4295 iwl_mvm_remove_time_event(mvm, mvmvif, &mvmvif->time_event_data); 4296 4297 switch (vif->type) { 4298 case NL80211_IFTYPE_ADHOC: 4299 goto out; 4300 case NL80211_IFTYPE_MONITOR: 4301 mvmvif->monitor_active = false; 4302 mvmvif->ps_disabled = false; 4303 iwl_mvm_rm_snif_sta(mvm, vif); 4304 break; 4305 case NL80211_IFTYPE_AP: 4306 /* This part is triggered only during CSA */ 4307 if (!switching_chanctx || !mvmvif->ap_ibss_active) 4308 goto out; 4309 4310 mvmvif->csa_countdown = false; 4311 4312 /* Set CS bit on all the stations */ 4313 iwl_mvm_modify_all_sta_disable_tx(mvm, mvmvif, true); 4314 4315 /* Save blocked iface, the timeout is set on the next beacon */ 4316 rcu_assign_pointer(mvm->csa_tx_blocked_vif, vif); 4317 4318 mvmvif->ap_ibss_active = false; 4319 break; 4320 case NL80211_IFTYPE_STATION: 4321 if (!switching_chanctx) 4322 break; 4323 4324 disabled_vif = vif; 4325 4326 if (!fw_has_capa(&mvm->fw->ucode_capa, 4327 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) 4328 iwl_mvm_mac_ctxt_changed(mvm, vif, true, NULL); 4329 break; 4330 default: 4331 break; 4332 } 4333 4334 iwl_mvm_update_quotas(mvm, false, disabled_vif); 4335 iwl_mvm_binding_remove_vif(mvm, vif); 4336 4337 out: 4338 if (fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD) && 4339 switching_chanctx) 4340 return; 4341 mvmvif->phy_ctxt = NULL; 4342 iwl_mvm_power_update_mac(mvm); 4343 } 4344 4345 static void iwl_mvm_unassign_vif_chanctx(struct ieee80211_hw *hw, 4346 struct ieee80211_vif *vif, 4347 struct ieee80211_chanctx_conf *ctx) 4348 { 4349 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4350 4351 mutex_lock(&mvm->mutex); 4352 __iwl_mvm_unassign_vif_chanctx(mvm, vif, ctx, false); 4353 mutex_unlock(&mvm->mutex); 4354 } 4355 4356 static int 4357 iwl_mvm_switch_vif_chanctx_swap(struct iwl_mvm *mvm, 4358 struct ieee80211_vif_chanctx_switch *vifs) 4359 { 4360 int ret; 4361 4362 mutex_lock(&mvm->mutex); 4363 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4364 __iwl_mvm_remove_chanctx(mvm, vifs[0].old_ctx); 4365 4366 ret = __iwl_mvm_add_chanctx(mvm, vifs[0].new_ctx); 4367 if (ret) { 4368 IWL_ERR(mvm, "failed to add new_ctx during channel switch\n"); 4369 goto out_reassign; 4370 } 4371 4372 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4373 true); 4374 if (ret) { 4375 IWL_ERR(mvm, 4376 "failed to assign new_ctx during channel switch\n"); 4377 goto out_remove; 4378 } 4379 4380 /* we 
don't support TDLS during DCM - can be caused by channel switch */ 4381 if (iwl_mvm_phy_ctx_count(mvm) > 1) 4382 iwl_mvm_teardown_tdls_peers(mvm); 4383 4384 goto out; 4385 4386 out_remove: 4387 __iwl_mvm_remove_chanctx(mvm, vifs[0].new_ctx); 4388 4389 out_reassign: 4390 if (__iwl_mvm_add_chanctx(mvm, vifs[0].old_ctx)) { 4391 IWL_ERR(mvm, "failed to add old_ctx back after failure.\n"); 4392 goto out_restart; 4393 } 4394 4395 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4396 true)) { 4397 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4398 goto out_restart; 4399 } 4400 4401 goto out; 4402 4403 out_restart: 4404 /* things keep failing, better restart the hw */ 4405 iwl_mvm_nic_restart(mvm, false); 4406 4407 out: 4408 mutex_unlock(&mvm->mutex); 4409 4410 return ret; 4411 } 4412 4413 static int 4414 iwl_mvm_switch_vif_chanctx_reassign(struct iwl_mvm *mvm, 4415 struct ieee80211_vif_chanctx_switch *vifs) 4416 { 4417 int ret; 4418 4419 mutex_lock(&mvm->mutex); 4420 __iwl_mvm_unassign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, true); 4421 4422 ret = __iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].new_ctx, 4423 true); 4424 if (ret) { 4425 IWL_ERR(mvm, 4426 "failed to assign new_ctx during channel switch\n"); 4427 goto out_reassign; 4428 } 4429 4430 goto out; 4431 4432 out_reassign: 4433 if (__iwl_mvm_assign_vif_chanctx(mvm, vifs[0].vif, vifs[0].old_ctx, 4434 true)) { 4435 IWL_ERR(mvm, "failed to reassign old_ctx after failure.\n"); 4436 goto out_restart; 4437 } 4438 4439 goto out; 4440 4441 out_restart: 4442 /* things keep failing, better restart the hw */ 4443 iwl_mvm_nic_restart(mvm, false); 4444 4445 out: 4446 mutex_unlock(&mvm->mutex); 4447 4448 return ret; 4449 } 4450 4451 static int iwl_mvm_switch_vif_chanctx(struct ieee80211_hw *hw, 4452 struct ieee80211_vif_chanctx_switch *vifs, 4453 int n_vifs, 4454 enum ieee80211_chanctx_switch_mode mode) 4455 { 4456 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4457 int ret; 4458 4459 /* we only support a single-vif right now */ 4460 if (n_vifs > 1) 4461 return -EOPNOTSUPP; 4462 4463 switch (mode) { 4464 case CHANCTX_SWMODE_SWAP_CONTEXTS: 4465 ret = iwl_mvm_switch_vif_chanctx_swap(mvm, vifs); 4466 break; 4467 case CHANCTX_SWMODE_REASSIGN_VIF: 4468 ret = iwl_mvm_switch_vif_chanctx_reassign(mvm, vifs); 4469 break; 4470 default: 4471 ret = -EOPNOTSUPP; 4472 break; 4473 } 4474 4475 return ret; 4476 } 4477 4478 static int iwl_mvm_tx_last_beacon(struct ieee80211_hw *hw) 4479 { 4480 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4481 4482 return mvm->ibss_manager; 4483 } 4484 4485 static int iwl_mvm_set_tim(struct ieee80211_hw *hw, 4486 struct ieee80211_sta *sta, 4487 bool set) 4488 { 4489 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4490 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 4491 4492 if (!mvm_sta || !mvm_sta->vif) { 4493 IWL_ERR(mvm, "Station is not associated to a vif\n"); 4494 return -EINVAL; 4495 } 4496 4497 return iwl_mvm_mac_ctxt_beacon_changed(mvm, mvm_sta->vif); 4498 } 4499 4500 #ifdef CONFIG_NL80211_TESTMODE 4501 static const struct nla_policy iwl_mvm_tm_policy[IWL_MVM_TM_ATTR_MAX + 1] = { 4502 [IWL_MVM_TM_ATTR_CMD] = { .type = NLA_U32 }, 4503 [IWL_MVM_TM_ATTR_NOA_DURATION] = { .type = NLA_U32 }, 4504 [IWL_MVM_TM_ATTR_BEACON_FILTER_STATE] = { .type = NLA_U32 }, 4505 }; 4506 4507 static int __iwl_mvm_mac_testmode_cmd(struct iwl_mvm *mvm, 4508 struct ieee80211_vif *vif, 4509 void *data, int len) 4510 { 4511 struct nlattr *tb[IWL_MVM_TM_ATTR_MAX + 1]; 4512 int err; 4513 u32 
noa_duration; 4514 4515 err = nla_parse_deprecated(tb, IWL_MVM_TM_ATTR_MAX, data, len, 4516 iwl_mvm_tm_policy, NULL); 4517 if (err) 4518 return err; 4519 4520 if (!tb[IWL_MVM_TM_ATTR_CMD]) 4521 return -EINVAL; 4522 4523 switch (nla_get_u32(tb[IWL_MVM_TM_ATTR_CMD])) { 4524 case IWL_MVM_TM_CMD_SET_NOA: 4525 if (!vif || vif->type != NL80211_IFTYPE_AP || !vif->p2p || 4526 !vif->bss_conf.enable_beacon || 4527 !tb[IWL_MVM_TM_ATTR_NOA_DURATION]) 4528 return -EINVAL; 4529 4530 noa_duration = nla_get_u32(tb[IWL_MVM_TM_ATTR_NOA_DURATION]); 4531 if (noa_duration >= vif->bss_conf.beacon_int) 4532 return -EINVAL; 4533 4534 mvm->noa_duration = noa_duration; 4535 mvm->noa_vif = vif; 4536 4537 return iwl_mvm_update_quotas(mvm, true, NULL); 4538 case IWL_MVM_TM_CMD_SET_BEACON_FILTER: 4539 /* must be associated client vif - ignore authorized */ 4540 if (!vif || vif->type != NL80211_IFTYPE_STATION || 4541 !vif->bss_conf.assoc || !vif->bss_conf.dtim_period || 4542 !tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE]) 4543 return -EINVAL; 4544 4545 if (nla_get_u32(tb[IWL_MVM_TM_ATTR_BEACON_FILTER_STATE])) 4546 return iwl_mvm_enable_beacon_filter(mvm, vif, 0); 4547 return iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4548 } 4549 4550 return -EOPNOTSUPP; 4551 } 4552 4553 static int iwl_mvm_mac_testmode_cmd(struct ieee80211_hw *hw, 4554 struct ieee80211_vif *vif, 4555 void *data, int len) 4556 { 4557 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4558 int err; 4559 4560 mutex_lock(&mvm->mutex); 4561 err = __iwl_mvm_mac_testmode_cmd(mvm, vif, data, len); 4562 mutex_unlock(&mvm->mutex); 4563 4564 return err; 4565 } 4566 #endif 4567 4568 static void iwl_mvm_channel_switch(struct ieee80211_hw *hw, 4569 struct ieee80211_vif *vif, 4570 struct ieee80211_channel_switch *chsw) 4571 { 4572 /* By implementing this operation, we prevent mac80211 from 4573 * starting its own channel switch timer, so that we can call 4574 * ieee80211_chswitch_done() ourselves at the right time 4575 * (which is when the absence time event starts). 4576 */ 4577 4578 IWL_DEBUG_MAC80211(IWL_MAC80211_GET_MVM(hw), 4579 "dummy channel switch op\n"); 4580 } 4581 4582 static int iwl_mvm_schedule_client_csa(struct iwl_mvm *mvm, 4583 struct ieee80211_vif *vif, 4584 struct ieee80211_channel_switch *chsw) 4585 { 4586 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4587 struct iwl_chan_switch_te_cmd cmd = { 4588 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4589 mvmvif->color)), 4590 .action = cpu_to_le32(FW_CTXT_ACTION_ADD), 4591 .tsf = cpu_to_le32(chsw->timestamp), 4592 .cs_count = chsw->count, 4593 .cs_mode = chsw->block_tx, 4594 }; 4595 4596 lockdep_assert_held(&mvm->mutex); 4597 4598 if (chsw->delay) 4599 cmd.cs_delayed_bcn_count = 4600 DIV_ROUND_UP(chsw->delay, vif->bss_conf.beacon_int); 4601 4602 return iwl_mvm_send_cmd_pdu(mvm, 4603 WIDE_ID(MAC_CONF_GROUP, 4604 CHANNEL_SWITCH_TIME_EVENT_CMD), 4605 0, sizeof(cmd), &cmd); 4606 } 4607 4608 static int iwl_mvm_old_pre_chan_sw_sta(struct iwl_mvm *mvm, 4609 struct ieee80211_vif *vif, 4610 struct ieee80211_channel_switch *chsw) 4611 { 4612 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4613 u32 apply_time; 4614 4615 /* Schedule the time event to a bit before beacon 1, 4616 * to make sure we're in the new channel when the 4617 * GO/AP arrives. In case count <= 1 immediately schedule the 4618 * TE (this might result with some packet loss or connection 4619 * loss). 
4620 */ 4621 if (chsw->count <= 1) 4622 apply_time = 0; 4623 else 4624 apply_time = chsw->device_timestamp + 4625 ((vif->bss_conf.beacon_int * (chsw->count - 1) - 4626 IWL_MVM_CHANNEL_SWITCH_TIME_CLIENT) * 1024); 4627 4628 if (chsw->block_tx) 4629 iwl_mvm_csa_client_absent(mvm, vif); 4630 4631 if (mvmvif->bf_data.bf_enabled) { 4632 int ret = iwl_mvm_disable_beacon_filter(mvm, vif, 0); 4633 4634 if (ret) 4635 return ret; 4636 } 4637 4638 iwl_mvm_schedule_csa_period(mvm, vif, vif->bss_conf.beacon_int, 4639 apply_time); 4640 4641 return 0; 4642 } 4643 4644 #define IWL_MAX_CSA_BLOCK_TX 1500 4645 static int iwl_mvm_pre_channel_switch(struct ieee80211_hw *hw, 4646 struct ieee80211_vif *vif, 4647 struct ieee80211_channel_switch *chsw) 4648 { 4649 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4650 struct ieee80211_vif *csa_vif; 4651 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4652 int ret; 4653 4654 mutex_lock(&mvm->mutex); 4655 4656 mvmvif->csa_failed = false; 4657 4658 IWL_DEBUG_MAC80211(mvm, "pre CSA to freq %d\n", 4659 chsw->chandef.center_freq1); 4660 4661 iwl_fw_dbg_trigger_simple_stop(&mvm->fwrt, 4662 ieee80211_vif_to_wdev(vif), 4663 FW_DBG_TRIGGER_CHANNEL_SWITCH); 4664 4665 switch (vif->type) { 4666 case NL80211_IFTYPE_AP: 4667 csa_vif = 4668 rcu_dereference_protected(mvm->csa_vif, 4669 lockdep_is_held(&mvm->mutex)); 4670 if (WARN_ONCE(csa_vif && csa_vif->csa_active, 4671 "Another CSA is already in progress")) { 4672 ret = -EBUSY; 4673 goto out_unlock; 4674 } 4675 4676 /* we still didn't unblock tx. prevent new CS meanwhile */ 4677 if (rcu_dereference_protected(mvm->csa_tx_blocked_vif, 4678 lockdep_is_held(&mvm->mutex))) { 4679 ret = -EBUSY; 4680 goto out_unlock; 4681 } 4682 4683 rcu_assign_pointer(mvm->csa_vif, vif); 4684 4685 if (WARN_ONCE(mvmvif->csa_countdown, 4686 "Previous CSA countdown didn't complete")) { 4687 ret = -EBUSY; 4688 goto out_unlock; 4689 } 4690 4691 mvmvif->csa_target_freq = chsw->chandef.chan->center_freq; 4692 4693 break; 4694 case NL80211_IFTYPE_STATION: 4695 /* 4696 * In the new flow FW is in charge of timing the switch so there 4697 * is no need for all of this 4698 */ 4699 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4700 CHANNEL_SWITCH_ERROR_NOTIF, 4701 0)) 4702 break; 4703 4704 /* 4705 * We haven't configured the firmware to be associated yet since 4706 * we don't know the dtim period. In this case, the firmware can't 4707 * track the beacons. 
4708 */ 4709 if (!vif->bss_conf.assoc || !vif->bss_conf.dtim_period) { 4710 ret = -EBUSY; 4711 goto out_unlock; 4712 } 4713 4714 if (chsw->delay > IWL_MAX_CSA_BLOCK_TX) 4715 schedule_delayed_work(&mvmvif->csa_work, 0); 4716 4717 if (chsw->block_tx) { 4718 /* 4719 * In case of undetermined / long time with immediate 4720 * quiet monitor status to gracefully disconnect 4721 */ 4722 if (!chsw->count || 4723 chsw->count * vif->bss_conf.beacon_int > 4724 IWL_MAX_CSA_BLOCK_TX) 4725 schedule_delayed_work(&mvmvif->csa_work, 4726 msecs_to_jiffies(IWL_MAX_CSA_BLOCK_TX)); 4727 } 4728 4729 if (!fw_has_capa(&mvm->fw->ucode_capa, 4730 IWL_UCODE_TLV_CAPA_CHANNEL_SWITCH_CMD)) { 4731 ret = iwl_mvm_old_pre_chan_sw_sta(mvm, vif, chsw); 4732 if (ret) 4733 goto out_unlock; 4734 } else { 4735 iwl_mvm_schedule_client_csa(mvm, vif, chsw); 4736 } 4737 4738 mvmvif->csa_count = chsw->count; 4739 mvmvif->csa_misbehave = false; 4740 break; 4741 default: 4742 break; 4743 } 4744 4745 mvmvif->ps_disabled = true; 4746 4747 ret = iwl_mvm_power_update_ps(mvm); 4748 if (ret) 4749 goto out_unlock; 4750 4751 /* we won't be on this channel any longer */ 4752 iwl_mvm_teardown_tdls_peers(mvm); 4753 4754 out_unlock: 4755 mutex_unlock(&mvm->mutex); 4756 4757 return ret; 4758 } 4759 4760 static void iwl_mvm_channel_switch_rx_beacon(struct ieee80211_hw *hw, 4761 struct ieee80211_vif *vif, 4762 struct ieee80211_channel_switch *chsw) 4763 { 4764 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4765 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 4766 struct iwl_chan_switch_te_cmd cmd = { 4767 .mac_id = cpu_to_le32(FW_CMD_ID_AND_COLOR(mvmvif->id, 4768 mvmvif->color)), 4769 .action = cpu_to_le32(FW_CTXT_ACTION_MODIFY), 4770 .tsf = cpu_to_le32(chsw->timestamp), 4771 .cs_count = chsw->count, 4772 .cs_mode = chsw->block_tx, 4773 }; 4774 4775 /* 4776 * In the new flow FW is in charge of timing the switch so there is no 4777 * need for all of this 4778 */ 4779 if (iwl_fw_lookup_notif_ver(mvm->fw, MAC_CONF_GROUP, 4780 CHANNEL_SWITCH_ERROR_NOTIF, 0)) 4781 return; 4782 4783 if (!fw_has_capa(&mvm->fw->ucode_capa, IWL_UCODE_TLV_CAPA_CS_MODIFY)) 4784 return; 4785 4786 IWL_DEBUG_MAC80211(mvm, "Modify CSA on mac %d count = %d (old %d) mode = %d\n", 4787 mvmvif->id, chsw->count, mvmvif->csa_count, chsw->block_tx); 4788 4789 if (chsw->count >= mvmvif->csa_count && chsw->block_tx) { 4790 if (mvmvif->csa_misbehave) { 4791 /* Second time, give up on this AP*/ 4792 iwl_mvm_abort_channel_switch(hw, vif); 4793 ieee80211_chswitch_done(vif, false); 4794 mvmvif->csa_misbehave = false; 4795 return; 4796 } 4797 mvmvif->csa_misbehave = true; 4798 } 4799 mvmvif->csa_count = chsw->count; 4800 4801 mutex_lock(&mvm->mutex); 4802 if (mvmvif->csa_failed) 4803 goto out_unlock; 4804 4805 WARN_ON(iwl_mvm_send_cmd_pdu(mvm, 4806 WIDE_ID(MAC_CONF_GROUP, 4807 CHANNEL_SWITCH_TIME_EVENT_CMD), 4808 0, sizeof(cmd), &cmd)); 4809 out_unlock: 4810 mutex_unlock(&mvm->mutex); 4811 } 4812 4813 static void iwl_mvm_flush_no_vif(struct iwl_mvm *mvm, u32 queues, bool drop) 4814 { 4815 int i; 4816 4817 if (!iwl_mvm_has_new_tx_api(mvm)) { 4818 if (drop) { 4819 mutex_lock(&mvm->mutex); 4820 iwl_mvm_flush_tx_path(mvm, 4821 iwl_mvm_flushable_queues(mvm) & queues); 4822 mutex_unlock(&mvm->mutex); 4823 } else { 4824 iwl_trans_wait_tx_queues_empty(mvm->trans, queues); 4825 } 4826 return; 4827 } 4828 4829 mutex_lock(&mvm->mutex); 4830 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 4831 struct ieee80211_sta *sta; 4832 4833 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 
4834 lockdep_is_held(&mvm->mutex)); 4835 if (IS_ERR_OR_NULL(sta)) 4836 continue; 4837 4838 if (drop) 4839 iwl_mvm_flush_sta_tids(mvm, i, 0xFFFF); 4840 else 4841 iwl_mvm_wait_sta_queues_empty(mvm, 4842 iwl_mvm_sta_from_mac80211(sta)); 4843 } 4844 mutex_unlock(&mvm->mutex); 4845 } 4846 4847 static void iwl_mvm_mac_flush(struct ieee80211_hw *hw, 4848 struct ieee80211_vif *vif, u32 queues, bool drop) 4849 { 4850 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4851 struct iwl_mvm_vif *mvmvif; 4852 struct iwl_mvm_sta *mvmsta; 4853 struct ieee80211_sta *sta; 4854 int i; 4855 u32 msk = 0; 4856 4857 if (!vif) { 4858 iwl_mvm_flush_no_vif(mvm, queues, drop); 4859 return; 4860 } 4861 4862 if (vif->type != NL80211_IFTYPE_STATION) 4863 return; 4864 4865 /* Make sure we're done with the deferred traffic before flushing */ 4866 flush_work(&mvm->add_stream_wk); 4867 4868 mutex_lock(&mvm->mutex); 4869 mvmvif = iwl_mvm_vif_from_mac80211(vif); 4870 4871 /* flush the AP-station and all TDLS peers */ 4872 for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) { 4873 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 4874 lockdep_is_held(&mvm->mutex)); 4875 if (IS_ERR_OR_NULL(sta)) 4876 continue; 4877 4878 mvmsta = iwl_mvm_sta_from_mac80211(sta); 4879 if (mvmsta->vif != vif) 4880 continue; 4881 4882 /* make sure only TDLS peers or the AP are flushed */ 4883 WARN_ON(i != mvmvif->ap_sta_id && !sta->tdls); 4884 4885 if (drop) { 4886 if (iwl_mvm_flush_sta(mvm, mvmsta, false)) 4887 IWL_ERR(mvm, "flush request fail\n"); 4888 } else { 4889 msk |= mvmsta->tfd_queue_msk; 4890 if (iwl_mvm_has_new_tx_api(mvm)) 4891 iwl_mvm_wait_sta_queues_empty(mvm, mvmsta); 4892 } 4893 } 4894 4895 mutex_unlock(&mvm->mutex); 4896 4897 /* this can take a while, and we may need/want other operations 4898 * to succeed while doing this, so do it without the mutex held 4899 */ 4900 if (!drop && !iwl_mvm_has_new_tx_api(mvm)) 4901 iwl_trans_wait_tx_queues_empty(mvm->trans, msk); 4902 } 4903 4904 static int iwl_mvm_mac_get_survey(struct ieee80211_hw *hw, int idx, 4905 struct survey_info *survey) 4906 { 4907 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 4908 int ret; 4909 4910 memset(survey, 0, sizeof(*survey)); 4911 4912 /* only support global statistics right now */ 4913 if (idx != 0) 4914 return -ENOENT; 4915 4916 if (!fw_has_capa(&mvm->fw->ucode_capa, 4917 IWL_UCODE_TLV_CAPA_RADIO_BEACON_STATS)) 4918 return -ENOENT; 4919 4920 mutex_lock(&mvm->mutex); 4921 4922 if (iwl_mvm_firmware_running(mvm)) { 4923 ret = iwl_mvm_request_statistics(mvm, false); 4924 if (ret) 4925 goto out; 4926 } 4927 4928 survey->filled = SURVEY_INFO_TIME | 4929 SURVEY_INFO_TIME_RX | 4930 SURVEY_INFO_TIME_TX | 4931 SURVEY_INFO_TIME_SCAN; 4932 survey->time = mvm->accu_radio_stats.on_time_rf + 4933 mvm->radio_stats.on_time_rf; 4934 do_div(survey->time, USEC_PER_MSEC); 4935 4936 survey->time_rx = mvm->accu_radio_stats.rx_time + 4937 mvm->radio_stats.rx_time; 4938 do_div(survey->time_rx, USEC_PER_MSEC); 4939 4940 survey->time_tx = mvm->accu_radio_stats.tx_time + 4941 mvm->radio_stats.tx_time; 4942 do_div(survey->time_tx, USEC_PER_MSEC); 4943 4944 survey->time_scan = mvm->accu_radio_stats.on_time_scan + 4945 mvm->radio_stats.on_time_scan; 4946 do_div(survey->time_scan, USEC_PER_MSEC); 4947 4948 ret = 0; 4949 out: 4950 mutex_unlock(&mvm->mutex); 4951 return ret; 4952 } 4953 4954 static void iwl_mvm_set_sta_rate(u32 rate_n_flags, struct rate_info *rinfo) 4955 { 4956 u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK; 4957 4958 switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) 
{ 4959 case RATE_MCS_CHAN_WIDTH_20: 4960 rinfo->bw = RATE_INFO_BW_20; 4961 break; 4962 case RATE_MCS_CHAN_WIDTH_40: 4963 rinfo->bw = RATE_INFO_BW_40; 4964 break; 4965 case RATE_MCS_CHAN_WIDTH_80: 4966 rinfo->bw = RATE_INFO_BW_80; 4967 break; 4968 case RATE_MCS_CHAN_WIDTH_160: 4969 rinfo->bw = RATE_INFO_BW_160; 4970 break; 4971 } 4972 4973 if (format == RATE_MCS_CCK_MSK || 4974 format == RATE_MCS_LEGACY_OFDM_MSK) { 4975 int rate = u32_get_bits(rate_n_flags, RATE_LEGACY_RATE_MSK); 4976 4977 /* add the offset needed to get to the legacy ofdm indices */ 4978 if (format == RATE_MCS_LEGACY_OFDM_MSK) 4979 rate += IWL_FIRST_OFDM_RATE; 4980 4981 switch (rate) { 4982 case IWL_RATE_1M_INDEX: 4983 rinfo->legacy = 10; 4984 break; 4985 case IWL_RATE_2M_INDEX: 4986 rinfo->legacy = 20; 4987 break; 4988 case IWL_RATE_5M_INDEX: 4989 rinfo->legacy = 55; 4990 break; 4991 case IWL_RATE_11M_INDEX: 4992 rinfo->legacy = 110; 4993 break; 4994 case IWL_RATE_6M_INDEX: 4995 rinfo->legacy = 60; 4996 break; 4997 case IWL_RATE_9M_INDEX: 4998 rinfo->legacy = 90; 4999 break; 5000 case IWL_RATE_12M_INDEX: 5001 rinfo->legacy = 120; 5002 break; 5003 case IWL_RATE_18M_INDEX: 5004 rinfo->legacy = 180; 5005 break; 5006 case IWL_RATE_24M_INDEX: 5007 rinfo->legacy = 240; 5008 break; 5009 case IWL_RATE_36M_INDEX: 5010 rinfo->legacy = 360; 5011 break; 5012 case IWL_RATE_48M_INDEX: 5013 rinfo->legacy = 480; 5014 break; 5015 case IWL_RATE_54M_INDEX: 5016 rinfo->legacy = 540; 5017 } 5018 return; 5019 } 5020 5021 rinfo->nss = u32_get_bits(rate_n_flags, 5022 RATE_MCS_NSS_MSK) + 1; 5023 rinfo->mcs = format == RATE_MCS_HT_MSK ? 5024 RATE_HT_MCS_INDEX(rate_n_flags) : 5025 u32_get_bits(rate_n_flags, RATE_MCS_CODE_MSK); 5026 5027 if (format == RATE_MCS_HE_MSK) { 5028 u32 gi_ltf = u32_get_bits(rate_n_flags, 5029 RATE_MCS_HE_GI_LTF_MSK); 5030 5031 rinfo->flags |= RATE_INFO_FLAGS_HE_MCS; 5032 5033 if (rate_n_flags & RATE_MCS_HE_106T_MSK) { 5034 rinfo->bw = RATE_INFO_BW_HE_RU; 5035 rinfo->he_ru_alloc = NL80211_RATE_INFO_HE_RU_ALLOC_106; 5036 } 5037 5038 switch (rate_n_flags & RATE_MCS_HE_TYPE_MSK) { 5039 case RATE_MCS_HE_TYPE_SU: 5040 case RATE_MCS_HE_TYPE_EXT_SU: 5041 if (gi_ltf == 0 || gi_ltf == 1) 5042 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5043 else if (gi_ltf == 2) 5044 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5045 else if (gi_ltf == 3) 5046 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5047 else 5048 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5049 break; 5050 case RATE_MCS_HE_TYPE_MU: 5051 if (gi_ltf == 0 || gi_ltf == 1) 5052 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_0_8; 5053 else if (gi_ltf == 2) 5054 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5055 else 5056 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5057 break; 5058 case RATE_MCS_HE_TYPE_TRIG: 5059 if (gi_ltf == 0 || gi_ltf == 1) 5060 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_1_6; 5061 else 5062 rinfo->he_gi = NL80211_RATE_INFO_HE_GI_3_2; 5063 break; 5064 } 5065 5066 if (rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK) 5067 rinfo->he_dcm = 1; 5068 return; 5069 } 5070 5071 if (rate_n_flags & RATE_MCS_SGI_MSK) 5072 rinfo->flags |= RATE_INFO_FLAGS_SHORT_GI; 5073 5074 if (format == RATE_MCS_HT_MSK) { 5075 rinfo->flags |= RATE_INFO_FLAGS_MCS; 5076 5077 } else if (format == RATE_MCS_VHT_MSK) { 5078 rinfo->flags |= RATE_INFO_FLAGS_VHT_MCS; 5079 } 5080 5081 } 5082 5083 static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, 5084 struct ieee80211_vif *vif, 5085 struct ieee80211_sta *sta, 5086 struct station_info *sinfo) 5087 { 5088 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5089 
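/*
 * Illustrative aside, not driver code: what the rate_info filled in by
 * iwl_mvm_set_sta_rate() above means to a consumer.  Two unit conventions
 * are visible in that function: the legacy field is in 100 kbit/s units
 * (540 == 54 Mbit/s), and the firmware rate word stores "streams - 1",
 * hence the "+ 1" when filling the NSS.  Standalone sketch with a
 * hypothetical ex_ prefix:
 */
#if 0
#include <stdio.h>

struct ex_rate_info {
	unsigned int legacy;	/* 100 kbit/s units, 0 for HT/VHT/HE rates */
	unsigned int mcs;
	unsigned int nss;
};

static void ex_print_rate(const struct ex_rate_info *r)
{
	if (r->legacy)		/* e.g. 540 -> "54.0 Mbit/s" */
		printf("%u.%u Mbit/s (legacy)\n",
		       r->legacy / 10, r->legacy % 10);
	else			/* e.g. MCS 9 with 2 spatial streams */
		printf("MCS %u, %u spatial stream(s)\n", r->mcs, r->nss);
}
#endif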
struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5090 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 5091 5092 if (mvmsta->avg_energy) { 5093 sinfo->signal_avg = -(s8)mvmsta->avg_energy; 5094 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); 5095 } 5096 5097 if (iwl_mvm_has_tlc_offload(mvm)) { 5098 struct iwl_lq_sta_rs_fw *lq_sta = &mvmsta->lq_sta.rs_fw; 5099 5100 iwl_mvm_set_sta_rate(lq_sta->last_rate_n_flags, &sinfo->txrate); 5101 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_TX_BITRATE); 5102 } 5103 5104 /* if beacon filtering isn't on, mac80211 does it anyway */ 5105 if (!(vif->driver_flags & IEEE80211_VIF_BEACON_FILTER)) 5106 return; 5107 5108 if (!vif->bss_conf.assoc) 5109 return; 5110 5111 mutex_lock(&mvm->mutex); 5112 5113 if (mvmvif->ap_sta_id != mvmsta->sta_id) 5114 goto unlock; 5115 5116 if (iwl_mvm_request_statistics(mvm, false)) 5117 goto unlock; 5118 5119 sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + 5120 mvmvif->beacon_stats.accu_num_beacons; 5121 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); 5122 if (mvmvif->beacon_stats.avg_signal) { 5123 /* firmware only reports a value after RXing a few beacons */ 5124 sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; 5125 sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); 5126 } 5127 unlock: 5128 mutex_unlock(&mvm->mutex); 5129 } 5130 5131 static void iwl_mvm_event_mlme_callback_ini(struct iwl_mvm *mvm, 5132 struct ieee80211_vif *vif, 5133 const struct ieee80211_mlme_event *mlme) 5134 { 5135 if ((mlme->data == ASSOC_EVENT || mlme->data == AUTH_EVENT) && 5136 (mlme->status == MLME_DENIED || mlme->status == MLME_TIMEOUT)) { 5137 iwl_dbg_tlv_time_point(&mvm->fwrt, 5138 IWL_FW_INI_TIME_POINT_ASSOC_FAILED, 5139 NULL); 5140 return; 5141 } 5142 5143 if (mlme->data == DEAUTH_RX_EVENT || mlme->data == DEAUTH_TX_EVENT) { 5144 iwl_dbg_tlv_time_point(&mvm->fwrt, 5145 IWL_FW_INI_TIME_POINT_DEASSOC, 5146 NULL); 5147 return; 5148 } 5149 } 5150 5151 static void iwl_mvm_event_mlme_callback(struct iwl_mvm *mvm, 5152 struct ieee80211_vif *vif, 5153 const struct ieee80211_event *event) 5154 { 5155 #define CHECK_MLME_TRIGGER(_cnt, _fmt...)
\ 5156 do { \ 5157 if ((trig_mlme->_cnt) && --(trig_mlme->_cnt)) \ 5158 break; \ 5159 iwl_fw_dbg_collect_trig(&(mvm)->fwrt, trig, _fmt); \ 5160 } while (0) 5161 5162 struct iwl_fw_dbg_trigger_tlv *trig; 5163 struct iwl_fw_dbg_trigger_mlme *trig_mlme; 5164 5165 if (iwl_trans_dbg_ini_valid(mvm->trans)) { 5166 iwl_mvm_event_mlme_callback_ini(mvm, vif, &event->u.mlme); 5167 return; 5168 } 5169 5170 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5171 FW_DBG_TRIGGER_MLME); 5172 if (!trig) 5173 return; 5174 5175 trig_mlme = (void *)trig->data; 5176 5177 if (event->u.mlme.data == ASSOC_EVENT) { 5178 if (event->u.mlme.status == MLME_DENIED) 5179 CHECK_MLME_TRIGGER(stop_assoc_denied, 5180 "DENIED ASSOC: reason %d", 5181 event->u.mlme.reason); 5182 else if (event->u.mlme.status == MLME_TIMEOUT) 5183 CHECK_MLME_TRIGGER(stop_assoc_timeout, 5184 "ASSOC TIMEOUT"); 5185 } else if (event->u.mlme.data == AUTH_EVENT) { 5186 if (event->u.mlme.status == MLME_DENIED) 5187 CHECK_MLME_TRIGGER(stop_auth_denied, 5188 "DENIED AUTH: reason %d", 5189 event->u.mlme.reason); 5190 else if (event->u.mlme.status == MLME_TIMEOUT) 5191 CHECK_MLME_TRIGGER(stop_auth_timeout, 5192 "AUTH TIMEOUT"); 5193 } else if (event->u.mlme.data == DEAUTH_RX_EVENT) { 5194 CHECK_MLME_TRIGGER(stop_rx_deauth, 5195 "DEAUTH RX %d", event->u.mlme.reason); 5196 } else if (event->u.mlme.data == DEAUTH_TX_EVENT) { 5197 CHECK_MLME_TRIGGER(stop_tx_deauth, 5198 "DEAUTH TX %d", event->u.mlme.reason); 5199 } 5200 #undef CHECK_MLME_TRIGGER 5201 } 5202 5203 static void iwl_mvm_event_bar_rx_callback(struct iwl_mvm *mvm, 5204 struct ieee80211_vif *vif, 5205 const struct ieee80211_event *event) 5206 { 5207 struct iwl_fw_dbg_trigger_tlv *trig; 5208 struct iwl_fw_dbg_trigger_ba *ba_trig; 5209 5210 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, ieee80211_vif_to_wdev(vif), 5211 FW_DBG_TRIGGER_BA); 5212 if (!trig) 5213 return; 5214 5215 ba_trig = (void *)trig->data; 5216 5217 if (!(le16_to_cpu(ba_trig->rx_bar) & BIT(event->u.ba.tid))) 5218 return; 5219 5220 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig, 5221 "BAR received from %pM, tid %d, ssn %d", 5222 event->u.ba.sta->addr, event->u.ba.tid, 5223 event->u.ba.ssn); 5224 } 5225 5226 static void iwl_mvm_mac_event_callback(struct ieee80211_hw *hw, 5227 struct ieee80211_vif *vif, 5228 const struct ieee80211_event *event) 5229 { 5230 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5231 5232 switch (event->type) { 5233 case MLME_EVENT: 5234 iwl_mvm_event_mlme_callback(mvm, vif, event); 5235 break; 5236 case BAR_RX_EVENT: 5237 iwl_mvm_event_bar_rx_callback(mvm, vif, event); 5238 break; 5239 case BA_FRAME_TIMEOUT: 5240 iwl_mvm_event_frame_timeout_callback(mvm, vif, event->u.ba.sta, 5241 event->u.ba.tid); 5242 break; 5243 default: 5244 break; 5245 } 5246 } 5247 5248 void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, 5249 enum iwl_mvm_rxq_notif_type type, 5250 bool sync, 5251 const void *data, u32 size) 5252 { 5253 struct { 5254 struct iwl_rxq_sync_cmd cmd; 5255 struct iwl_mvm_internal_rxq_notif notif; 5256 } __packed cmd = { 5257 .cmd.rxq_mask = cpu_to_le32(BIT(mvm->trans->num_rx_queues) - 1), 5258 .cmd.count = 5259 cpu_to_le32(sizeof(struct iwl_mvm_internal_rxq_notif) + 5260 size), 5261 .notif.type = type, 5262 .notif.sync = sync, 5263 }; 5264 struct iwl_host_cmd hcmd = { 5265 .id = WIDE_ID(DATA_PATH_GROUP, TRIGGER_RX_QUEUES_NOTIF_CMD), 5266 .data[0] = &cmd, 5267 .len[0] = sizeof(cmd), 5268 .data[1] = data, 5269 .len[1] = size, 5270 .flags = sync ? 
0 : CMD_ASYNC, 5271 }; 5272 int ret; 5273 5274 /* size must be a multiple of DWORD */ 5275 if (WARN_ON(cmd.cmd.count & cpu_to_le32(3))) 5276 return; 5277 5278 if (!iwl_mvm_has_new_rx_api(mvm)) 5279 return; 5280 5281 if (sync) { 5282 cmd.notif.cookie = mvm->queue_sync_cookie; 5283 mvm->queue_sync_state = (1 << mvm->trans->num_rx_queues) - 1; 5284 } 5285 5286 ret = iwl_mvm_send_cmd(mvm, &hcmd); 5287 if (ret) { 5288 IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); 5289 goto out; 5290 } 5291 5292 if (sync) { 5293 lockdep_assert_held(&mvm->mutex); 5294 ret = wait_event_timeout(mvm->rx_sync_waitq, 5295 READ_ONCE(mvm->queue_sync_state) == 0 || 5296 iwl_mvm_is_radio_killed(mvm), 5297 HZ); 5298 WARN_ONCE(!ret && !iwl_mvm_is_radio_killed(mvm), 5299 "queue sync: failed to sync, state is 0x%lx\n", 5300 mvm->queue_sync_state); 5301 } 5302 5303 out: 5304 if (sync) { 5305 mvm->queue_sync_state = 0; 5306 mvm->queue_sync_cookie++; 5307 } 5308 } 5309 5310 static void iwl_mvm_sync_rx_queues(struct ieee80211_hw *hw) 5311 { 5312 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5313 5314 mutex_lock(&mvm->mutex); 5315 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, true, NULL, 0); 5316 mutex_unlock(&mvm->mutex); 5317 } 5318 5319 static int 5320 iwl_mvm_mac_get_ftm_responder_stats(struct ieee80211_hw *hw, 5321 struct ieee80211_vif *vif, 5322 struct cfg80211_ftm_responder_stats *stats) 5323 { 5324 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5325 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 5326 5327 if (vif->p2p || vif->type != NL80211_IFTYPE_AP || 5328 !mvmvif->ap_ibss_active || !vif->bss_conf.ftm_responder) 5329 return -EINVAL; 5330 5331 mutex_lock(&mvm->mutex); 5332 *stats = mvm->ftm_resp_stats; 5333 mutex_unlock(&mvm->mutex); 5334 5335 stats->filled = BIT(NL80211_FTM_STATS_SUCCESS_NUM) | 5336 BIT(NL80211_FTM_STATS_PARTIAL_NUM) | 5337 BIT(NL80211_FTM_STATS_FAILED_NUM) | 5338 BIT(NL80211_FTM_STATS_ASAP_NUM) | 5339 BIT(NL80211_FTM_STATS_NON_ASAP_NUM) | 5340 BIT(NL80211_FTM_STATS_TOTAL_DURATION_MSEC) | 5341 BIT(NL80211_FTM_STATS_UNKNOWN_TRIGGERS_NUM) | 5342 BIT(NL80211_FTM_STATS_RESCHEDULE_REQUESTS_NUM) | 5343 BIT(NL80211_FTM_STATS_OUT_OF_WINDOW_TRIGGERS_NUM); 5344 5345 return 0; 5346 } 5347 5348 static int iwl_mvm_start_pmsr(struct ieee80211_hw *hw, 5349 struct ieee80211_vif *vif, 5350 struct cfg80211_pmsr_request *request) 5351 { 5352 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5353 int ret; 5354 5355 mutex_lock(&mvm->mutex); 5356 ret = iwl_mvm_ftm_start(mvm, vif, request); 5357 mutex_unlock(&mvm->mutex); 5358 5359 return ret; 5360 } 5361 5362 static void iwl_mvm_abort_pmsr(struct ieee80211_hw *hw, 5363 struct ieee80211_vif *vif, 5364 struct cfg80211_pmsr_request *request) 5365 { 5366 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5367 5368 mutex_lock(&mvm->mutex); 5369 iwl_mvm_ftm_abort(mvm, request); 5370 mutex_unlock(&mvm->mutex); 5371 } 5372 5373 static bool iwl_mvm_can_hw_csum(struct sk_buff *skb) 5374 { 5375 u8 protocol = ip_hdr(skb)->protocol; 5376 5377 if (!IS_ENABLED(CONFIG_INET)) 5378 return false; 5379 5380 return protocol == IPPROTO_TCP || protocol == IPPROTO_UDP; 5381 } 5382 5383 static bool iwl_mvm_mac_can_aggregate(struct ieee80211_hw *hw, 5384 struct sk_buff *head, 5385 struct sk_buff *skb) 5386 { 5387 struct iwl_mvm *mvm = IWL_MAC80211_GET_MVM(hw); 5388 5389 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) 5390 return iwl_mvm_tx_csum_bz(mvm, head, true) == 5391 iwl_mvm_tx_csum_bz(mvm, skb, true); 5392 5393 /* For now don't aggregate 
IPv6 in AMSDU */ 5394 if (skb->protocol != htons(ETH_P_IP)) 5395 return false; 5396 5397 if (!iwl_mvm_is_csum_supported(mvm)) 5398 return true; 5399 5400 return iwl_mvm_can_hw_csum(skb) == iwl_mvm_can_hw_csum(head); 5401 } 5402 5403 const struct ieee80211_ops iwl_mvm_hw_ops = { 5404 .tx = iwl_mvm_mac_tx, 5405 .wake_tx_queue = iwl_mvm_mac_wake_tx_queue, 5406 .ampdu_action = iwl_mvm_mac_ampdu_action, 5407 .get_antenna = iwl_mvm_op_get_antenna, 5408 .start = iwl_mvm_mac_start, 5409 .reconfig_complete = iwl_mvm_mac_reconfig_complete, 5410 .stop = iwl_mvm_mac_stop, 5411 .add_interface = iwl_mvm_mac_add_interface, 5412 .remove_interface = iwl_mvm_mac_remove_interface, 5413 .config = iwl_mvm_mac_config, 5414 .prepare_multicast = iwl_mvm_prepare_multicast, 5415 .configure_filter = iwl_mvm_configure_filter, 5416 .config_iface_filter = iwl_mvm_config_iface_filter, 5417 .bss_info_changed = iwl_mvm_bss_info_changed, 5418 .hw_scan = iwl_mvm_mac_hw_scan, 5419 .cancel_hw_scan = iwl_mvm_mac_cancel_hw_scan, 5420 .sta_pre_rcu_remove = iwl_mvm_sta_pre_rcu_remove, 5421 .sta_state = iwl_mvm_mac_sta_state, 5422 .sta_notify = iwl_mvm_mac_sta_notify, 5423 .allow_buffered_frames = iwl_mvm_mac_allow_buffered_frames, 5424 .release_buffered_frames = iwl_mvm_mac_release_buffered_frames, 5425 .set_rts_threshold = iwl_mvm_mac_set_rts_threshold, 5426 .sta_rc_update = iwl_mvm_sta_rc_update, 5427 .conf_tx = iwl_mvm_mac_conf_tx, 5428 .mgd_prepare_tx = iwl_mvm_mac_mgd_prepare_tx, 5429 .mgd_complete_tx = iwl_mvm_mac_mgd_complete_tx, 5430 .mgd_protect_tdls_discover = iwl_mvm_mac_mgd_protect_tdls_discover, 5431 .flush = iwl_mvm_mac_flush, 5432 .sched_scan_start = iwl_mvm_mac_sched_scan_start, 5433 .sched_scan_stop = iwl_mvm_mac_sched_scan_stop, 5434 .set_key = iwl_mvm_mac_set_key, 5435 .update_tkip_key = iwl_mvm_mac_update_tkip_key, 5436 .remain_on_channel = iwl_mvm_roc, 5437 .cancel_remain_on_channel = iwl_mvm_cancel_roc, 5438 .add_chanctx = iwl_mvm_add_chanctx, 5439 .remove_chanctx = iwl_mvm_remove_chanctx, 5440 .change_chanctx = iwl_mvm_change_chanctx, 5441 .assign_vif_chanctx = iwl_mvm_assign_vif_chanctx, 5442 .unassign_vif_chanctx = iwl_mvm_unassign_vif_chanctx, 5443 .switch_vif_chanctx = iwl_mvm_switch_vif_chanctx, 5444 5445 .start_ap = iwl_mvm_start_ap_ibss, 5446 .stop_ap = iwl_mvm_stop_ap_ibss, 5447 .join_ibss = iwl_mvm_start_ap_ibss, 5448 .leave_ibss = iwl_mvm_stop_ap_ibss, 5449 5450 .tx_last_beacon = iwl_mvm_tx_last_beacon, 5451 5452 .set_tim = iwl_mvm_set_tim, 5453 5454 .channel_switch = iwl_mvm_channel_switch, 5455 .pre_channel_switch = iwl_mvm_pre_channel_switch, 5456 .post_channel_switch = iwl_mvm_post_channel_switch, 5457 .abort_channel_switch = iwl_mvm_abort_channel_switch, 5458 .channel_switch_rx_beacon = iwl_mvm_channel_switch_rx_beacon, 5459 5460 .tdls_channel_switch = iwl_mvm_tdls_channel_switch, 5461 .tdls_cancel_channel_switch = iwl_mvm_tdls_cancel_channel_switch, 5462 .tdls_recv_channel_switch = iwl_mvm_tdls_recv_channel_switch, 5463 5464 .event_callback = iwl_mvm_mac_event_callback, 5465 5466 .sync_rx_queues = iwl_mvm_sync_rx_queues, 5467 5468 CFG80211_TESTMODE_CMD(iwl_mvm_mac_testmode_cmd) 5469 5470 #ifdef CONFIG_PM_SLEEP 5471 /* look at d3.c */ 5472 .suspend = iwl_mvm_suspend, 5473 .resume = iwl_mvm_resume, 5474 .set_wakeup = iwl_mvm_set_wakeup, 5475 .set_rekey_data = iwl_mvm_set_rekey_data, 5476 #if IS_ENABLED(CONFIG_IPV6) 5477 .ipv6_addr_change = iwl_mvm_ipv6_addr_change, 5478 #endif 5479 .set_default_unicast_key = iwl_mvm_set_default_unicast_key, 5480 #endif 5481 .get_survey = 
iwl_mvm_mac_get_survey, 5482 .sta_statistics = iwl_mvm_mac_sta_statistics, 5483 .get_ftm_responder_stats = iwl_mvm_mac_get_ftm_responder_stats, 5484 .start_pmsr = iwl_mvm_start_pmsr, 5485 .abort_pmsr = iwl_mvm_abort_pmsr, 5486 5487 .can_aggregate_in_amsdu = iwl_mvm_mac_can_aggregate, 5488 #ifdef CONFIG_IWLWIFI_DEBUGFS 5489 .sta_add_debugfs = iwl_mvm_sta_add_debugfs, 5490 #endif 5491 }; 5492
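/*
 * Illustrative aside, not driver code: the bookkeeping behind
 * iwl_mvm_sync_rx_queues_internal() above.  One bit per RX queue is set
 * before the sync message goes out; every queue clears its own bit when it
 * processes the notification, and the waiter is done when the mask reaches
 * zero.  A cookie distinguishes acknowledgements of an older sync round
 * from the current one.  Minimal standalone sketch (ex_* names are
 * hypothetical); the driver itself serializes this under its mutex and
 * waits with wait_event_timeout() on rx_sync_waitq rather than polling.
 */
#if 0
#include <stdbool.h>
#include <stdint.h>

struct ex_sync {
	uint32_t pending;	/* bit i set: queue i has not acked yet */
	uint32_t cookie;	/* identifies the current sync round */
};

/* assumes fewer than 32 RX queues, as the bitmask in the driver does */
static void ex_sync_start(struct ex_sync *s, unsigned int num_rx_queues)
{
	s->pending = (1u << num_rx_queues) - 1;
}

/* called from each RX queue when it sees the sync notification */
static void ex_sync_ack(struct ex_sync *s, unsigned int queue,
			uint32_t notif_cookie)
{
	if (notif_cookie != s->cookie)
		return;			/* ack for an older sync round */
	s->pending &= ~(1u << queue);
}

static bool ex_sync_done(const struct ex_sync *s)
{
	return s->pending == 0;
}

static void ex_sync_finish(struct ex_sync *s)
{
	s->pending = 0;
	s->cookie++;			/* next round gets a fresh cookie */
}
#endif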