// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* Calculate the A-MPDU density and maximum aggregation size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * out of Draft P802.11be D3.0 Table 10-12a--Fields used for
	 * calculating the maximum A-MPDU size of various PPDU types in
	 * different bands, we only need to worry about the highest supported
	 * PPDU type here.
	 */

	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chanreq.oper.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum A-MPDU Length Exponent Extension field in its HE
	 * Capabilities element.
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}

u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chanreq.oper.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

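/*
 * RX BA session timer: if the session's last RX is recent enough, re-arm
 * the timer for the remaining time; otherwise notify mac80211 that the
 * RX BA session timed out so it can tear the session down.
 */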
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * the A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

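	/* Look up the station that owns this queue */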
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

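	/* From here on we are on the older, SCD-based TX path */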
	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

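	/* Per-queue TID bookkeeping only exists on the legacy TX path */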
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 512 BA size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 BA size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		struct ieee80211_link_sta *link_sta;
		unsigned int link_id;

		rcu_read_lock();
		for_each_sta_active_link(mvmsta->vif, sta, link_sta, link_id) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
		rcu_read_unlock();
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
				    tid, size, timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, sta_mask, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

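	/* On the new TX path, queues are allocated via iwl_mvm_tvqm_enable_txq() instead */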
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

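	/* The queue now serves a single TID again, so mark it as ready */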
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return that it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

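	/*
	 * queue_owner was looked up under mvm->mutex, which also protects
	 * the fw_id_to_mac_id entries, so it remains valid after the RCU
	 * read lock is dropped below.
	 */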
	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
	     IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->deflink.sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

	return ret;
}

int iwl_mvm_sta_ensure_queue(struct iwl_mvm *mvm,
			     struct ieee80211_txq *txq)
{
	struct iwl_mvm_txq *mvmtxq = iwl_mvm_txq_from_mac80211(txq);
	int ret = -EINVAL;

	lockdep_assert_held(&mvm->mutex);

	if (likely(test_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state)) ||
	    !txq->sta) {
		return 0;
	}
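
	/* Not ready yet - try to allocate a queue for this TXQ now */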
	if (!iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, txq->tid)) {
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		ret = 0;
	}

	local_bh_disable();
	spin_lock(&mvm->add_stream_lock);
	if (!list_empty(&mvmtxq->list))
		list_del_init(&mvmtxq->list);
	spin_unlock(&mvm->add_stream_lock);
	local_bh_enable();

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

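/*
 * Reserve a data queue for a station that is being added. The queue is only
 * marked as reserved here; it is put to use later by iwl_mvm_sta_alloc_queue().
 */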
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->deflink.sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->deflink.sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->deflink.sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
							 mvm_sta->deflink.sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so the driver will need to update it
			 * internally as well, so it stays in sync with the real
			 * value
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->deflink.sta_id, i,
					    txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_mvm_has_new_station_api(mvm->fw) &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

/* Initialize driver data of a new sta */
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret = 0;

	lockdep_assert_held(&mvm->mutex);

	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;

	/* for MLD sta_id(s) should be allocated for each link before calling
	 * this function
	 */
	if (!mvm->mld_api_is_used) {
		if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
			return -EINVAL;

		mvm_sta->deflink.sta_id = sta_id;
		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);

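		/* the default aggregation frame limit differs for gen2 HW */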
		if (!mvm->trans->trans_cfg->gen2)
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
		else
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	}

	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta_type;

	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			return ret;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

	/* MPDUs are counted only when EMLSR is possible */
	if (vif->type == NL80211_IFTYPE_STATION && !vif->p2p &&
	    !sta->tdls && ieee80211_vif_is_mld(vif)) {
		mvm_sta->mpdu_counters =
			kcalloc(mvm->trans->num_rx_queues,
				sizeof(*mvm_sta->mpdu_counters),
				GFP_KERNEL);
		if (mvm_sta->mpdu_counters)
			for (int q = 0; q < mvm->trans->num_rx_queues; q++)
				spin_lock_init(&mvm_sta->mpdu_counters[q].lock);
	}

	return 0;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->deflink.sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/* First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

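	/* Not a HW restart - initialize the station driver data from scratch */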
	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
	if (ret)
		goto err;

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->deflink.ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->deflink.sta_id);
		break;
	default:
		ret = -EIO;
#if defined(__linux__)
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->deflink.sta_id);
#elif defined(__FreeBSD__)
		IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",
			mvmsta->deflink.sta_id, status);
#endif
		break;
	}

	return ret;
}

/*
 * Remove a station from the FW table.
 * Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (sanity
 * only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id,
				    &mvm_sta->tid_data[i].txq_id, i);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		spin_lock_bh(&mvm->add_stream_lock);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		spin_unlock_bh(&mvm->add_stream_lock);
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}

/* Execute the common part for both MLD and non-MLD modes.
 * Returns true if we're done with removing the station, either
 * with error or success
 */
bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta,
		     struct ieee80211_link_sta *link_sta, int *ret)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_vif_link_info *mvm_link =
		mvmvif->link[link_sta->link_id];
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_link_sta *mvm_link_sta;
	u8 sta_id;

	lockdep_assert_held(&mvm->mutex);

	mvm_link_sta =
		rcu_dereference_protected(mvm_sta->link[link_sta->link_id],
					  lockdep_is_held(&mvm->mutex));
	sta_id = mvm_link_sta->sta_id;

	/* If there is a TXQ still marked as reserved - free it */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
		u8 reserved_txq = mvm_sta->reserved_queue;
		enum iwl_mvm_queue_status *status;

		/*
		 * If no traffic has gone through the reserved TXQ - it
		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
		 * should be manually marked as free again
		 */
		status = &mvm->queue_info[reserved_txq].status;
		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
			 (*status != IWL_MVM_QUEUE_FREE),
			 "sta_id %d reserved txq %d status %d",
			 sta_id, reserved_txq, *status)) {
			*ret = -EINVAL;
			return true;
		}

		*status = IWL_MVM_QUEUE_FREE;
	}

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvm_link->ap_sta_id == sta_id) {
		/* if associated - we can't remove the AP STA now */
		if (vif->cfg.assoc)
			return true;

		/* first remove remaining keys */
		iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0);

		/* unassoc - go ahead - remove the AP STA now */
		mvm_link->ap_sta_id = IWL_MVM_INVALID_STA;
	}

	/*
	 * This shouldn't happen - the TDLS channel switch should be canceled
	 * before the STA is removed.
	 */
	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
		cancel_delayed_work(&mvm->tdls_cs.dwork);
	}

	return false;
}

int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
		   struct ieee80211_vif *vif,
		   struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
	if (ret)
		return ret;

	/* flush its queues here since we are freeing mvm_sta */
	ret = iwl_mvm_flush_sta(mvm, mvm_sta->deflink.sta_id,
				mvm_sta->tfd_queue_msk);
	if (ret)
		return ret;
	if (iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
	} else {
		u32 q_mask = mvm_sta->tfd_queue_msk;

		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     q_mask);
	}
	if (ret)
		return ret;

	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);

	iwl_mvm_disable_sta_queues(mvm, vif, sta);

	if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret))
		return ret;

	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id);
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL);

	return ret;
}

int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
		      struct ieee80211_vif *vif,
		      u8 sta_id)
{
	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);

	lockdep_assert_held(&mvm->mutex);

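	/* Clear the driver's mapping even if the FW removal command failed */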
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
	return ret;
}

int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
			     struct iwl_mvm_int_sta *sta,
			     u32 qmask, enum nl80211_iftype iftype,
			     u8 type)
{
	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
	    sta->sta_id == IWL_MVM_INVALID_STA) {
		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
			return -ENOSPC;
	}

	sta->tfd_queue_msk = qmask;
	sta->type = type;

	/* put a non-NULL value so iterating over the stations won't stop */
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
	return 0;
}

void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
{
	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
	sta->sta_id = IWL_MVM_INVALID_STA;
}

static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
					  u8 sta_id, u8 fifo)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = fifo,
		.sta_id = sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	WARN_ON(iwl_mvm_has_new_tx_api(mvm));

	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
}

static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
{
	unsigned int wdg_timeout =
		mvm->trans->trans_cfg->base_params->wd_timeout;

	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));

	return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT,
				       wdg_timeout);
}

static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;
static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
					  int maccolor, u8 *addr,
					  struct iwl_mvm_int_sta *sta,
					  u16 *queue, int fifo)
{
	int ret;

	/* Map queue to fifo - needs to happen before adding station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);

	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
	if (ret) {
		if (!iwl_mvm_has_new_tx_api(mvm))
			iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue,
					    IWL_MAX_TID_COUNT);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int txq;

		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
		if (txq < 0) {
			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
			return txq;
		}

		*queue = txq;
	}

	return 0;
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
{
	int ret;
	u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 :
		BIT(mvm->aux_queue);

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask,
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/*
	 * In CDB NICs we need to specify which lmac to use for the aux
	 * activity; reuse the mac_id argument slot to pass lmac_id to
	 * the function.
	 */
	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
					     &mvm->aux_sta, &mvm->aux_queue,
					     IWL_MVM_TX_FIFO_MCAST);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	return 0;
}
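/*
 * The sniffer station below is the monitor-mode counterpart of the aux
 * station: it is bound to the sniffer queue on the BE fifo and is torn
 * down again by iwl_mvm_rm_snif_sta().
 */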
int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
					      NULL, &mvm->snif_sta,
					      &mvm->snif_queue,
					      IWL_MVM_TX_FIFO_BE);
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id,
			    &mvm->snif_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
		return -EINVAL;

	iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id,
			    &mvm->aux_queue, IWL_MAX_TID_COUNT);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}
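/*
 * Teardown for internal stations is the mirror image of setup: disable
 * the TX queue first, then remove the station from the firmware, and
 * finally clear the driver-side bookkeeping with
 * iwl_mvm_dealloc_int_sta().
 */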
/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated (it lives in the
 * vif's deflink).
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->deflink.bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			queue = mvm->probe_queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			queue = mvm->p2p_dev_queue;
		} else {
			WARN(1, "Missing required TXQ for adding bcast STA\n");
			return -EINVAL;
		}

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add queue to a station unknown
	 * to firmware so enable queue here - after the station was added
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		if (queue < 0) {
			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
			return queue;
		}

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC) {
			/* for queue management */
			mvm->probe_queue = queue;
			/* for use in TX */
			mvmvif->deflink.mgmt_queue = queue;
		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
			mvm->p2p_dev_queue = queue;
		}
	} else if (vif->type == NL80211_IFTYPE_AP ||
		   vif->type == NL80211_IFTYPE_ADHOC) {
		/* set it for use in TX */
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;
	}

	return 0;
}
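/*
 * Broadcast-station queue selection above, for reference:
 *
 *	AP / IBSS	-> mvm->probe_queue (also mirrored to mgmt_queue)
 *	P2P Device	-> mvm->p2p_dev_queue
 *
 * Any other interface type is a bug and triggers the WARN.
 */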
void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	u16 *queueptr, queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.bcast_sta.sta_id,
			  mvmvif->deflink.bcast_sta.tfd_queue_msk);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queueptr = &mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queueptr = &mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	queue = *queueptr;
	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id,
			    queueptr, IWL_MAX_TID_COUNT);

	if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.mgmt_queue = mvm->probe_queue;

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures. */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}
int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}
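/*
 * The multicast station below complements the broadcast one: the
 * broadcast station carries management traffic such as probe
 * responses, while the multicast station owns the CAB (content after
 * beacon) queue used for buffered multicast delivery.  Each AP/GO MAC
 * gets its own pair.
 */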
/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = vif->type == NL80211_IFTYPE_AP ?
			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -EOPNOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		goto err;

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id,
						    0, timeout);
		if (queue < 0) {
			ret = queue;
			goto err;
		}
		mvmvif->deflink.cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0,
				   &cfg,
				   timeout);

	return 0;
err:
	iwl_mvm_dealloc_int_sta(mvm, msta);
	return ret;
}
static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
				    struct ieee80211_key_conf *keyconf,
				    bool mcast)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	__le16 key_flags;
	int ret, size;
	u32 status;

	/* This is a valid situation for GTK removal */
	if (sta_id == IWL_MVM_INVALID_STA)
		return 0;

	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
				 STA_KEY_FLG_KEYID_MSK);
	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	/*
	 * The fields assigned here are in the same location at the start
	 * of the command, so we can do this union trick.
	 */
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.key_offset = keyconf->hw_key_idx;
	u.cmd.common.sta_id = sta_id;

	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
					  &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
		break;
	}

	return ret;
}
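/*
 * The union trick above works because the v1 and current ADD_STA_KEY
 * layouts share a common prefix; only the trailing fields differ, so
 * picking sizeof() of the right member selects the API version.
 */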
/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, mvmvif->deflink.mcast_sta.sta_id,
			  mvmvif->deflink.mcast_sta.tfd_queue_msk);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id,
			    &mvmvif->deflink.cab_queue, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_data notif = {
		.baid = baid,
	};

	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
					&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);

		spin_unlock_bh(&reorder_buf->lock);
	}
}
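/*
 * With multi-queue RX every RX queue owns one slice of the reorder
 * buffer, which is why the teardown above first synchronizes all RX
 * queues (the DELBA notification) before any slice is purged.
 */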
static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}

static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.sta_id = mvm_sta->deflink.sta_id,
		.add_modify = STA_MODE_MODIFY,
	};
	u32 status;
	int ret;

	if (start) {
		cmd.add_immediate_ba_tid = tid;
		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
		cmd.rx_ba_window = cpu_to_le16(buf_size);
		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
	} else {
		cmd.remove_immediate_ba_tid = tid;
		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
			     start ? "start" : "stopp");
		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
			return -EINVAL;
		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
	case ADD_STA_IMMEDIATE_BA_FAILURE:
		IWL_WARN(mvm, "RX BA Session refused by fw\n");
		return -ENOSPC;
	default:
		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		return -EIO;
	}
}
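/*
 * Legacy BAID path above: the BAID is carved out of the ADD_STA status
 * word.  Newer firmware (handled below) has a dedicated BAID
 * allocation command instead.
 */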
static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
				  struct ieee80211_sta *sta,
				  bool start, int tid, u16 ssn,
				  u16 buf_size, int baid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_SEND_IN_RFKILL,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	int ret;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	if (start) {
		cmd.alloc.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.alloc.tid = tid;
		cmd.alloc.ssn = cpu_to_le16(ssn);
		cmd.alloc.win_size = cpu_to_le16(buf_size);
		baid = -EIO;
	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, hcmd.id, 1) == 1) {
		cmd.remove_v1.baid = cpu_to_le32(baid);
		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
	} else {
		cmd.remove.sta_id_mask =
			cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1));
		cmd.remove.tid = cpu_to_le32(tid);
	}

	ret = iwl_mvm_send_cmd_status(mvm, &hcmd, &baid);
	if (ret)
		return ret;

	if (!start) {
		/* ignore firmware baid on remove */
		baid = 0;
	}

	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
		     start ? "start" : "stopp");

	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
		return -EINVAL;

	return baid;
}
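/*
 * Dispatch helper: firmware advertising BAID_ML_SUPPORT takes the
 * dedicated RX_BAID_ALLOCATION_CONFIG_CMD path; everything else falls
 * back to the ADD_STA based flow.
 */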
static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			      bool start, int tid, u16 ssn, u16 buf_size,
			      int baid)
{
	if (fw_has_capa(&mvm->fw->ucode_capa,
			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
		return iwl_mvm_fw_baid_op_cmd(mvm, sta, start,
					      tid, ssn, buf_size, baid);

	return iwl_mvm_fw_baid_op_sta(mvm, sta, start,
				      tid, ssn, buf_size);
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret, baid;
	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
							       IWL_MAX_BAID_OLD;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
		 */
		baid_data->entries_per_queue =
			reorder_buf_size / sizeof(baid_data->entries[0]);
	}
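	/*
	 * At this point baid_data (if needed) is only allocated; the
	 * firmware knows nothing about the session yet.  The actual
	 * BAID is obtained or released below.
	 */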
	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
		baid = mvm_sta->tid_to_baid[tid];
	} else {
		/* we don't really need it in this case */
		baid = -1;
	}

	/* Don't send command to remove (start=0) BAID during restart */
	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size,
					  baid);

	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	if (start) {
		mvm->rx_ba_sessions++;

		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		baid_data->baid = baid;
		baid_data->timeout = timeout;
		baid_data->last_rx = jiffies;
		baid_data->rcu_ptr = &mvm->baid_map[baid];
		timer_setup(&baid_data->session_timer,
			    iwl_mvm_rx_agg_session_expired, 0);
		baid_data->mvm = mvm;
		baid_data->tid = tid;
		baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1);
		baid_data->buf_size = buf_size;

		mvm_sta->tid_to_baid[tid] = baid;
		if (timeout)
			mod_timer(&baid_data->session_timer,
				  TU_TO_EXP_TIME(timeout * 2));

		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn);
		/*
		 * protect the BA data with RCU to cover a case where our
		 * internal RX sync mechanism will timeout (not that it's
		 * supposed to happen) and we will free the session data while
		 * RX is being processed in parallel
		 */
		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
			     mvm_sta->deflink.sta_id, tid, baid);
		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
	} else {
		baid = mvm_sta->tid_to_baid[tid];

		if (mvm->rx_ba_sessions > 0)
			/* check that restart flow didn't zero the counter */
			mvm->rx_ba_sessions--;
		if (!iwl_mvm_has_new_rx_api(mvm))
			return 0;

		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
			return -EINVAL;

		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
		if (WARN_ON(!baid_data))
			return -EINVAL;

		/* synchronize all rx queues so we can safely delete */
		iwl_mvm_free_reorder(mvm, baid_data);
		timer_shutdown_sync(&baid_data->session_timer);
		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
		kfree_rcu(baid_data, rcu_head);
		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
	}
	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u8 queue, bool start)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start) {
		mvm_sta->tfd_queue_msk |= BIT(queue);
		mvm_sta->tid_disable_agg &= ~BIT(tid);
	} else {
		/* In DQA-mode the queue isn't removed on agg termination */
		mvm_sta->tid_disable_agg |= BIT(tid);
	}

	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
	cmd.sta_id = mvm_sta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.modify_mask = STA_MODIFY_QUEUES;
	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}
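/*
 * TID to AC mapping below follows the usual 802.11 UP-to-AC rules:
 * TIDs 0 and 3 map to best effort, 1 and 2 to background, 4 and 5 to
 * video, and 6 and 7 to voice.
 */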
const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	u16 normalized_ssn;
	u16 txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm,
			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
	    iwl_mvm_has_new_tx_api(mvm)) {
		u8 ac = tid_to_mac80211_ac[tid];

		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
		if (ret)
			return ret;
	}

	spin_lock_bh(&mvmsta->lock);

	/*
	 * Note the possible cases:
	 *  1. An enabled TXQ - TXQ needs to become agg'ed
	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "tid_id %d out of range (0, %d)!\n",
			tid, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->trans_cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
	}

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}
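/*
 * In the mac80211 A-MPDU flow, iwl_mvm_sta_tx_agg_start() above only
 * reserves resources and tells mac80211 whether the ADDBA request can
 * go out immediately; the session becomes operational in
 * iwl_mvm_sta_tx_agg_oper() below, once the peer has answered the
 * ADDBA handshake.
 */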
int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if the aggregation size is different from the
		 * default size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -EOPNOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->deflink.sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
		min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize,
		    buf_size);
	mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit =
		mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq);
}
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->deflink.sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->deflink.sta_id, tid, txq_id,
			    tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);

		if (iwl_mvm_has_new_tx_api(mvm)) {
			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id,
						   BIT(tid)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		} else {
			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
		}

		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
	}

	return 0;
}

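/*
 * Firmware key-slot bookkeeping (descriptive note): mvm->fw_key_table is a
 * bitmap of offsets currently in use, and fw_key_deleted[] ages the free
 * slots so that the offset freed longest ago is handed out first (see the
 * aging side in iwl_mvm_remove_sta_key()). For example, with slots 0-2
 * free and fw_key_deleted = {5, 0, 9}, slot 2 is picked.
 */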
static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
{
	int i;

	for (i = len - 1; i >= 0; i--) {
		if (pn1[i] > pn2[i])
			return 1;
		if (pn1[i] < pn2[i])
			return -1;
	}

	return 0;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp)
{
	union {
		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
		struct iwl_mvm_add_sta_key_cmd cmd;
	} u = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	u64 pn = 0;
	int i, size;
	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
					    new_api ? 2 : 1);

	if (sta_id == IWL_MVM_INVALID_STA)
		return -EINVAL;

	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	if (key->flags & IEEE80211_KEY_FLAG_SPP_AMSDU)
		key_flags |= cpu_to_le16(STA_KEY_FLG_AMSDU_SPP);

	switch (key->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		if (api_ver >= 2) {
			memcpy((void *)&u.cmd.tx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);

			memcpy((void *)&u.cmd.rx_mic_key,
			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
			       IWL_MIC_KEY_SIZE);
			pn = atomic64_read(&key->tx_pn);

		} else {
			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
			for (i = 0; i < 5; i++)
				u.cmd_v1.tkip_rx_ttak[i] =
					cpu_to_le16(tkip_p1k[i]);
		}
		memcpy(u.cmd.common.key, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
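		/*
		 * WEP (descriptive note): 104-bit keys additionally flag
		 * the 13-byte length, then fall into the common WEP path
		 * below; the key material lands at offset 3 of the command
		 * buffer, which appears to be the firmware's expected WEP
		 * key layout.
		 */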
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		fallthrough;
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(u.cmd.common.key, key->key, key->keylen);
		if (api_ver >= 2)
			pn = atomic64_read(&key->tx_pn);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(u.cmd.common.key, key->key, key->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
	if (mfp)
		key_flags |= cpu_to_le16(STA_KEY_MFP);

	u.cmd.common.key_offset = key_offset;
	u.cmd.common.key_flags = key_flags;
	u.cmd.common.sta_id = sta_id;

	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
		i = 0;
	else
		i = -1;

	for (; i < IEEE80211_NUM_TIDS; i++) {
		struct ieee80211_key_seq seq = {};
		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
		int rx_pn_len = 8;
		/* there's a hole at 2/3 in FW format depending on version */
		int hole = api_ver >= 3 ? 0 : 2;

		ieee80211_get_key_rx_seq(key, i, &seq);

		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
			rx_pn[0] = seq.tkip.iv16;
			rx_pn[1] = seq.tkip.iv16 >> 8;
			rx_pn[2 + hole] = seq.tkip.iv32;
			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
			rx_pn = seq.hw.seq;
			rx_pn_len = seq.hw.seq_len;
		} else {
			rx_pn[0] = seq.ccmp.pn[0];
			rx_pn[1] = seq.ccmp.pn[1];
			rx_pn[2 + hole] = seq.ccmp.pn[2];
			rx_pn[3 + hole] = seq.ccmp.pn[3];
			rx_pn[4 + hole] = seq.ccmp.pn[4];
			rx_pn[5 + hole] = seq.ccmp.pn[5];
		}

		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
				   rx_pn_len) > 0)
			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
			       rx_pn_len);
	}

	if (api_ver >= 2) {
		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
		size = sizeof(u.cmd);
	} else {
		size = sizeof(u.cmd_v1);
	}

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
					   &u.cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
						  &u.cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
		return -EINVAL;

	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		/* This is a valid situation for IGTK */
		if (sta_id == IWL_MVM_INVALID_STA)
			return 0;

		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
			igtk_cmd.ctrl_flags |=
				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		pn = seq.aes_cmac.pn;
		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
						       ((u64) pn[4] << 8) |
						       ((u64) pn[3] << 16) |
						       ((u64) pn[2] << 24) |
						       ((u64) pn[1] << 32) |
						       ((u64) pn[0] << 40));
	}

	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
		       remove_key ? "removing" : "installing",
		       keyconf->keyidx >= 6 ? "B" : "",
		       keyconf->keyidx, igtk_cmd.sta_id);

	if (!iwl_mvm_has_new_rx_api(mvm)) {
		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
			.ctrl_flags = igtk_cmd.ctrl_flags,
			.key_id = igtk_cmd.key_id,
			.sta_id = igtk_cmd.sta_id,
			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
		};

		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
		       ARRAY_SIZE(igtk_cmd_v1.igtk));
		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
	}
	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
				    sizeof(igtk_cmd), &igtk_cmd);
}

static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return sta->addr;

	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) {
		u8 sta_id = mvmvif->deflink.ap_sta_id;

		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
			return NULL;

		return sta->addr;
	}

	return NULL;
}

static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
				 struct ieee80211_vif *vif,
				 struct ieee80211_sta *sta,
				 struct ieee80211_key_conf *keyconf,
				 u8 key_offset,
				 bool mcast)
{
	const u8 *addr;
	struct ieee80211_key_seq seq;
	u16 p1k[5];
	u32 sta_id;
	bool mfp = false;

	if (sta) {
		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

		sta_id = mvm_sta->deflink.sta_id;
		mfp = sta->mfp;
	} else if (vif->type == NL80211_IFTYPE_AP &&
		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	} else {
		IWL_ERR(mvm, "Failed to find station id\n");
		return -EINVAL;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
		if (!addr) {
			IWL_ERR(mvm, "Failed to find mac address\n");
			return -EINVAL;
		}

		/* get phase 1 key from mac80211 */
		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);

		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
					    seq.tkip.iv32, p1k, 0, key_offset,
					    mfp);
	}

	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
				    0, NULL, 0, key_offset, mfp);
}

int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

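	/*
	 * Descriptive note: two lookup paths follow. Pairwise keys and any
	 * key on a non-AP interface resolve to a real station (for a
	 * client-side GTK that is the AP station), while group keys on an
	 * AP interface are installed on the internal multicast station.
	 */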
	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->deflink.sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->deflink.sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
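	/*
	 * Aging scheme: every slot's counter is bumped (saturating at
	 * U8_MAX) and the slot just freed is reset to zero, so that
	 * iwl_mvm_set_fw_key_idx() hands out the coldest offset first.
	 */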
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_BLOCK_TXQS,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta,
				   bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
		return;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
		return;
	}

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	if (mvm->mld_api_is_used) {
		if (!iwl_mvm_has_no_host_disable_tx(mvm))
			iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif,
							      disable);
		return;
	}

	rcu_read_lock();

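	/*
	 * Descriptive note: RCU protects the fw_id_to_mac_id[] walk below;
	 * the per-station state itself is updated under each station's own
	 * lock in iwl_mvm_sta_modify_disable_tx_ap().
	 */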
	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	rcu_read_unlock();

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also the multicast station */
	if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.mcast_sta,
						  disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.bcast_sta,
						  disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);

	if (mvmsta)
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}

#if defined(__linux__)
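/*
 * PASN (secure ranging) peer station support. Guarded by __linux__ above,
 * so this block is compiled out of the FreeBSD build.
 */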
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len,
			 struct ieee80211_key_conf *keyconf)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	bool mld = iwl_mvm_has_mld_api(mvm->fw);
	u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED, type);
	if (ret)
		return ret;

	if (mld)
		ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
							 mvmvif->deflink.fw_link_id,
							 &queue,
							 IWL_MAX_TID_COUNT,
							 &wdg_timeout);
	else
		ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
						     mvmvif->color, addr, sta,
						     &queue,
						     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;
	keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;

	if (mld) {
		/* The MFP flag is set according to the station mfp field. Since
		 * we don't have a station, set it manually.
		 */
		u32 key_flags =
			iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
			IWL_SEC_KEY_FLAG_MFP;
		u32 sta_mask = BIT(sta->sta_id);

		ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
	} else {
		ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
					   0, NULL, 0, 0, true);
	}

out:
	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}
#endif

void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 id)
{
	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
		.id = cpu_to_le32(id),
	};
	int ret;

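	/* Fire and forget: the command is sent with CMD_ASYNC and a
	 * failure is only logged below.
	 */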
	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
				   CMD_ASYNC,
				   sizeof(cancel_channel_switch_cmd),
				   &cancel_channel_switch_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}

static int iwl_mvm_fw_sta_id_to_fw_link_id(struct iwl_mvm_vif *mvmvif,
					   u8 fw_sta_id)
{
	struct ieee80211_link_sta *link_sta =
		rcu_dereference(mvmvif->mvm->fw_id_to_link_sta[fw_sta_id]);
	struct iwl_mvm_vif_link_info *link;

	if (WARN_ON_ONCE(!link_sta))
		return -EINVAL;

	link = mvmvif->link[link_sta->link_id];

	if (WARN_ON_ONCE(!link))
		return -EINVAL;

	return link->fw_link_id;
}

#define IWL_MVM_TPT_COUNT_WINDOW (IWL_MVM_TPT_COUNT_WINDOW_SEC * HZ)

void iwl_mvm_count_mpdu(struct iwl_mvm_sta *mvm_sta, u8 fw_sta_id, u32 count,
			bool tx, int queue)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(mvm_sta->vif);
	struct iwl_mvm *mvm = mvmvif->mvm;
	struct iwl_mvm_tpt_counter *queue_counter;
	struct iwl_mvm_mpdu_counter *link_counter;
	u32 total_mpdus = 0;
	int fw_link_id;

	/* Count only for a BSS sta, and only when EMLSR is possible */
	if (!mvm_sta->mpdu_counters)
		return;

	/* Map sta id to link id */
	fw_link_id = iwl_mvm_fw_sta_id_to_fw_link_id(mvmvif, fw_sta_id);
	if (fw_link_id < 0)
		return;

	queue_counter = &mvm_sta->mpdu_counters[queue];
	link_counter = &queue_counter->per_link[fw_link_id];

	spin_lock_bh(&queue_counter->lock);

	if (tx)
		link_counter->tx += count;
	else
		link_counter->rx += count;

	/*
	 * When not in EMLSR, the window and the decision to enter EMLSR are
	 * handled during counting; when in EMLSR, in the statistics flow.
	 */
	if (mvmvif->esr_active)
		goto out;

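	/*
	 * Sliding window: if more than IWL_MVM_TPT_COUNT_WINDOW has passed
	 * since the window started, clear all per-link counters (including
	 * this batch) and open a new window before checking the EMLSR
	 * throughput threshold below.
	 */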
	if (time_is_before_jiffies(queue_counter->window_start +
				   IWL_MVM_TPT_COUNT_WINDOW)) {
		memset(queue_counter->per_link, 0,
		       sizeof(queue_counter->per_link));
		queue_counter->window_start = jiffies;

		IWL_DEBUG_STATS(mvm, "MPDU counters are cleared\n");
	}

	for (int i = 0; i < IWL_MVM_FW_MAX_LINK_ID; i++)
		total_mpdus += tx ? queue_counter->per_link[i].tx :
				    queue_counter->per_link[i].rx;

	if (total_mpdus > IWL_MVM_ENTER_ESR_TPT_THRESH)
		wiphy_work_queue(mvmvif->mvm->hw->wiphy,
				 &mvmvif->unblock_esr_tpt_wk);

out:
	spin_unlock_bh(&queue_counter->lock);
}