// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2023 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* Calculate the ampdu density and max size */
u32 iwl_mvm_get_sta_ampdu_dens(struct ieee80211_link_sta *link_sta,
			       struct ieee80211_bss_conf *link_conf,
			       u32 *_agg_size)
{
	u32 agg_size = 0, mpdu_dens = 0;

	if (WARN_ON(!link_sta))
		return 0;

	/* Note that we always use only legacy & highest supported PPDUs, so
	 * of Draft P802.11be D.30 Table 10-12a--Fields used for calculating
	 * the maximum A-MPDU size of various PPDU types in different bands,
	 * we only need to worry about the highest supported PPDU type here.
	 */
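
	/*
	 * A worked illustration (assuming the usual 2^(13 + exponent) - 1
	 * octet encoding): an HT ampdu_factor of 3 means 64 KiB - 1; a VHT
	 * exponent of 7 raises that to 1 MiB - 1; the HE/EHT fields below
	 * add to the exponent, and the result is clamped to the FW's
	 * 4 MiB limit at the end of this function.
	 */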
	if (link_sta->ht_cap.ht_supported) {
		agg_size = link_sta->ht_cap.ampdu_factor;
		mpdu_dens = link_sta->ht_cap.ampdu_density;
	}

	if (link_conf->chandef.chan->band == NL80211_BAND_6GHZ) {
		/* overwrite HT values on 6 GHz */
		mpdu_dens = le16_get_bits(link_sta->he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(link_sta->he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (link_sta->vht_cap.vht_supported) {
		/* if VHT supported overwrite HT value */
		agg_size = u32_get_bits(link_sta->vht_cap.cap,
					IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK);
	}

	/* D6.0 10.12.2 A-MPDU length limit rules
	 * A STA indicates the maximum length of the A-MPDU preEOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element
	 */
	if (link_sta->he_cap.has_he)
		agg_size +=
			u8_get_bits(link_sta->he_cap.he_cap_elem.mac_cap_info[3],
				    IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

	if (link_sta->eht_cap.has_eht)
		agg_size += u8_get_bits(link_sta->eht_cap.eht_cap_elem.mac_cap_info[1],
					IEEE80211_EHT_MAC_CAP1_MAX_AMPDU_LEN_MASK);

	/* Limit to max A-MPDU supported by FW */
	agg_size = min_t(u32, agg_size,
			 STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT);

	*_agg_size = agg_size;
	return mpdu_dens;
}

u8 iwl_mvm_get_sta_uapsd_acs(struct ieee80211_sta *sta)
{
	u8 uapsd_acs = 0;

	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
		uapsd_acs |= BIT(AC_BK);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
		uapsd_acs |= BIT(AC_BE);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
		uapsd_acs |= BIT(AC_VI);
	if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
		uapsd_acs |= BIT(AC_VO);

	return uapsd_acs | uapsd_acs << 4;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->deflink.sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->deflink.smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported ||
	    mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ)
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

	mpdu_dens = iwl_mvm_get_sta_ampdu_dens(&sta->deflink,
					       &mvm_sta->vif->bss_conf,
					       &agg_size);
	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);

	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;
		add_sta_cmd.uapsd_acs = iwl_mvm_get_sta_uapsd_acs(sta);
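		/*
		 * sta->max_sp counts pairs of frames per service period
		 * (1 -> 2, 2 -> 4, 3 -> 6 frames); 0 means "all buffered
		 * frames", for which 128 is presumably the FW's sentinel.
		 */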
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta_id = ffs(ba_data->sta_mask) - 1; /* don't care which one */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(sta))
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->deflink.sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}
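
/*
 * Free the TXQ pointed to by @queueptr that served @sta_id/@tid and let
 * the FW/transport know. On new-TX-API (TVQM) devices the queue is freed
 * outright; otherwise the SCD queue config is updated, and the queue is
 * only torn down once no TID uses it anymore.
 */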
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int sta_id, u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.sta_mask = cpu_to_le32(BIT(sta_id)),
			};

			if (tid == IWL_MAX_TID_COUNT)
				tid = IWL_MGMT_TID;

			remove_cmd.u.remove.tid = cpu_to_le32(tid);

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	/* Regardless if this is a reserved TXQ for a STA - mark it as false */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete a BA
 * agreement, and doesn't disable the queue
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		spin_lock_bh(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);
		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		spin_unlock_bh(&mvm->add_stream_lock);
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, sta_id, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE so only AC less than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];
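
	/*
	 * For example, a STA whose only data queues are VI and VO that asks
	 * to share a BK queue falls through to priority 4a above and ends
	 * up sharing the VI queue.
	 */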
	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than current one - FIFO needs to be redirected to
	 * the lowest one of the streams in the queue. Check if this is needed
	 * here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with
	 * value 3 and VO with value 0, so to check if ac X is lower than ac Y
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
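	/*
	 * For example, a BK TID (numerical value 3) mapped to a queue that
	 * currently serves BE (value 2): 3 > 2, so the check below fails
	 * and the queue gets redirected to the BK FIFO.
	 */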
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	set_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	clear_bit(IWL_MVM_TXQ_STATE_STOP_REDIRECT, &txq->state);

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_get_queue_size(struct ieee80211_sta *sta)
{
	int max_size = IWL_DEFAULT_QUEUE_SIZE;
	unsigned int link_id;

	/* this queue isn't used for traffic (cab_queue) */
	if (!sta)
		return IWL_MGMT_QUEUE_SIZE;

	rcu_read_lock();

	for (link_id = 0; link_id < ARRAY_SIZE(sta->link); link_id++) {
		struct ieee80211_link_sta *link =
			rcu_dereference(sta->link[link_id]);

		if (!link)
			continue;

		/* support for 1k ba size */
		if (link->eht_cap.has_eht &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_EHT)
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;

		/* support for 256 ba size */
		if (link->he_cap.has_he &&
		    max_size < IWL_DEFAULT_QUEUE_SIZE_HE)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	rcu_read_unlock();
	return max_size;
}

int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
			    struct ieee80211_sta *sta,
			    u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;
	u32 sta_mask = 0;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		size = iwl_mvm_get_queue_size(sta);
	}

	/* take the min with bc tbl entries allowed */
	size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));

	/* size needs to be a power of 2 for calculating read/write pointers */
	size = rounddown_pow_of_two(size);
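
	/*
	 * e.g. an EHT-capable STA starts out asking for a 1k-entry queue;
	 * if the transport can't allocate one that big, the loop below
	 * retries at 512, 256, ... entries, down to 16, before giving up.
	 */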
	if (sta) {
		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
		unsigned int link_id;

		for (link_id = 0;
		     link_id < ARRAY_SIZE(mvmsta->link);
		     link_id++) {
			struct iwl_mvm_link_sta *link =
				rcu_dereference_protected(mvmsta->link[link_id],
							  lockdep_is_held(&mvm->mutex));

			if (!link)
				continue;

			sta_mask |= BIT(link->sta_id);
		}
	} else {
		sta_mask |= BIT(sta_id);
	}

	if (!sta_mask)
		return -EINVAL;

	do {
		queue = iwl_trans_txq_alloc(mvm->trans, 0, sta_mask,
					    tid, size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %d\n",
					    size, sta_mask, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
			    queue, sta_mask, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->deflink.sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, sta, mvmsta->deflink.sta_id,
					tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->deflink.sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
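
/*
 * Hand the SCD's TID "ownership" of a shared queue to any TID that is
 * still mapped to it; needed once the previously owning TID has been
 * removed from the queue.
 */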
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->deflink.sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive
 * If only some of the queue TIDs are inactive - unmap them from the queue
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	unsigned int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;
	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and is
		 * in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->deflink.sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->deflink.sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there to shared
	 * queues
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, mvmsta->deflink.sta_id, &queue_tmp, tid);

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			spin_lock_bh(&mvm->add_stream_lock);
			list_del_init(&mvmtxq->list);
			spin_unlock_bh(&mvm->add_stream_lock);
			continue;
		}

		/* now we're ready, any remaining races/concurrency will be
		 * handled in iwl_mvm_mac_itxq_xmit()
		 */
		set_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state);

		local_bh_disable();
		spin_lock(&mvm->add_stream_lock);
		list_del_init(&mvmtxq->list);
		spin_unlock(&mvm->add_stream_lock);

		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}
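
/*
 * Reserve a data queue for a newly added station; the queue is only
 * marked IWL_MVM_QUEUE_RESERVED here and is actually enabled once
 * traffic for the station shows up (see iwl_mvm_sta_alloc_queue()).
 */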
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->deflink.sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->deflink.sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before,
 * in order to avoid race conditions when there are shared queues. This
 * function does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
					  struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->deflink.sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->deflink.sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, sta,
							 mvm_sta->deflink.sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and HW
			 * sets it now, FW reset will cause the seq num to start
			 * at 0 again, so driver will need to update it
			 * internally as well, so it keeps in sync with real val
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->deflink.sta_id, i,
					    txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}
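
/*
 * Send ADD_STA for an internal station (one that has no mac80211
 * counterpart, e.g. the auxiliary station), optionally with a MAC
 * address and TFD queue mask; TX is disabled on all TIDs.
 */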
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_mvm_has_new_station_api(mvm->fw) &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

/* Initialize driver data of a new sta */
int iwl_mvm_sta_init(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
		     struct ieee80211_sta *sta, int sta_id, u8 sta_type)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret = 0;

	lockdep_assert_held(&mvm->mutex);

	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;

	/* for MLD sta_id(s) should be allocated for each link before calling
	 * this function
	 */
	if (!mvm->mld_api_is_used) {
		if (WARN_ON(sta_id == IWL_MVM_INVALID_STA))
			return -EINVAL;

		mvm_sta->deflink.sta_id = sta_id;
		rcu_assign_pointer(mvm_sta->link[0], &mvm_sta->deflink);

		if (!mvm->trans->trans_cfg->gen2)
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_DEF;
		else
			mvm_sta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize =
				LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	}

	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta_type;

	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	if (iwl_mvm_has_new_rx_api(mvm)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			return ret;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be handled
	 * via the corresponding ops, otherwise need to notify rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);
	else
		spin_lock_init(&mvm_sta->deflink.lq_sta.rs_drv.pers.lock);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

	return 0;
}

int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->deflink.sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/* First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	ret = iwl_mvm_sta_init(mvm, vif, sta, sta_id,
			       sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK);
	if (ret)
		goto err;

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->deflink.ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->deflink.ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}
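
/*
 * Toggle STA_FLG_DRAIN_FLOW for @mvmsta, asking the fw to start (or stop)
 * draining the frames still queued for this station.
 */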
cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; 1911 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 1912 1913 status = ADD_STA_SUCCESS; 1914 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1915 iwl_mvm_add_sta_cmd_size(mvm), 1916 &cmd, &status); 1917 if (ret) 1918 return ret; 1919 1920 switch (status & IWL_ADD_STA_STATUS_MASK) { 1921 case ADD_STA_SUCCESS: 1922 IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n", 1923 mvmsta->deflink.sta_id); 1924 break; 1925 default: 1926 ret = -EIO; 1927 #if defined(__linux__) 1928 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", 1929 mvmsta->deflink.sta_id); 1930 #elif defined(__FreeBSD__) 1931 IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n", 1932 mvmsta->deflink.sta_id, status); 1933 #endif 1934 break; 1935 } 1936 1937 return ret; 1938 } 1939 1940 /* 1941 * Remove a station from the FW table. Before sending the command to remove 1942 * the station, validate that the station is indeed known to the driver (sanity 1943 * check only). 1944 */ 1945 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) 1946 { 1947 struct ieee80211_sta *sta; 1948 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { 1949 .sta_id = sta_id, 1950 }; 1951 int ret; 1952 1953 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1954 lockdep_is_held(&mvm->mutex)); 1955 1956 /* Note: internal stations are marked as error values */ 1957 if (!sta) { 1958 IWL_ERR(mvm, "Invalid station id\n"); 1959 return -EINVAL; 1960 } 1961 1962 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, 1963 sizeof(rm_sta_cmd), &rm_sta_cmd); 1964 if (ret) { 1965 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); 1966 return ret; 1967 } 1968 1969 return 0; 1970 } 1971 1972 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, 1973 struct ieee80211_vif *vif, 1974 struct ieee80211_sta *sta) 1975 { 1976 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1977 int i; 1978 1979 lockdep_assert_held(&mvm->mutex); 1980 1981 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { 1982 if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE) 1983 continue; 1984 1985 iwl_mvm_disable_txq(mvm, sta, mvm_sta->deflink.sta_id, 1986 &mvm_sta->tid_data[i].txq_id, i); 1987 mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE; 1988 } 1989 1990 for (i = 0; i < ARRAY_SIZE(sta->txq); i++) { 1991 struct iwl_mvm_txq *mvmtxq = 1992 iwl_mvm_txq_from_mac80211(sta->txq[i]); 1993 1994 spin_lock_bh(&mvm->add_stream_lock); 1995 mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE; 1996 list_del_init(&mvmtxq->list); 1997 clear_bit(IWL_MVM_TXQ_STATE_READY, &mvmtxq->state); 1998 spin_unlock_bh(&mvm->add_stream_lock); 1999 } 2000 } 2001 2002 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm, 2003 struct iwl_mvm_sta *mvm_sta) 2004 { 2005 int i; 2006 2007 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { 2008 u16 txq_id; 2009 int ret; 2010 2011 spin_lock_bh(&mvm_sta->lock); 2012 txq_id = mvm_sta->tid_data[i].txq_id; 2013 spin_unlock_bh(&mvm_sta->lock); 2014 2015 if (txq_id == IWL_MVM_INVALID_QUEUE) 2016 continue; 2017 2018 ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id); 2019 if (ret) 2020 return ret; 2021 } 2022 2023 return 0; 2024 } 2025 2026 /* Execute the common part for both MLD and non-MLD modes.
2027 * Returns true if we're done with removing the station, either 2028 * with an error or with success 2029 */ 2030 bool iwl_mvm_sta_del(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2031 struct ieee80211_sta *sta, 2032 struct ieee80211_link_sta *link_sta, int *ret) 2033 { 2034 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2035 struct iwl_mvm_vif_link_info *mvm_link = 2036 mvmvif->link[link_sta->link_id]; 2037 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2038 struct iwl_mvm_link_sta *mvm_link_sta; 2039 u8 sta_id; 2040 2041 lockdep_assert_held(&mvm->mutex); 2042 2043 mvm_link_sta = 2044 rcu_dereference_protected(mvm_sta->link[link_sta->link_id], 2045 lockdep_is_held(&mvm->mutex)); 2046 sta_id = mvm_link_sta->sta_id; 2047 2048 /* If there is a TXQ still marked as reserved - free it */ 2049 if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) { 2050 u8 reserved_txq = mvm_sta->reserved_queue; 2051 enum iwl_mvm_queue_status *status; 2052 2053 /* 2054 * If no traffic has gone through the reserved TXQ - it 2055 * is still marked as IWL_MVM_QUEUE_RESERVED, and 2056 * should be manually marked as free again 2057 */ 2058 status = &mvm->queue_info[reserved_txq].status; 2059 if (WARN((*status != IWL_MVM_QUEUE_RESERVED) && 2060 (*status != IWL_MVM_QUEUE_FREE), 2061 "sta_id %d reserved txq %d status %d", 2062 sta_id, reserved_txq, *status)) { 2063 *ret = -EINVAL; 2064 return true; 2065 } 2066 2067 *status = IWL_MVM_QUEUE_FREE; 2068 } 2069 2070 if (vif->type == NL80211_IFTYPE_STATION) { 2071 /* if associated - we can't remove the AP STA now */ 2072 if (vif->cfg.assoc) 2073 return true; 2074 2075 /* first remove remaining keys */ 2076 iwl_mvm_sec_key_remove_ap(mvm, vif, mvm_link, 0); 2077 2078 /* unassoc - go ahead - remove the AP STA now */ 2079 mvm_link->ap_sta_id = IWL_MVM_INVALID_STA; 2080 } 2081 2082 /* 2083 * This shouldn't happen - the TDLS channel switch should be canceled 2084 * before the STA is removed.
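 * If it does happen, clear the peer sta_id and cancel the delayed work
 * below as a best-effort recovery.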
2085 */ 2086 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { 2087 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; 2088 cancel_delayed_work(&mvm->tdls_cs.dwork); 2089 } 2090 2091 return false; 2092 } 2093 2094 int iwl_mvm_rm_sta(struct iwl_mvm *mvm, 2095 struct ieee80211_vif *vif, 2096 struct ieee80211_sta *sta) 2097 { 2098 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2099 int ret; 2100 2101 lockdep_assert_held(&mvm->mutex); 2102 2103 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); 2104 if (ret) 2105 return ret; 2106 2107 /* flush its queues here since we are freeing mvm_sta */ 2108 ret = iwl_mvm_flush_sta(mvm, mvm_sta, false); 2109 if (ret) 2110 return ret; 2111 if (iwl_mvm_has_new_tx_api(mvm)) { 2112 ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta); 2113 } else { 2114 u32 q_mask = mvm_sta->tfd_queue_msk; 2115 2116 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, 2117 q_mask); 2118 } 2119 if (ret) 2120 return ret; 2121 2122 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); 2123 2124 iwl_mvm_disable_sta_queues(mvm, vif, sta); 2125 2126 if (iwl_mvm_sta_del(mvm, vif, sta, &sta->deflink, &ret)) 2127 return ret; 2128 2129 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->deflink.sta_id); 2130 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->deflink.sta_id], NULL); 2131 2132 return ret; 2133 } 2134 2135 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, 2136 struct ieee80211_vif *vif, 2137 u8 sta_id) 2138 { 2139 int ret = iwl_mvm_rm_sta_common(mvm, sta_id); 2140 2141 lockdep_assert_held(&mvm->mutex); 2142 2143 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); 2144 return ret; 2145 } 2146 2147 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, 2148 struct iwl_mvm_int_sta *sta, 2149 u32 qmask, enum nl80211_iftype iftype, 2150 u8 type) 2151 { 2152 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 2153 sta->sta_id == IWL_MVM_INVALID_STA) { 2154 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); 2155 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) 2156 return -ENOSPC; 2157 } 2158 2159 sta->tfd_queue_msk = qmask; 2160 sta->type = type; 2161 2162 /* put a non-NULL value so iterating over the stations won't stop */ 2163 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); 2164 return 0; 2165 } 2166 2167 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) 2168 { 2169 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 2170 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 2171 sta->sta_id = IWL_MVM_INVALID_STA; 2172 } 2173 2174 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue, 2175 u8 sta_id, u8 fifo) 2176 { 2177 unsigned int wdg_timeout = 2178 mvm->trans->trans_cfg->base_params->wd_timeout; 2179 struct iwl_trans_txq_scd_cfg cfg = { 2180 .fifo = fifo, 2181 .sta_id = sta_id, 2182 .tid = IWL_MAX_TID_COUNT, 2183 .aggregate = false, 2184 .frame_limit = IWL_FRAME_LIMIT, 2185 }; 2186 2187 WARN_ON(iwl_mvm_has_new_tx_api(mvm)); 2188 2189 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); 2190 } 2191 2192 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id) 2193 { 2194 unsigned int wdg_timeout = 2195 mvm->trans->trans_cfg->base_params->wd_timeout; 2196 2197 WARN_ON(!iwl_mvm_has_new_tx_api(mvm)); 2198 2199 return iwl_mvm_tvqm_enable_txq(mvm, NULL, sta_id, IWL_MAX_TID_COUNT, 2200 wdg_timeout); 2201 } 2202 2203 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx, 2204 int maccolor, u8 *addr, 2205 struct iwl_mvm_int_sta *sta, 2206 u16 *queue, int fifo) 2207 { 2208 
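/* Note the two orderings below: pre-22000 HW must map the queue to a
 * FIFO before the station exists, while 22000+ (TVQM) must add the
 * station first and only then allocate the queue.
 */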
int ret; 2209 2210 /* Map queue to fifo - needs to happen before adding station */ 2211 if (!iwl_mvm_has_new_tx_api(mvm)) 2212 iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo); 2213 2214 ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor); 2215 if (ret) { 2216 if (!iwl_mvm_has_new_tx_api(mvm)) 2217 iwl_mvm_disable_txq(mvm, NULL, sta->sta_id, queue, 2218 IWL_MAX_TID_COUNT); 2219 return ret; 2220 } 2221 2222 /* 2223 * For 22000 firmware and on we cannot add queue to a station unknown 2224 * to firmware so enable queue here - after the station was added 2225 */ 2226 if (iwl_mvm_has_new_tx_api(mvm)) { 2227 int txq; 2228 2229 txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id); 2230 if (txq < 0) { 2231 iwl_mvm_rm_sta_common(mvm, sta->sta_id); 2232 return txq; 2233 } 2234 2235 *queue = txq; 2236 } 2237 2238 return 0; 2239 } 2240 2241 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id) 2242 { 2243 int ret; 2244 u32 qmask = mvm->aux_queue == IWL_MVM_INVALID_QUEUE ? 0 : 2245 BIT(mvm->aux_queue); 2246 2247 lockdep_assert_held(&mvm->mutex); 2248 2249 /* Allocate aux station and assign to it the aux queue */ 2250 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, qmask, 2251 NL80211_IFTYPE_UNSPECIFIED, 2252 IWL_STA_AUX_ACTIVITY); 2253 if (ret) 2254 return ret; 2255 2256 /* 2257 * In CDB NICs we need to specify which lmac to use for aux activity; 2258 * the mac_id argument is reused to pass lmac_id to the function 2259 */ 2260 ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL, 2261 &mvm->aux_sta, &mvm->aux_queue, 2262 IWL_MVM_TX_FIFO_MCAST); 2263 if (ret) { 2264 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 2265 return ret; 2266 } 2267 2268 return 0; 2269 } 2270 2271 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2272 { 2273 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2274 2275 lockdep_assert_held(&mvm->mutex); 2276 2277 return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color, 2278 NULL, &mvm->snif_sta, 2279 &mvm->snif_queue, 2280 IWL_MVM_TX_FIFO_BE); 2281 } 2282 2283 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2284 { 2285 int ret; 2286 2287 lockdep_assert_held(&mvm->mutex); 2288 2289 if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA)) 2290 return -EINVAL; 2291 2292 iwl_mvm_disable_txq(mvm, NULL, mvm->snif_sta.sta_id, 2293 &mvm->snif_queue, IWL_MAX_TID_COUNT); 2294 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 2295 if (ret) 2296 IWL_WARN(mvm, "Failed sending remove station\n"); 2297 2298 return ret; 2299 } 2300 2301 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm) 2302 { 2303 int ret; 2304 2305 lockdep_assert_held(&mvm->mutex); 2306 2307 if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA)) 2308 return -EINVAL; 2309 2310 iwl_mvm_disable_txq(mvm, NULL, mvm->aux_sta.sta_id, 2311 &mvm->aux_queue, IWL_MAX_TID_COUNT); 2312 ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id); 2313 if (ret) 2314 IWL_WARN(mvm, "Failed sending remove station\n"); 2315 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 2316 2317 return ret; 2318 } 2319 2320 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) 2321 { 2322 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); 2323 } 2324 2325 /* 2326 * Send the add station command for the vif's broadcast station. 2327 * Assumes that the station was already allocated. 2328 * 2329 * @mvm: the mvm component 2330 * @vif: the interface to which the broadcast station is added 2331 *
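 * Returns 0 on success, or a negative error code on failure.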
2332 */ 2333 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2334 { 2335 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2336 struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; 2337 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 2338 const u8 *baddr = _baddr; 2339 int queue; 2340 int ret; 2341 unsigned int wdg_timeout = 2342 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 2343 struct iwl_trans_txq_scd_cfg cfg = { 2344 .fifo = IWL_MVM_TX_FIFO_VO, 2345 .sta_id = mvmvif->deflink.bcast_sta.sta_id, 2346 .tid = IWL_MAX_TID_COUNT, 2347 .aggregate = false, 2348 .frame_limit = IWL_FRAME_LIMIT, 2349 }; 2350 2351 lockdep_assert_held(&mvm->mutex); 2352 2353 if (!iwl_mvm_has_new_tx_api(mvm)) { 2354 if (vif->type == NL80211_IFTYPE_AP || 2355 vif->type == NL80211_IFTYPE_ADHOC) { 2356 queue = mvm->probe_queue; 2357 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 2358 queue = mvm->p2p_dev_queue; 2359 } else { 2360 WARN(1, "Missing required TXQ for adding bcast STA\n"); 2361 return -EINVAL; 2362 } 2363 2364 bsta->tfd_queue_msk |= BIT(queue); 2365 2366 iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout); 2367 } 2368 2369 if (vif->type == NL80211_IFTYPE_ADHOC) 2370 baddr = vif->bss_conf.bssid; 2371 2372 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA)) 2373 return -ENOSPC; 2374 2375 ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr, 2376 mvmvif->id, mvmvif->color); 2377 if (ret) 2378 return ret; 2379 2380 /* 2381 * For 22000 firmware and on we cannot add queue to a station unknown 2382 * to firmware so enable queue here - after the station was added 2383 */ 2384 if (iwl_mvm_has_new_tx_api(mvm)) { 2385 queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, bsta->sta_id, 2386 IWL_MAX_TID_COUNT, 2387 wdg_timeout); 2388 if (queue < 0) { 2389 iwl_mvm_rm_sta_common(mvm, bsta->sta_id); 2390 return queue; 2391 } 2392 2393 if (vif->type == NL80211_IFTYPE_AP || 2394 vif->type == NL80211_IFTYPE_ADHOC) { 2395 /* for queue management */ 2396 mvm->probe_queue = queue; 2397 /* for use in TX */ 2398 mvmvif->deflink.mgmt_queue = queue; 2399 } else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) { 2400 mvm->p2p_dev_queue = queue; 2401 } 2402 } else if (vif->type == NL80211_IFTYPE_AP || 2403 vif->type == NL80211_IFTYPE_ADHOC) { 2404 /* set it for use in TX */ 2405 mvmvif->deflink.mgmt_queue = mvm->probe_queue; 2406 } 2407 2408 return 0; 2409 } 2410 2411 void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm, 2412 struct ieee80211_vif *vif) 2413 { 2414 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2415 u16 *queueptr, queue; 2416 2417 lockdep_assert_held(&mvm->mutex); 2418 2419 iwl_mvm_flush_sta(mvm, &mvmvif->deflink.bcast_sta, true); 2420 2421 switch (vif->type) { 2422 case NL80211_IFTYPE_AP: 2423 case NL80211_IFTYPE_ADHOC: 2424 queueptr = &mvm->probe_queue; 2425 break; 2426 case NL80211_IFTYPE_P2P_DEVICE: 2427 queueptr = &mvm->p2p_dev_queue; 2428 break; 2429 default: 2430 WARN(1, "Can't free bcast queue on vif type %d\n", 2431 vif->type); 2432 return; 2433 } 2434 2435 queue = *queueptr; 2436 iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.bcast_sta.sta_id, 2437 queueptr, IWL_MAX_TID_COUNT); 2438 2439 if (vif->type == NL80211_IFTYPE_AP || vif->type == NL80211_IFTYPE_ADHOC) 2440 mvmvif->deflink.mgmt_queue = mvm->probe_queue; 2441 2442 if (iwl_mvm_has_new_tx_api(mvm)) 2443 return; 2444 2445 WARN_ON(!(mvmvif->deflink.bcast_sta.tfd_queue_msk & BIT(queue))); 2446 mvmvif->deflink.bcast_sta.tfd_queue_msk &= ~BIT(queue); 2447 } 2448 2449 /* Send the FW 
a request to remove the station from its internal data 2450 * structures, but DO NOT remove the entry from the local data structures. */ 2451 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2452 { 2453 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2454 int ret; 2455 2456 lockdep_assert_held(&mvm->mutex); 2457 2458 iwl_mvm_free_bcast_sta_queues(mvm, vif); 2459 2460 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.bcast_sta.sta_id); 2461 if (ret) 2462 IWL_WARN(mvm, "Failed sending remove station\n"); 2463 return ret; 2464 } 2465 2466 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2467 { 2468 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2469 2470 lockdep_assert_held(&mvm->mutex); 2471 2472 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->deflink.bcast_sta, 0, 2473 ieee80211_vif_type_p2p(vif), 2474 IWL_STA_GENERAL_PURPOSE); 2475 } 2476 2477 /* Allocate a new station entry for the broadcast station of the given vif, 2478 * and send it to the FW. 2479 * Note that each P2P mac should have its own broadcast station. 2480 * 2481 * @mvm: the mvm component 2482 * @vif: the interface to which the broadcast station is added 2483 */ 2484 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2485 { 2486 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2487 struct iwl_mvm_int_sta *bsta = &mvmvif->deflink.bcast_sta; 2488 int ret; 2489 2490 lockdep_assert_held(&mvm->mutex); 2491 2492 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 2493 if (ret) 2494 return ret; 2495 2496 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 2497 2498 if (ret) 2499 iwl_mvm_dealloc_int_sta(mvm, bsta); 2500 2501 return ret; 2502 } 2503 2504 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2505 { 2506 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2507 2508 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->deflink.bcast_sta); 2509 } 2510 2511 /* 2512 * Send the FW a request to remove the station from its internal data 2513 * structures, and in addition remove it from the local data structure. 2514 */ 2515 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2516 { 2517 int ret; 2518 2519 lockdep_assert_held(&mvm->mutex); 2520 2521 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); 2522 2523 iwl_mvm_dealloc_bcast_sta(mvm, vif); 2524 2525 return ret; 2526 } 2527 2528 /* 2529 * Allocate a new station entry for the multicast station of the given vif, 2530 * and send it to the FW. 2531 * Note that each AP/GO mac should have its own multicast station. 2532 * 2533 * @mvm: the mvm component 2534 * @vif: the interface to which the multicast station is added 2535 */ 2536 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2537 { 2538 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2539 struct iwl_mvm_int_sta *msta = &mvmvif->deflink.mcast_sta; 2540 static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00}; 2541 const u8 *maddr = _maddr; 2542 struct iwl_trans_txq_scd_cfg cfg = { 2543 .fifo = vif->type == NL80211_IFTYPE_AP ?
2544 IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE, 2545 .sta_id = msta->sta_id, 2546 .tid = 0, 2547 .aggregate = false, 2548 .frame_limit = IWL_FRAME_LIMIT, 2549 }; 2550 unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false); 2551 int ret; 2552 2553 lockdep_assert_held(&mvm->mutex); 2554 2555 if (WARN_ON(vif->type != NL80211_IFTYPE_AP && 2556 vif->type != NL80211_IFTYPE_ADHOC)) 2557 return -ENOTSUPP; 2558 2559 /* 2560 * In IBSS, ieee80211_check_queues() sets the cab_queue to be 2561 * invalid, so make sure we use the queue we want. 2562 * Note that this is done here as we want to avoid making DQA 2563 * changes in mac80211 layer. 2564 */ 2565 if (vif->type == NL80211_IFTYPE_ADHOC) 2566 mvmvif->deflink.cab_queue = IWL_MVM_DQA_GCAST_QUEUE; 2567 2568 /* 2569 * While in previous FWs we had to exclude cab queue from TFD queue 2570 * mask, now it is needed as any other queue. 2571 */ 2572 if (!iwl_mvm_has_new_tx_api(mvm) && 2573 fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) { 2574 iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, 2575 &cfg, 2576 timeout); 2577 msta->tfd_queue_msk |= BIT(mvmvif->deflink.cab_queue); 2578 } 2579 ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr, 2580 mvmvif->id, mvmvif->color); 2581 if (ret) 2582 goto err; 2583 2584 /* 2585 * Enable cab queue after the ADD_STA command is sent. 2586 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG 2587 * command with unknown station id, and for FW that doesn't support 2588 * station API since the cab queue is not included in the 2589 * tfd_queue_mask. 2590 */ 2591 if (iwl_mvm_has_new_tx_api(mvm)) { 2592 int queue = iwl_mvm_tvqm_enable_txq(mvm, NULL, msta->sta_id, 2593 0, timeout); 2594 if (queue < 0) { 2595 ret = queue; 2596 goto err; 2597 } 2598 mvmvif->deflink.cab_queue = queue; 2599 } else if (!fw_has_api(&mvm->fw->ucode_capa, 2600 IWL_UCODE_TLV_API_STA_TYPE)) 2601 iwl_mvm_enable_txq(mvm, NULL, mvmvif->deflink.cab_queue, 0, 2602 &cfg, 2603 timeout); 2604 2605 return 0; 2606 err: 2607 iwl_mvm_dealloc_int_sta(mvm, msta); 2608 return ret; 2609 } 2610 2611 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 2612 struct ieee80211_key_conf *keyconf, 2613 bool mcast) 2614 { 2615 union { 2616 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 2617 struct iwl_mvm_add_sta_key_cmd cmd; 2618 } u = {}; 2619 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 2620 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 2621 __le16 key_flags; 2622 int ret, size; 2623 u32 status; 2624 2625 /* This is a valid situation for GTK removal */ 2626 if (sta_id == IWL_MVM_INVALID_STA) 2627 return 0; 2628 2629 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 2630 STA_KEY_FLG_KEYID_MSK); 2631 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 2632 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 2633 2634 if (mcast) 2635 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 2636 2637 /* 2638 * The fields assigned here are in the same location at the start 2639 * of the command, so we can do this union trick. 2640 */ 2641 u.cmd.common.key_flags = key_flags; 2642 u.cmd.common.key_offset = keyconf->hw_key_idx; 2643 u.cmd.common.sta_id = sta_id; 2644 2645 size = new_api ? 
sizeof(u.cmd) : sizeof(u.cmd_v1); 2646 2647 status = ADD_STA_SUCCESS; 2648 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, 2649 &status); 2650 2651 switch (status) { 2652 case ADD_STA_SUCCESS: 2653 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 2654 break; 2655 default: 2656 ret = -EIO; 2657 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 2658 break; 2659 } 2660 2661 return ret; 2662 } 2663 2664 /* 2665 * Send the FW a request to remove the station from its internal data 2666 * structures, and in addition remove it from the local data structure. 2667 */ 2668 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 2669 { 2670 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2671 int ret; 2672 2673 lockdep_assert_held(&mvm->mutex); 2674 2675 iwl_mvm_flush_sta(mvm, &mvmvif->deflink.mcast_sta, true); 2676 2677 iwl_mvm_disable_txq(mvm, NULL, mvmvif->deflink.mcast_sta.sta_id, 2678 &mvmvif->deflink.cab_queue, 0); 2679 2680 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->deflink.mcast_sta.sta_id); 2681 if (ret) 2682 IWL_WARN(mvm, "Failed sending remove station\n"); 2683 2684 return ret; 2685 } 2686 2687 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) 2688 { 2689 struct iwl_mvm_delba_data notif = { 2690 .baid = baid, 2691 }; 2692 2693 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true, 2694 &notif, sizeof(notif)); 2695 } 2696 2697 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, 2698 struct iwl_mvm_baid_data *data) 2699 { 2700 int i; 2701 2702 iwl_mvm_sync_rxq_del_ba(mvm, data->baid); 2703 2704 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 2705 int j; 2706 struct iwl_mvm_reorder_buffer *reorder_buf = 2707 &data->reorder_buf[i]; 2708 struct iwl_mvm_reorder_buf_entry *entries = 2709 &data->entries[i * data->entries_per_queue]; 2710 2711 spin_lock_bh(&reorder_buf->lock); 2712 if (likely(!reorder_buf->num_stored)) { 2713 spin_unlock_bh(&reorder_buf->lock); 2714 continue; 2715 } 2716 2717 /* 2718 * This shouldn't happen in regular DELBA since the internal 2719 * delBA notification should trigger a release of all frames in 2720 * the reorder buffer. 2721 */ 2722 WARN_ON(1); 2723 2724 for (j = 0; j < reorder_buf->buf_size; j++) 2725 __skb_queue_purge(&entries[j].e.frames); 2726 /* 2727 * Prevent timer re-arm. This prevents a very far-fetched case 2728 * where we timed out on the notification. There may be prior 2729 * RX frames pending in the RX queue before the notification 2730 * that might get processed between now and the actual deletion 2731 * and we would re-arm the timer although we are deleting the 2732 * reorder buffer.
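 * Setting ->removed under the lock, before del_timer_sync() below,
 * ensures the expiry handler cannot re-arm the timer afterwards.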
2733 */ 2734 reorder_buf->removed = true; 2735 spin_unlock_bh(&reorder_buf->lock); 2736 del_timer_sync(&reorder_buf->reorder_timer); 2737 } 2738 } 2739 2740 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, 2741 struct iwl_mvm_baid_data *data, 2742 u16 ssn, u16 buf_size) 2743 { 2744 int i; 2745 2746 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 2747 struct iwl_mvm_reorder_buffer *reorder_buf = 2748 &data->reorder_buf[i]; 2749 struct iwl_mvm_reorder_buf_entry *entries = 2750 &data->entries[i * data->entries_per_queue]; 2751 int j; 2752 2753 reorder_buf->num_stored = 0; 2754 reorder_buf->head_sn = ssn; 2755 reorder_buf->buf_size = buf_size; 2756 /* rx reorder timer */ 2757 timer_setup(&reorder_buf->reorder_timer, 2758 iwl_mvm_reorder_timer_expired, 0); 2759 spin_lock_init(&reorder_buf->lock); 2760 reorder_buf->mvm = mvm; 2761 reorder_buf->queue = i; 2762 reorder_buf->valid = false; 2763 for (j = 0; j < reorder_buf->buf_size; j++) 2764 __skb_queue_head_init(&entries[j].e.frames); 2765 } 2766 } 2767 2768 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm, 2769 struct ieee80211_sta *sta, 2770 bool start, int tid, u16 ssn, 2771 u16 buf_size) 2772 { 2773 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2774 struct iwl_mvm_add_sta_cmd cmd = { 2775 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), 2776 .sta_id = mvm_sta->deflink.sta_id, 2777 .add_modify = STA_MODE_MODIFY, 2778 }; 2779 u32 status; 2780 int ret; 2781 2782 if (start) { 2783 cmd.add_immediate_ba_tid = tid; 2784 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2785 cmd.rx_ba_window = cpu_to_le16(buf_size); 2786 cmd.modify_mask = STA_MODIFY_ADD_BA_TID; 2787 } else { 2788 cmd.remove_immediate_ba_tid = tid; 2789 cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID; 2790 } 2791 2792 status = ADD_STA_SUCCESS; 2793 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2794 iwl_mvm_add_sta_cmd_size(mvm), 2795 &cmd, &status); 2796 if (ret) 2797 return ret; 2798 2799 switch (status & IWL_ADD_STA_STATUS_MASK) { 2800 case ADD_STA_SUCCESS: 2801 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2802 start ? "start" : "stopp"); 2803 if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) && 2804 !(status & IWL_ADD_STA_BAID_VALID_MASK))) 2805 return -EINVAL; 2806 return u32_get_bits(status, IWL_ADD_STA_BAID_MASK); 2807 case ADD_STA_IMMEDIATE_BA_FAILURE: 2808 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 2809 return -ENOSPC; 2810 default: 2811 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 2812 start ? "start" : "stopp", status); 2813 return -EIO; 2814 } 2815 } 2816 2817 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm, 2818 struct ieee80211_sta *sta, 2819 bool start, int tid, u16 ssn, 2820 u16 buf_size, int baid) 2821 { 2822 struct iwl_rx_baid_cfg_cmd cmd = { 2823 .action = start ? 
cpu_to_le32(IWL_RX_BAID_ACTION_ADD) : 2824 cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE), 2825 }; 2826 u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD); 2827 int ret; 2828 2829 BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid)); 2830 2831 if (start) { 2832 cmd.alloc.sta_id_mask = 2833 cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); 2834 cmd.alloc.tid = tid; 2835 cmd.alloc.ssn = cpu_to_le16(ssn); 2836 cmd.alloc.win_size = cpu_to_le16(buf_size); 2837 baid = -EIO; 2838 } else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) { 2839 cmd.remove_v1.baid = cpu_to_le32(baid); 2840 BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove)); 2841 } else { 2842 cmd.remove.sta_id_mask = 2843 cpu_to_le32(iwl_mvm_sta_fw_id_mask(mvm, sta, -1)); 2844 cmd.remove.tid = cpu_to_le32(tid); 2845 } 2846 2847 ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd), 2848 &cmd, &baid); 2849 if (ret) 2850 return ret; 2851 2852 if (!start) { 2853 /* ignore firmware baid on remove */ 2854 baid = 0; 2855 } 2856 2857 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2858 start ? "start" : "stopp"); 2859 2860 if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map)) 2861 return -EINVAL; 2862 2863 return baid; 2864 } 2865 2866 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2867 bool start, int tid, u16 ssn, u16 buf_size, 2868 int baid) 2869 { 2870 if (fw_has_capa(&mvm->fw->ucode_capa, 2871 IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT)) 2872 return iwl_mvm_fw_baid_op_cmd(mvm, sta, start, 2873 tid, ssn, buf_size, baid); 2874 2875 return iwl_mvm_fw_baid_op_sta(mvm, sta, start, 2876 tid, ssn, buf_size); 2877 } 2878 2879 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2880 int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) 2881 { 2882 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2883 struct iwl_mvm_baid_data *baid_data = NULL; 2884 int ret, baid; 2885 u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID : 2886 IWL_MAX_BAID_OLD; 2887 2888 lockdep_assert_held(&mvm->mutex); 2889 2890 if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) { 2891 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); 2892 return -ENOSPC; 2893 } 2894 2895 if (iwl_mvm_has_new_rx_api(mvm) && start) { 2896 u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]); 2897 2898 /* sparse doesn't like the __align() so don't check */ 2899 #ifndef __CHECKER__ 2900 /* 2901 * The division below will be OK if either the cache line size 2902 * can be divided by the entry size (ALIGN will round up) or if 2903 * the entry size can be divided by the cache line size, in 2904 * which case the ALIGN() will do nothing. 2905 */ 2906 BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) && 2907 sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES); 2908 #endif 2909 2910 /* 2911 * Upward align the reorder buffer size to fill an entire cache 2912 * line for each queue, to avoid sharing cache lines between 2913 * different queues. 2914 */ 2915 reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES); 2916 2917 /* 2918 * Allocate here so if allocation fails we can bail out early 2919 * before starting the BA session in the firmware 2920 */ 2921 baid_data = kzalloc(sizeof(*baid_data) + 2922 mvm->trans->num_rx_queues * 2923 reorder_buf_size, 2924 GFP_KERNEL); 2925 if (!baid_data) 2926 return -ENOMEM; 2927 2928 /* 2929 * This division is why we need the above BUILD_BUG_ON(), 2930 * if that doesn't hold then this will not be right.
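 * As an illustrative example (numbers are hypothetical): with 64-byte
 * cache lines and 16-byte entries, buf_size = 10 gives
 * reorder_buf_size = ALIGN(160, 64) = 192, i.e. 12 entries per queue.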
2931 */ 2932 baid_data->entries_per_queue = 2933 reorder_buf_size / sizeof(baid_data->entries[0]); 2934 } 2935 2936 if (iwl_mvm_has_new_rx_api(mvm) && !start) { 2937 baid = mvm_sta->tid_to_baid[tid]; 2938 } else { 2939 /* we don't really need it in this case */ 2940 baid = -1; 2941 } 2942 2943 /* Don't send command to remove (start=0) BAID during restart */ 2944 if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 2945 baid = iwl_mvm_fw_baid_op(mvm, sta, start, tid, ssn, buf_size, 2946 baid); 2947 2948 if (baid < 0) { 2949 ret = baid; 2950 goto out_free; 2951 } 2952 2953 if (start) { 2954 mvm->rx_ba_sessions++; 2955 2956 if (!iwl_mvm_has_new_rx_api(mvm)) 2957 return 0; 2958 2959 baid_data->baid = baid; 2960 baid_data->timeout = timeout; 2961 baid_data->last_rx = jiffies; 2962 baid_data->rcu_ptr = &mvm->baid_map[baid]; 2963 timer_setup(&baid_data->session_timer, 2964 iwl_mvm_rx_agg_session_expired, 0); 2965 baid_data->mvm = mvm; 2966 baid_data->tid = tid; 2967 baid_data->sta_mask = iwl_mvm_sta_fw_id_mask(mvm, sta, -1); 2968 2969 mvm_sta->tid_to_baid[tid] = baid; 2970 if (timeout) 2971 mod_timer(&baid_data->session_timer, 2972 TU_TO_EXP_TIME(timeout * 2)); 2973 2974 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); 2975 /* 2976 * protect the BA data with RCU to cover a case where our 2977 * internal RX sync mechanism will timeout (not that it's 2978 * supposed to happen) and we will free the session data while 2979 * RX is being processed in parallel 2980 */ 2981 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 2982 mvm_sta->deflink.sta_id, tid, baid); 2983 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 2984 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 2985 } else { 2986 baid = mvm_sta->tid_to_baid[tid]; 2987 2988 if (mvm->rx_ba_sessions > 0) 2989 /* check that restart flow didn't zero the counter */ 2990 mvm->rx_ba_sessions--; 2991 if (!iwl_mvm_has_new_rx_api(mvm)) 2992 return 0; 2993 2994 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 2995 return -EINVAL; 2996 2997 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 2998 if (WARN_ON(!baid_data)) 2999 return -EINVAL; 3000 3001 /* synchronize all rx queues so we can safely delete */ 3002 iwl_mvm_free_reorder(mvm, baid_data); 3003 timer_shutdown_sync(&baid_data->session_timer); 3004 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 3005 kfree_rcu(baid_data, rcu_head); 3006 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 3007 3008 /* 3009 * After we've deleted it, do another queue sync 3010 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently 3011 * running it won't find a new session in the old 3012 * BAID. It can find the NULL pointer for the BAID, 3013 * but we must not have it find a different session. 
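 * The IWL_MVM_RXQ_EMPTY notification sent below carries no payload
 * (NULL, 0); it acts purely as a barrier across the RX queues.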
3014 */ 3015 iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY, 3016 true, NULL, 0); 3017 } 3018 return 0; 3019 3020 out_free: 3021 kfree(baid_data); 3022 return ret; 3023 } 3024 3025 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 3026 int tid, u8 queue, bool start) 3027 { 3028 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3029 struct iwl_mvm_add_sta_cmd cmd = {}; 3030 int ret; 3031 u32 status; 3032 3033 lockdep_assert_held(&mvm->mutex); 3034 3035 if (start) { 3036 mvm_sta->tfd_queue_msk |= BIT(queue); 3037 mvm_sta->tid_disable_agg &= ~BIT(tid); 3038 } else { 3039 /* In DQA-mode the queue isn't removed on agg termination */ 3040 mvm_sta->tid_disable_agg |= BIT(tid); 3041 } 3042 3043 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 3044 cmd.sta_id = mvm_sta->deflink.sta_id; 3045 cmd.add_modify = STA_MODE_MODIFY; 3046 if (!iwl_mvm_has_new_tx_api(mvm)) 3047 cmd.modify_mask = STA_MODIFY_QUEUES; 3048 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 3049 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 3050 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 3051 3052 status = ADD_STA_SUCCESS; 3053 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 3054 iwl_mvm_add_sta_cmd_size(mvm), 3055 &cmd, &status); 3056 if (ret) 3057 return ret; 3058 3059 switch (status & IWL_ADD_STA_STATUS_MASK) { 3060 case ADD_STA_SUCCESS: 3061 break; 3062 default: 3063 ret = -EIO; 3064 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", 3065 start ? "start" : "stopp", status); 3066 break; 3067 } 3068 3069 return ret; 3070 } 3071 3072 const u8 tid_to_mac80211_ac[] = { 3073 IEEE80211_AC_BE, 3074 IEEE80211_AC_BK, 3075 IEEE80211_AC_BK, 3076 IEEE80211_AC_BE, 3077 IEEE80211_AC_VI, 3078 IEEE80211_AC_VI, 3079 IEEE80211_AC_VO, 3080 IEEE80211_AC_VO, 3081 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ 3082 }; 3083 3084 static const u8 tid_to_ucode_ac[] = { 3085 AC_BE, 3086 AC_BK, 3087 AC_BK, 3088 AC_BE, 3089 AC_VI, 3090 AC_VI, 3091 AC_VO, 3092 AC_VO, 3093 }; 3094 3095 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3096 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 3097 { 3098 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3099 struct iwl_mvm_tid_data *tid_data; 3100 u16 normalized_ssn; 3101 u16 txq_id; 3102 int ret; 3103 3104 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 3105 return -EINVAL; 3106 3107 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && 3108 mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 3109 IWL_ERR(mvm, 3110 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", 3111 mvmsta->tid_data[tid].state); 3112 return -ENXIO; 3113 } 3114 3115 lockdep_assert_held(&mvm->mutex); 3116 3117 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && 3118 iwl_mvm_has_new_tx_api(mvm)) { 3119 u8 ac = tid_to_mac80211_ac[tid]; 3120 3121 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 3122 if (ret) 3123 return ret; 3124 } 3125 3126 spin_lock_bh(&mvmsta->lock); 3127 3128 /* 3129 * Note the possible cases: 3130 * 1. An enabled TXQ - TXQ needs to become agg'ed 3131 * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark 3132 * it as reserved 3133 */ 3134 txq_id = mvmsta->tid_data[tid].txq_id; 3135 if (txq_id == IWL_MVM_INVALID_QUEUE) { 3136 ret = iwl_mvm_find_free_queue(mvm, mvmsta->deflink.sta_id, 3137 IWL_MVM_DQA_MIN_DATA_QUEUE, 3138 IWL_MVM_DQA_MAX_DATA_QUEUE); 3139 if (ret < 0) { 3140 IWL_ERR(mvm, "Failed to allocate agg queue\n"); 3141 goto out; 3142 } 3143 3144 txq_id = ret; 3145 3146 /* TXQ hasn't yet been enabled, so mark it only as reserved */ 3147 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED; 3148 } else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) { 3149 ret = -ENXIO; 3150 IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n", 3151 txq_id, IWL_MAX_HW_QUEUES - 1); 3152 goto out; 3153 3154 } else if (unlikely(mvm->queue_info[txq_id].status == 3155 IWL_MVM_QUEUE_SHARED)) { 3156 ret = -ENXIO; 3157 IWL_DEBUG_TX_QUEUES(mvm, 3158 "Can't start tid %d agg on shared queue!\n", 3159 tid); 3160 goto out; 3161 } 3162 3163 IWL_DEBUG_TX_QUEUES(mvm, 3164 "AGG for tid %d will be on queue #%d\n", 3165 tid, txq_id); 3166 3167 tid_data = &mvmsta->tid_data[tid]; 3168 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 3169 tid_data->txq_id = txq_id; 3170 *ssn = tid_data->ssn; 3171 3172 IWL_DEBUG_TX_QUEUES(mvm, 3173 "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n", 3174 mvmsta->deflink.sta_id, tid, txq_id, 3175 tid_data->ssn, 3176 tid_data->next_reclaimed); 3177 3178 /* 3179 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need 3180 * to align the wrap around of ssn so we compare relevant values. 3181 */ 3182 normalized_ssn = tid_data->ssn; 3183 if (mvm->trans->trans_cfg->gen2) 3184 normalized_ssn &= 0xff; 3185 3186 if (normalized_ssn == tid_data->next_reclaimed) { 3187 tid_data->state = IWL_AGG_STARTING; 3188 ret = IEEE80211_AMPDU_TX_START_IMMEDIATE; 3189 } else { 3190 tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA; 3191 ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA; 3192 } 3193 3194 out: 3195 spin_unlock_bh(&mvmsta->lock); 3196 3197 return ret; 3198 } 3199 3200 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3201 struct ieee80211_sta *sta, u16 tid, u16 buf_size, 3202 bool amsdu) 3203 { 3204 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3205 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3206 unsigned int wdg_timeout = 3207 iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false); 3208 int queue, ret; 3209 bool alloc_queue = true; 3210 enum iwl_mvm_queue_status queue_status; 3211 u16 ssn; 3212 3213 struct iwl_trans_txq_scd_cfg cfg = { 3214 .sta_id = mvmsta->deflink.sta_id, 3215 .tid = tid, 3216 .frame_limit = buf_size, 3217 .aggregate = true, 3218 }; 3219 3220 /* 3221 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation 3222 * manager, so this function should never be called in this case. 3223 */ 3224 if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm))) 3225 return -EINVAL; 3226 3227 BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE) 3228 != IWL_MAX_TID_COUNT); 3229 3230 spin_lock_bh(&mvmsta->lock); 3231 ssn = tid_data->ssn; 3232 queue = tid_data->txq_id; 3233 tid_data->state = IWL_AGG_ON; 3234 mvmsta->agg_tids |= BIT(tid); 3235 tid_data->ssn = 0xffff; 3236 tid_data->amsdu_in_ampdu_allowed = amsdu; 3237 spin_unlock_bh(&mvmsta->lock); 3238 3239 if (iwl_mvm_has_new_tx_api(mvm)) { 3240 /* 3241 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start() 3242 * would have failed, so if we are here there is no need to 3243 * allocate a queue.
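 * (the queue was already allocated by iwl_mvm_sta_alloc_queue_tvqm()
 * in iwl_mvm_sta_tx_agg_start() in that case).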
3244 * However, if aggregation size is different than the default 3245 * size, the scheduler should be reconfigured. 3246 * We cannot do this with the new TX API, so return unsupported 3247 * for now, until it is offloaded to firmware. 3248 * Note that if SCD default value changes - this condition 3249 * should be updated as well. 3250 */ 3251 if (buf_size < IWL_FRAME_LIMIT) 3252 return -ENOTSUPP; 3253 3254 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 3255 if (ret) 3256 return -EIO; 3257 goto out; 3258 } 3259 3260 cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]]; 3261 3262 queue_status = mvm->queue_info[queue].status; 3263 3264 /* Maybe there is no need to even alloc a queue... */ 3265 if (queue_status == IWL_MVM_QUEUE_READY) 3266 alloc_queue = false; 3267 3268 /* 3269 * Only reconfig the SCD for the queue if the window size has 3270 * become smaller than the current one 3271 */ 3272 if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) { 3273 /* 3274 * If reconfiguring an existing queue, it first must be 3275 * drained 3276 */ 3277 ret = iwl_trans_wait_tx_queues_empty(mvm->trans, 3278 BIT(queue)); 3279 if (ret) { 3280 IWL_ERR(mvm, 3281 "Error draining queue before reconfig\n"); 3282 return ret; 3283 } 3284 3285 ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo, 3286 mvmsta->deflink.sta_id, tid, 3287 buf_size, ssn); 3288 if (ret) { 3289 IWL_ERR(mvm, 3290 "Error reconfiguring TXQ #%d\n", queue); 3291 return ret; 3292 } 3293 } 3294 3295 if (alloc_queue) 3296 iwl_mvm_enable_txq(mvm, sta, queue, ssn, 3297 &cfg, wdg_timeout); 3298 3299 /* Send ADD_STA command to enable aggs only if the queue isn't shared */ 3300 if (queue_status != IWL_MVM_QUEUE_SHARED) { 3301 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 3302 if (ret) 3303 return -EIO; 3304 } 3305 3306 /* No need to mark as reserved */ 3307 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 3308 3309 out: 3310 /* 3311 * Even though in theory the peer could have different 3312 * aggregation reorder buffer sizes for different sessions, 3313 * our ucode doesn't allow for that and has a global limit 3314 * for each station. Therefore, use the minimum of all the 3315 * aggregation sessions and our default value. 3316 */ 3317 mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize = 3318 min(mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize, 3319 buf_size); 3320 mvmsta->deflink.lq_sta.rs_drv.lq.agg_frame_cnt_limit = 3321 mvmsta->deflink.lq_sta.rs_drv.pers.max_agg_bufsize; 3322 3323 IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n", 3324 sta->addr, tid); 3325 3326 return iwl_mvm_send_lq_cmd(mvm, &mvmsta->deflink.lq_sta.rs_drv.lq); 3327 } 3328 3329 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm, 3330 struct iwl_mvm_sta *mvmsta, 3331 struct iwl_mvm_tid_data *tid_data) 3332 { 3333 u16 txq_id = tid_data->txq_id; 3334 3335 lockdep_assert_held(&mvm->mutex); 3336 3337 if (iwl_mvm_has_new_tx_api(mvm)) 3338 return; 3339 3340 /* 3341 * The TXQ is marked as reserved only if no traffic came through yet. 3342 * This means no traffic has been sent on this TID (agg'd or not), so 3343 * we no longer have use for the queue. Since it hasn't even been 3344 * allocated through iwl_mvm_enable_txq, we can just mark it back as 3345 * free.
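 * (the reservation was made by iwl_mvm_find_free_queue() in
 * iwl_mvm_sta_tx_agg_start()).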
3346 */ 3347 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { 3348 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; 3349 tid_data->txq_id = IWL_MVM_INVALID_QUEUE; 3350 } 3351 } 3352 3353 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3354 struct ieee80211_sta *sta, u16 tid) 3355 { 3356 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3357 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3358 u16 txq_id; 3359 int err; 3360 3361 /* 3362 * If mac80211 is cleaning its state, then say that we finished since 3363 * our state has been cleared anyway. 3364 */ 3365 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 3366 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3367 return 0; 3368 } 3369 3370 spin_lock_bh(&mvmsta->lock); 3371 3372 txq_id = tid_data->txq_id; 3373 3374 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", 3375 mvmsta->deflink.sta_id, tid, txq_id, 3376 tid_data->state); 3377 3378 mvmsta->agg_tids &= ~BIT(tid); 3379 3380 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 3381 3382 switch (tid_data->state) { 3383 case IWL_AGG_ON: 3384 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 3385 3386 IWL_DEBUG_TX_QUEUES(mvm, 3387 "ssn = %d, next_recl = %d\n", 3388 tid_data->ssn, tid_data->next_reclaimed); 3389 3390 tid_data->ssn = 0xffff; 3391 tid_data->state = IWL_AGG_OFF; 3392 spin_unlock_bh(&mvmsta->lock); 3393 3394 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3395 3396 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3397 return 0; 3398 case IWL_AGG_STARTING: 3399 case IWL_EMPTYING_HW_QUEUE_ADDBA: 3400 /* 3401 * The agg session has been stopped before it was set up. This 3402 * can happen when the AddBA timer times out for example. 3403 */ 3404 3405 /* No barriers since we are under mutex */ 3406 lockdep_assert_held(&mvm->mutex); 3407 3408 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3409 tid_data->state = IWL_AGG_OFF; 3410 err = 0; 3411 break; 3412 default: 3413 IWL_ERR(mvm, 3414 "Stopping AGG while state not ON or starting for %d on %d (%d)\n", 3415 mvmsta->deflink.sta_id, tid, tid_data->state); 3416 IWL_ERR(mvm, 3417 "\ttid_data->txq_id = %d\n", tid_data->txq_id); 3418 err = -EINVAL; 3419 } 3420 3421 spin_unlock_bh(&mvmsta->lock); 3422 3423 return err; 3424 } 3425 3426 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3427 struct ieee80211_sta *sta, u16 tid) 3428 { 3429 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3430 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3431 u16 txq_id; 3432 enum iwl_mvm_agg_state old_state; 3433 3434 /* 3435 * First set the agg state to OFF to avoid calling 3436 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
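 * Once the state is OFF, the drain/flush sequence below can run
 * without triggering a spurious stop callback.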
3437 */ 3438 spin_lock_bh(&mvmsta->lock); 3439 txq_id = tid_data->txq_id; 3440 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 3441 mvmsta->deflink.sta_id, tid, txq_id, 3442 tid_data->state); 3443 old_state = tid_data->state; 3444 tid_data->state = IWL_AGG_OFF; 3445 mvmsta->agg_tids &= ~BIT(tid); 3446 spin_unlock_bh(&mvmsta->lock); 3447 3448 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 3449 3450 if (old_state >= IWL_AGG_ON) { 3451 iwl_mvm_drain_sta(mvm, mvmsta, true); 3452 3453 if (iwl_mvm_has_new_tx_api(mvm)) { 3454 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->deflink.sta_id, 3455 BIT(tid))) 3456 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3457 iwl_trans_wait_txq_empty(mvm->trans, txq_id); 3458 } else { 3459 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id))) 3460 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3461 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); 3462 } 3463 3464 iwl_mvm_drain_sta(mvm, mvmsta, false); 3465 3466 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3467 } 3468 3469 return 0; 3470 } 3471 3472 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 3473 { 3474 int i, max = -1, max_offs = -1; 3475 3476 lockdep_assert_held(&mvm->mutex); 3477 3478 /* Pick the unused key offset with the highest 'deleted' 3479 * counter. Every time a key is deleted, all the counters 3480 * are incremented and the one that was just deleted is 3481 * reset to zero. Thus, the highest counter is the one 3482 * that was deleted longest ago. Pick that one. 3483 */ 3484 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3485 if (test_bit(i, mvm->fw_key_table)) 3486 continue; 3487 if (mvm->fw_key_deleted[i] > max) { 3488 max = mvm->fw_key_deleted[i]; 3489 max_offs = i; 3490 } 3491 } 3492 3493 if (max_offs < 0) 3494 return STA_KEY_IDX_INVALID; 3495 3496 return max_offs; 3497 } 3498 3499 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, 3500 struct ieee80211_vif *vif, 3501 struct ieee80211_sta *sta) 3502 { 3503 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3504 3505 if (sta) 3506 return iwl_mvm_sta_from_mac80211(sta); 3507 3508 /* 3509 * The device expects GTKs for station interfaces to be 3510 * installed as GTKs for the AP station. If we have no 3511 * station ID, then use AP's station ID. 3512 */ 3513 if (vif->type == NL80211_IFTYPE_STATION && 3514 mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { 3515 u8 sta_id = mvmvif->deflink.ap_sta_id; 3516 3517 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 3518 lockdep_is_held(&mvm->mutex)); 3519 3520 /* 3521 * It is possible that the 'sta' parameter is NULL, 3522 * for example when a GTK is removed - the sta_id will then 3523 * be the AP ID, and no station was passed by mac80211. 
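 * Internal stations are stored as ERR_PTR() markers in fw_id_to_mac_id,
 * which is why IS_ERR_OR_NULL() below must cover both cases.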
3524 */ 3525 if (IS_ERR_OR_NULL(sta)) 3526 return NULL; 3527 3528 return iwl_mvm_sta_from_mac80211(sta); 3529 } 3530 3531 return NULL; 3532 } 3533 3534 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len) 3535 { 3536 int i; 3537 3538 for (i = len - 1; i >= 0; i--) { 3539 if (pn1[i] > pn2[i]) 3540 return 1; 3541 if (pn1[i] < pn2[i]) 3542 return -1; 3543 } 3544 3545 return 0; 3546 } 3547 3548 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 3549 u32 sta_id, 3550 struct ieee80211_key_conf *key, bool mcast, 3551 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, 3552 u8 key_offset, bool mfp) 3553 { 3554 union { 3555 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3556 struct iwl_mvm_add_sta_key_cmd cmd; 3557 } u = {}; 3558 __le16 key_flags; 3559 int ret; 3560 u32 status; 3561 u16 keyidx; 3562 u64 pn = 0; 3563 int i, size; 3564 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3565 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3566 int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY, 3567 new_api ? 2 : 1); 3568 3569 if (sta_id == IWL_MVM_INVALID_STA) 3570 return -EINVAL; 3571 3572 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & 3573 STA_KEY_FLG_KEYID_MSK; 3574 key_flags = cpu_to_le16(keyidx); 3575 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); 3576 3577 switch (key->cipher) { 3578 case WLAN_CIPHER_SUITE_TKIP: 3579 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 3580 if (api_ver >= 2) { 3581 memcpy((void *)&u.cmd.tx_mic_key, 3582 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 3583 IWL_MIC_KEY_SIZE); 3584 3585 memcpy((void *)&u.cmd.rx_mic_key, 3586 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 3587 IWL_MIC_KEY_SIZE); 3588 pn = atomic64_read(&key->tx_pn); 3589 3590 } else { 3591 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; 3592 for (i = 0; i < 5; i++) 3593 u.cmd_v1.tkip_rx_ttak[i] = 3594 cpu_to_le16(tkip_p1k[i]); 3595 } 3596 memcpy(u.cmd.common.key, key->key, key->keylen); 3597 break; 3598 case WLAN_CIPHER_SUITE_CCMP: 3599 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 3600 memcpy(u.cmd.common.key, key->key, key->keylen); 3601 if (api_ver >= 2) 3602 pn = atomic64_read(&key->tx_pn); 3603 break; 3604 case WLAN_CIPHER_SUITE_WEP104: 3605 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 3606 fallthrough; 3607 case WLAN_CIPHER_SUITE_WEP40: 3608 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 3609 memcpy(u.cmd.common.key + 3, key->key, key->keylen); 3610 break; 3611 case WLAN_CIPHER_SUITE_GCMP_256: 3612 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); 3613 fallthrough; 3614 case WLAN_CIPHER_SUITE_GCMP: 3615 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); 3616 memcpy(u.cmd.common.key, key->key, key->keylen); 3617 if (api_ver >= 2) 3618 pn = atomic64_read(&key->tx_pn); 3619 break; 3620 default: 3621 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); 3622 memcpy(u.cmd.common.key, key->key, key->keylen); 3623 } 3624 3625 if (mcast) 3626 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3627 if (mfp) 3628 key_flags |= cpu_to_le16(STA_KEY_MFP); 3629 3630 u.cmd.common.key_offset = key_offset; 3631 u.cmd.common.key_flags = key_flags; 3632 u.cmd.common.sta_id = sta_id; 3633 3634 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) 3635 i = 0; 3636 else 3637 i = -1; 3638 3639 for (; i < IEEE80211_NUM_TIDS; i++) { 3640 struct ieee80211_key_seq seq = {}; 3641 u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn; 3642 int rx_pn_len = 8; 3643 /* there's a hole at 2/3 in FW format depending on version */ 3644 int hole = api_ver >= 3 ? 
0 : 2; 3645 3646 ieee80211_get_key_rx_seq(key, i, &seq); 3647 3648 if (key->cipher == WLAN_CIPHER_SUITE_TKIP) { 3649 rx_pn[0] = seq.tkip.iv16; 3650 rx_pn[1] = seq.tkip.iv16 >> 8; 3651 rx_pn[2 + hole] = seq.tkip.iv32; 3652 rx_pn[3 + hole] = seq.tkip.iv32 >> 8; 3653 rx_pn[4 + hole] = seq.tkip.iv32 >> 16; 3654 rx_pn[5 + hole] = seq.tkip.iv32 >> 24; 3655 } else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) { 3656 rx_pn = seq.hw.seq; 3657 rx_pn_len = seq.hw.seq_len; 3658 } else { 3659 rx_pn[0] = seq.ccmp.pn[0]; 3660 rx_pn[1] = seq.ccmp.pn[1]; 3661 rx_pn[2 + hole] = seq.ccmp.pn[2]; 3662 rx_pn[3 + hole] = seq.ccmp.pn[3]; 3663 rx_pn[4 + hole] = seq.ccmp.pn[4]; 3664 rx_pn[5 + hole] = seq.ccmp.pn[5]; 3665 } 3666 3667 if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt, 3668 rx_pn_len) > 0) 3669 memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn, 3670 rx_pn_len); 3671 } 3672 3673 if (api_ver >= 2) { 3674 u.cmd.transmit_seq_cnt = cpu_to_le64(pn); 3675 size = sizeof(u.cmd); 3676 } else { 3677 size = sizeof(u.cmd_v1); 3678 } 3679 3680 status = ADD_STA_SUCCESS; 3681 if (cmd_flags & CMD_ASYNC) 3682 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, 3683 &u.cmd); 3684 else 3685 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, 3686 &u.cmd, &status); 3687 3688 switch (status) { 3689 case ADD_STA_SUCCESS: 3690 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); 3691 break; 3692 default: 3693 ret = -EIO; 3694 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); 3695 break; 3696 } 3697 3698 return ret; 3699 } 3700 3701 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, 3702 struct ieee80211_key_conf *keyconf, 3703 u8 sta_id, bool remove_key) 3704 { 3705 struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; 3706 3707 /* verify the key details match the required command's expectations */ 3708 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || 3709 (keyconf->keyidx != 4 && keyconf->keyidx != 5 && 3710 keyconf->keyidx != 6 && keyconf->keyidx != 7) || 3711 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && 3712 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && 3713 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) 3714 return -EINVAL; 3715 3716 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && 3717 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) 3718 return -EINVAL; 3719 3720 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); 3721 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3722 3723 if (remove_key) { 3724 /* This is a valid situation for IGTK */ 3725 if (sta_id == IWL_MVM_INVALID_STA) 3726 return 0; 3727 3728 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3729 } else { 3730 struct ieee80211_key_seq seq; 3731 const u8 *pn; 3732 3733 switch (keyconf->cipher) { 3734 case WLAN_CIPHER_SUITE_AES_CMAC: 3735 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); 3736 break; 3737 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3738 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3739 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); 3740 break; 3741 default: 3742 return -EINVAL; 3743 } 3744 3745 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); 3746 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3747 igtk_cmd.ctrl_flags |= 3748 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); 3749 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3750 pn = seq.aes_cmac.pn; 3751 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 3752 ((u64) pn[4] << 8) | 3753 ((u64) pn[3] << 16) | 3754 ((u64) pn[2] << 24) | 3755 ((u64) pn[1] << 32) | 3756 ((u64) pn[0] << 40)); 3757 } 3758 3759 IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for 
sta %u\n", 3760 remove_key ? "removing" : "installing", 3761 keyconf->keyidx >= 6 ? "B" : "", 3762 keyconf->keyidx, igtk_cmd.sta_id); 3763 3764 if (!iwl_mvm_has_new_rx_api(mvm)) { 3765 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { 3766 .ctrl_flags = igtk_cmd.ctrl_flags, 3767 .key_id = igtk_cmd.key_id, 3768 .sta_id = igtk_cmd.sta_id, 3769 .receive_seq_cnt = igtk_cmd.receive_seq_cnt 3770 }; 3771 3772 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, 3773 ARRAY_SIZE(igtk_cmd_v1.igtk)); 3774 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3775 sizeof(igtk_cmd_v1), &igtk_cmd_v1); 3776 } 3777 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3778 sizeof(igtk_cmd), &igtk_cmd); 3779 } 3780 3781 3782 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 3783 struct ieee80211_vif *vif, 3784 struct ieee80211_sta *sta) 3785 { 3786 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3787 3788 if (sta) 3789 return sta->addr; 3790 3791 if (vif->type == NL80211_IFTYPE_STATION && 3792 mvmvif->deflink.ap_sta_id != IWL_MVM_INVALID_STA) { 3793 u8 sta_id = mvmvif->deflink.ap_sta_id; 3794 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3795 lockdep_is_held(&mvm->mutex)); 3796 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) 3797 return NULL; 3798 3799 return sta->addr; 3800 } 3801 3802 3803 return NULL; 3804 } 3805 3806 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3807 struct ieee80211_vif *vif, 3808 struct ieee80211_sta *sta, 3809 struct ieee80211_key_conf *keyconf, 3810 u8 key_offset, 3811 bool mcast) 3812 { 3813 const u8 *addr; 3814 struct ieee80211_key_seq seq; 3815 u16 p1k[5]; 3816 u32 sta_id; 3817 bool mfp = false; 3818 3819 if (sta) { 3820 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3821 3822 sta_id = mvm_sta->deflink.sta_id; 3823 mfp = sta->mfp; 3824 } else if (vif->type == NL80211_IFTYPE_AP && 3825 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 3826 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3827 3828 sta_id = mvmvif->deflink.mcast_sta.sta_id; 3829 } else { 3830 IWL_ERR(mvm, "Failed to find station id\n"); 3831 return -EINVAL; 3832 } 3833 3834 if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) { 3835 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 3836 if (!addr) { 3837 IWL_ERR(mvm, "Failed to find mac address\n"); 3838 return -EINVAL; 3839 } 3840 3841 /* get phase 1 key from mac80211 */ 3842 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3843 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 3844 3845 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3846 seq.tkip.iv32, p1k, 0, key_offset, 3847 mfp); 3848 } 3849 3850 return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3851 0, NULL, 0, key_offset, mfp); 3852 } 3853 3854 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3855 struct ieee80211_vif *vif, 3856 struct ieee80211_sta *sta, 3857 struct ieee80211_key_conf *keyconf, 3858 u8 key_offset) 3859 { 3860 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3861 struct iwl_mvm_sta *mvm_sta; 3862 u8 sta_id = IWL_MVM_INVALID_STA; 3863 int ret; 3864 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 3865 3866 lockdep_assert_held(&mvm->mutex); 3867 3868 if (vif->type != NL80211_IFTYPE_AP || 3869 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3870 /* Get the station id from the mvm local station table */ 3871 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3872 if (!mvm_sta) { 3873 IWL_ERR(mvm, "Failed to find station\n"); 3874 return -EINVAL; 3875 } 3876 sta_id = mvm_sta->deflink.sta_id; 3877 3878 /* 3879 * It is 
int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
			struct ieee80211_vif *vif,
			struct ieee80211_sta *sta,
			struct ieee80211_key_conf *keyconf,
			u8 key_offset)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	if (vif->type != NL80211_IFTYPE_AP ||
	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
		/* Get the station id from the mvm local station table */
		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
		if (!mvm_sta) {
			IWL_ERR(mvm, "Failed to find station\n");
			return -EINVAL;
		}
		sta_id = mvm_sta->deflink.sta_id;

		/*
		 * It is possible that the 'sta' parameter is NULL, and thus
		 * there is a need to retrieve the sta from the local station
		 * table.
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->deflink.mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? sta->addr : zero_addr, ret);
	return ret;
}

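/*
 * Remove a previously installed key. Note that WEP keys are uploaded
 * twice by iwl_mvm_set_sta_key() (once for multicast and once for
 * unicast, sharing one offset), so they are also removed twice here.
 */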
int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_INVALID_STA;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (mvm_sta)
		sta_id = mvm_sta->deflink.sta_id;
	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
		sta_id = iwl_mvm_vif_from_mac80211(vif)->deflink.mcast_sta.sta_id;

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (sta && !mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

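/*
 * Push a new TKIP phase-1 key to the firmware when mac80211 derives one
 * (i.e. when IV32 changes). This runs without holding mvm->mutex, hence
 * the RCU read lock and the CMD_ASYNC key upload.
 */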
void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	bool mfp = sta ? sta->mfp : false;

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta->deflink.sta_id, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
			     mfp);

 unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool single_sta_queue)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't support TSPEC so that's OK
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation or dqa queues then check
	 * if all the queues that we're releasing frames from, combined, have:
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (single_sta_queue) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];

			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(sleep_tx_count == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
	} else {
		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

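/*
 * Handle the firmware's end-of-service-period notification: look up the
 * station by its firmware index and forward the event to mac80211 via
 * ieee80211_sta_eosp().
 */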
void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta,
				   bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->deflink.sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	if (mvm->mld_api_is_used) {
		iwl_mvm_mld_sta_modify_disable_tx(mvm, mvmsta, disable);
		return;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	if (mvm->mld_api_is_used) {
		iwl_mvm_mld_sta_modify_disable_tx_ap(mvm, sta, disable);
		return;
	}

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * If sta PS state is handled by mac80211, tell it to start/stop
	 * queuing tx for this station.
	 */
	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

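/*
 * Same as iwl_mvm_sta_modify_disable_tx(), but for internal
 * (multicast/broadcast) stations, which have no mac80211 counterpart.
 */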
static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
					      struct iwl_mvm_vif *mvmvif,
					      struct iwl_mvm_int_sta *sta,
					      bool disable)
{
	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = sta->sta_id,
		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	if (mvm->mld_api_is_used) {
		iwl_mvm_mld_modify_all_sta_disable_tx(mvm, mvmvif, disable);
		return;
	}

	rcu_read_lock();

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}

	rcu_read_unlock();

	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return;

	/* Need to block/unblock also multicast station */
	if (mvmvif->deflink.mcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.mcast_sta,
						  disable);

	/*
	 * Only unblock the broadcast station (FW blocks it for immediate
	 * quiet, not the driver)
	 */
	if (!disable && mvmvif->deflink.bcast_sta.sta_id != IWL_MVM_INVALID_STA)
		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
						  &mvmvif->deflink.bcast_sta,
						  disable);
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->deflink.ap_sta_id);

	if (mvmsta)
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}

u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
{
	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	if (mvm->trans->trans_cfg->gen2)
		sn &= 0xff;

	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
}

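/*
 * Add an internal station for PASN (pre-association security
 * negotiation, used for secure ranging) and install its pairwise key.
 * A minimal usage sketch, assuming the address, cipher and key were
 * negotiated out of band (the identifiers below are illustrative):
 *
 *	ret = iwl_mvm_add_pasn_sta(mvm, vif, &pasn_int_sta, peer_addr,
 *				   cipher, tk, tk_len);
 */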
#if defined(__linux__)
int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
			 u8 *key, u32 key_len)
{
	int ret;
	u16 queue;
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct ieee80211_key_conf *keyconf;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	bool mld = iwl_mvm_has_mld_api(mvm->fw);
	u32 type = mld ? STATION_TYPE_PEER : IWL_STA_LINK;

	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
				       NL80211_IFTYPE_UNSPECIFIED, type);
	if (ret)
		return ret;

	if (mld)
		ret = iwl_mvm_mld_add_int_sta_with_queue(mvm, sta, addr,
							 mvmvif->deflink.fw_link_id,
							 &queue,
							 IWL_MAX_TID_COUNT,
							 &wdg_timeout);
	else
		ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id,
						     mvmvif->color, addr, sta,
						     &queue,
						     IWL_MVM_TX_FIFO_BE);
	if (ret)
		goto out;

	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
	if (!keyconf) {
		ret = -ENOBUFS;
		goto out;
	}

	keyconf->cipher = cipher;
	memcpy(keyconf->key, key, key_len);
	keyconf->keylen = key_len;
	keyconf->flags = IEEE80211_KEY_FLAG_PAIRWISE;

	if (mld) {
		/* The MFP flag is set according to the station mfp field. Since
		 * we don't have a station, set it manually.
		 */
		u32 key_flags =
			iwl_mvm_get_sec_flags(mvm, vif, NULL, keyconf) |
			IWL_SEC_KEY_FLAG_MFP;
		u32 sta_mask = BIT(sta->sta_id);

		ret = iwl_mvm_mld_send_key(mvm, sta_mask, key_flags, keyconf);
	} else {
		ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
					   0, NULL, 0, 0, true);
	}

	kfree(keyconf);

	/* tear the station down again if installing the key failed */
	if (ret)
		goto out;

	return 0;
out:
	iwl_mvm_dealloc_int_sta(mvm, sta);
	return ret;
}
#endif

void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
				   struct ieee80211_vif *vif,
				   u32 id)
{
	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
		.id = cpu_to_le32(id),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm,
				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
				   CMD_ASYNC,
				   sizeof(cancel_channel_switch_cmd),
				   &cancel_channel_switch_cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
}