/******************************************************************************
 *
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * The full GNU General Public License is included in this distribution
 * in the file called COPYING.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 *  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 * BSD LICENSE
 *
 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  * Neither the name Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 *****************************************************************************/
#include <net/mac80211.h>

#include "mvm.h"
#include "sta.h"
#include "rs.h"

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm);

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				u32 sta_id,
				struct ieee80211_key_conf *key, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset, bool mfp);

/*
 * New versions of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assumes the AP's sta_id (of the sta vif) is 0. Reserve it. */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < ARRAY_SIZE(mvm->fw_id_to_mac_id); sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}
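/*
 * Illustrative note: on a station vif sta_id 0 may be handed out, while on
 * any other iftype reserved_ids == BIT(0) makes the search above skip
 * slot 0, keeping it available for a station vif's AP as the d0i3/d3
 * comment in iwl_mvm_find_free_sta_id() requires.
 */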
/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->bandwidth) {
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		/* fall through */
	case IEEE80211_STA_RX_BW_20:
		if (sta->ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->ht_cap.ampdu_density;
	}

	if (sta->vht_cap.vht_supported) {
		agg_size = sta->vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->ht_cap.ht_supported) {
		agg_size = sta->ht_cap.ampdu_factor;
	}

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}
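	/*
	 * Worked example for the U-APSD encoding above (illustrative; the
	 * trigger/delivery split is an assumption about the firmware field
	 * layout): a peer with U-APSD enabled for VO and VI yields
	 * uapsd_acs == BIT(AC_VO) | BIT(AC_VI) == 0x0c, and the "<< 4" copy
	 * turns it into 0xcc - presumably mirroring the same AC bitmap into
	 * both halves of the field.  sp_length follows the QoS-info
	 * encoding: max_sp is in units of two frames (max_sp == 2 -> 4
	 * frames), and 0 means "deliver all buffered frames", sent as 128.
	 */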
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);

	return ret;
}
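/*
 * Disable a TXQ (descriptive summary of the function below): on the new
 * TX path the transport queue is freed outright; on the DQA path the TID
 * is unmapped first, and SCD_QUEUE_CFG with SCD_CFG_DISABLE_QUEUE is only
 * sent once no TIDs remain mapped to the queue.
 */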
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u8 tid, u8 flags)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		iwl_trans_txq_free(mvm->trans, queue);

		return 0;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled - nothing left to do in this func */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here - the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Regardless of whether this is a reserved TXQ for a STA - mark it
	 * as no longer reserved.
	 */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, flags,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}
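/*
 * Example (illustrative): if TIDs 0 and 5 are mapped to a queue but only
 * TID 5 has an active BA session (IWL_AGG_ON), the function above returns
 * BIT(5), which callers pass to iwl_mvm_invalidate_sta_queue() as
 * disable_agg_tids before redirecting or freeing the queue.
 */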
/*
 * Remove a queue from a station's resources.
 * Note that this only marks as free. It DOESN'T delete a BA agreement, and
 * doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, queue, tid, 0);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}
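/*
 * Sharing example for the priority rules below (illustrative): a STA that
 * already has DATA queues for BK and VI and needs a queue for VO finds no
 * BE queue (rule 1) and no same-AC queue (rule 2), so rule 3a applies and
 * the VI queue is shared.
 */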
static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}
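/*
 * Reference for the AC comparisons below (values from mac80211's
 * enum ieee80211_ac_numbers): VO = 0, VI = 1, BE = 2, BK = 3. A "lower"
 * AC therefore has a numerically LARGER index, e.g. redirecting a queue
 * from VO (0) to BE (2) when a BE TID is added to it.
 */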
/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case, otherwise - if no redirection is required - it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK has
	 * value 3 and VO has value 0; to check if ac X is lower than ac Y,
	 * we need to check if the numerical value of X is LARGER than of Y.
	 */
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there of shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size = IWL_DEFAULT_QUEUE_SIZE;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = IWL_MGMT_QUEUE_SIZE;
	}
	queue = iwl_trans_txq_alloc(mvm->trans,
				    cpu_to_le16(TX_QUEUE_CFG_ENABLE_QUEUE),
				    sta_id, tid, SCD_QUEUE_CFG, size, timeout);

	if (queue < 0) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Failed allocating TXQ for sta %d tid %d, ret: %d\n",
				    sta_id, tid, queue);
		return queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}
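/*
 * Usage sketch (illustrative): management frames are requested with
 * tid == IWL_MAX_TID_COUNT, which the function above translates into
 * IWL_MGMT_TID and the smaller IWL_MGMT_QUEUE_SIZE, e.g.:
 *
 *	queue = iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
 *					wdg_timeout);
 */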
static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	if (sta) {
		mvmtxq->txq_id = queue;
		mvm->tvqm_info[queue].txq_tid = tid;
		mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}
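/*
 * Note on the inc_ssn value returned above: when the transport had to
 * bump the SSN by one to match the HW write pointer, the caller must
 * advance the TID's driver-side sequence number as well - see the
 * "seq_number += 0x10" handling in iwl_mvm_sta_alloc_queue() below.
 */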
static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret;

	/* queue sharing is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find TID for queue, and make sure it is the only one on the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}
/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - return it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the
		 * TXQ, ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue
		 * can't be allocated for it as long as it is an owner of
		 * another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}
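/*
 * Example of the ownership hand-off above (illustrative): a TXQ owned by
 * TID 3 (txq_tid == 3) is shared with TIDs 1 and 2. If TID 3 goes
 * inactive and is unmapped, BIT(3) drops out of the queue's tid_bitmap,
 * the queue is marked in *changetid_queues, and
 * iwl_mvm_change_queue_tid() later hands ownership to TID 1 or 2.
 */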
/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hadn't been served recently and
		 * is in use.
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It
		 * could be that it was removed since getting the queues, and
		 * in this case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret) {
			rcu_read_unlock();
			return ret;
		}
	}

	rcu_read_unlock();

	return free_queue;
}
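/*
 * Queue allocation order implemented below (descriptive summary): on the
 * new TX path everything is delegated to iwl_mvm_sta_alloc_queue_tvqm().
 * Otherwise: try a free MGMT queue (for tid == IWL_MAX_TID_COUNT), then
 * the STA's reserved queue, then any free DATA queue, then reclaim an
 * inactive queue, and finally fall back to sharing an existing queue.
 */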
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the
	 * queue as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point.
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark queue as shared in transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there of shared
	 * queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);
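	/*
	 * Descriptive note on the inc_ssn handling above: in the 802.11
	 * sequence control field the sequence number occupies bits 4..15
	 * (IEEE80211_SCTL_SEQ == 0xfff0), so adding 0x10 to seq_number
	 * advances the driver's sequence number by exactly one, matching
	 * the "ssn + 1" that was programmed into the hardware.
	 */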
	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	iwl_mvm_disable_txq(mvm, sta, queue, tid, 0);

	return ret;
}

static inline u8 iwl_mvm_tid_to_ac_queue(int tid)
{
	if (tid == IWL_MAX_TID_COUNT)
		return IEEE80211_AC_VO; /* MGMT */

	return tid_to_mac80211_ac[tid];
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid);
		list_del_init(&mvmtxq->list);
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
	}

	mutex_unlock(&mvm->mutex);
}

static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}
/*
 * In DQA mode, after a HW restart the queues should be allocated as before,
 * in order to avoid race conditions when there are shared queues. This
 * function does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * HW sets it now, the FW reset will cause the seq num
			 * to start at 0 again, so the driver needs to update
			 * it internally as well, to keep in sync with the
			 * real value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;
	cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
							     color));
	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}
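/*
 * Add a station to the firmware table (descriptive summary of the
 * function below): pick a free sta_id (or reuse the old one on HW
 * restart, re-mapping the queues via
 * iwl_mvm_realloc_queues_after_restart()), initialize the driver-side
 * state, and send the ADD_STA command through iwl_mvm_sta_send_to_fw().
 */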
int iwl_mvm_add_sta(struct iwl_mvm *mvm,
		    struct ieee80211_vif *vif,
		    struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_rxq_dup_data *dup_data;
	int i, ret, sta_id;
	bool sta_update = false;
	unsigned int sta_flags = 0;

	lockdep_assert_held(&mvm->mutex);

	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
		sta_id = iwl_mvm_find_free_sta_id(mvm,
						  ieee80211_vif_type_p2p(vif));
	else
		sta_id = mvm_sta->sta_id;

	if (sta_id == IWL_MVM_INVALID_STA)
		return -ENOSPC;

	spin_lock_init(&mvm_sta->lock);

	/* if this is a HW restart re-alloc existing queues */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		struct iwl_mvm_int_sta tmp_sta = {
			.sta_id = sta_id,
			.type = mvm_sta->sta_type,
		};

		/*
		 * First add an empty station since allocating
		 * a queue requires a valid station
		 */
		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
						 mvmvif->id, mvmvif->color);
		if (ret)
			goto err;

		iwl_mvm_realloc_queues_after_restart(mvm, sta);
		sta_update = true;
		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
		goto update_fw;
	}

	mvm_sta->sta_id = sta_id;
	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
						      mvmvif->color);
	mvm_sta->vif = vif;
	if (!mvm->trans->cfg->gen2)
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
	else
		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
	mvm_sta->tx_protection = 0;
	mvm_sta->tt_tx_protection = false;
	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;

	/* HW restart, don't assume the memory has been zeroed */
	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
	mvm_sta->tfd_queue_msk = 0;

	/* for HW restart - reset everything but the sequence number */
	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		u16 seq = mvm_sta->tid_data[i].seq_number;
		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
		mvm_sta->tid_data[i].seq_number = seq;

		/*
		 * Mark all queues for this STA as unallocated and defer TX
		 * frames until the queue is allocated
		 */
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
		INIT_LIST_HEAD(&mvmtxq->list);
		atomic_set(&mvmtxq->tx_request, 0);
	}

	mvm_sta->agg_tids = 0;

	if (iwl_mvm_has_new_rx_api(mvm) &&
	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		int q;

		dup_data = kcalloc(mvm->trans->num_rx_queues,
				   sizeof(*dup_data), GFP_KERNEL);
		if (!dup_data)
			return -ENOMEM;
		/*
		 * Initialize all the last_seq values to 0xffff which can never
		 * compare equal to the frame's seq_ctrl in the check in
		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
		 * number and fragmented packets don't reach that function.
		 *
		 * This thus allows receiving a packet with seqno 0 and the
		 * retry bit set as the very first packet on a new TID.
		 */
		for (q = 0; q < mvm->trans->num_rx_queues; q++)
			memset(dup_data[q].last_seq, 0xff,
			       sizeof(dup_data[q].last_seq));
		mvm_sta->dup_data = dup_data;
	}

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
						 ieee80211_vif_type_p2p(vif));
		if (ret)
			goto err;
	}

	/*
	 * if rs is registered with mac80211, then "add station" will be
	 * handled via the corresponding ops, otherwise we need to notify
	 * rate scaling here
	 */
	if (iwl_mvm_has_tlc_offload(mvm))
		iwl_mvm_rs_add_sta(mvm, mvm_sta);

	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);

update_fw:
	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
	if (ret)
		goto err;

	if (vif->type == NL80211_IFTYPE_STATION) {
		if (!sta->tdls) {
			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
			mvmvif->ap_sta_id = sta_id;
		} else {
			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
		}
	}

	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);

	return 0;

err:
	return ret;
}

int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
		      bool drain)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
			       mvmsta->sta_id);
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
			mvmsta->sta_id);
		break;
	}

	return ret;
}
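/*
 * Typical removal flow (see iwl_mvm_rm_sta() below): draining is enabled,
 * the station's queues are flushed and waited on until empty, and only
 * then is draining disabled again and the station removed from the FW
 * table.
 */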
/*
 * Remove a station from the FW table. Before sending the command to remove
 * the station, validate that the station is indeed known to the driver (a
 * sanity check only).
 */
static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
		.sta_id = sta_id,
	};
	int ret;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	/* Note: internal stations are marked as error values */
	if (!sta) {
		IWL_ERR(mvm, "Invalid station id\n");
		return -EINVAL;
	}

	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
				   sizeof(rm_sta_cmd), &rm_sta_cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
		return ret;
	}

	return 0;
}

static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
				       struct ieee80211_vif *vif,
				       struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	int i;

	lockdep_assert_held(&mvm->mutex);

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		iwl_mvm_disable_txq(mvm, sta, mvm_sta->tid_data[i].txq_id, i,
				    0);
		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
	}

	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_mac80211(sta->txq[i]);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}
}

int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
				  struct iwl_mvm_sta *mvm_sta)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
		u16 txq_id;
		int ret;

		spin_lock_bh(&mvm_sta->lock);
		txq_id = mvm_sta->tid_data[i].txq_id;
		spin_unlock_bh(&mvm_sta->lock);

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
		if (ret)
			return ret;
	}

	return 0;
}
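/*
 * Remove a station (descriptive summary of the function below): drain and
 * flush its frames, wait for its queues to empty, free the queues
 * (including a still-reserved TXQ, if any), then remove the station from
 * the FW table and clear the RCU mapping.
 */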
shouldn't happen - the TDLS channel switch should be canceled 1905 * before the STA is removed. 1906 */ 1907 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) { 1908 mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA; 1909 cancel_delayed_work(&mvm->tdls_cs.dwork); 1910 } 1911 1912 /* 1913 * Make sure that the tx response code sees the station as -EBUSY and 1914 * calls the drain worker. 1915 */ 1916 spin_lock_bh(&mvm_sta->lock); 1917 spin_unlock_bh(&mvm_sta->lock); 1918 1919 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 1920 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 1921 1922 return ret; 1923 } 1924 1925 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, 1926 struct ieee80211_vif *vif, 1927 u8 sta_id) 1928 { 1929 int ret = iwl_mvm_rm_sta_common(mvm, sta_id); 1930 1931 lockdep_assert_held(&mvm->mutex); 1932 1933 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); 1934 return ret; 1935 } 1936 1937 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, 1938 struct iwl_mvm_int_sta *sta, 1939 u32 qmask, enum nl80211_iftype iftype, 1940 enum iwl_sta_type type) 1941 { 1942 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) || 1943 sta->sta_id == IWL_MVM_INVALID_STA) { 1944 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); 1945 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA)) 1946 return -ENOSPC; 1947 } 1948 1949 sta->tfd_queue_msk = qmask; 1950 sta->type = type; 1951 1952 /* put a non-NULL value so iterating over the stations won't stop */ 1953 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); 1954 return 0; 1955 } 1956 1957 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta) 1958 { 1959 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 1960 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 1961 sta->sta_id = IWL_MVM_INVALID_STA; 1962 } 1963 1964 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 *queue, 1965 u8 sta_id, u8 fifo) 1966 { 1967 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 
			mvm->cfg->base_params->wd_timeout :
			IWL_WATCHDOG_DISABLED;

	if (iwl_mvm_has_new_tx_api(mvm)) {
		int tvqm_queue =
			iwl_mvm_tvqm_enable_txq(mvm, sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);
		*queue = tvqm_queue;
	} else {
		struct iwl_trans_txq_scd_cfg cfg = {
			.fifo = fifo,
			.sta_id = sta_id,
			.tid = IWL_MAX_TID_COUNT,
			.aggregate = false,
			.frame_limit = IWL_FRAME_LIMIT,
		};

		iwl_mvm_enable_txq(mvm, NULL, *queue, 0, &cfg, wdg_timeout);
	}
}

int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Allocate aux station and assign to it the aux queue */
	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
				       NL80211_IFTYPE_UNSPECIFIED,
				       IWL_STA_AUX_ACTIVITY);
	if (ret)
		return ret;

	/* Map Aux queue to fifo - needs to happen before adding Aux station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL,
					 MAC_INDEX_AUX, 0);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
		return ret;
	}

	/*
	 * For 22000 firmware and on we cannot add a queue to a station
	 * unknown to the firmware, so enable the queue here - after the
	 * station was added.
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->aux_queue,
					      mvm->aux_sta.sta_id,
					      IWL_MVM_TX_FIFO_MCAST);

	return 0;
}

int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	/* Map snif queue to fifo - must happen before adding snif station */
	if (!iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	ret = iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr,
					 mvmvif->id, 0);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add a queue to a station
	 * unknown to the firmware, so enable the queue here - after the
	 * station was added.
	 */
	if (iwl_mvm_has_new_tx_api(mvm))
		iwl_mvm_enable_aux_snif_queue(mvm, &mvm->snif_queue,
					      mvm->snif_sta.sta_id,
					      IWL_MVM_TX_FIFO_BE);

	return 0;
}

int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_disable_txq(mvm, NULL, mvm->snif_queue, IWL_MAX_TID_COUNT, 0);
	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
{
	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
}

void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm)
{
	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
}

/*
 * Send the add station command for the vif's broadcast station.
 * Assumes that the station was already allocated.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
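 *
 * A minimal usage sketch (mirroring iwl_mvm_add_p2p_bcast_sta() below;
 * illustration only, error handling elided):
 *
 *	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
 *	if (!ret)
 *		ret = iwl_mvm_send_add_bcast_sta(mvm, vif);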
 */
int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
	const u8 *baddr = _baddr;
	int queue;
	int ret;
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_VO,
		.sta_id = mvmvif->bcast_sta.sta_id,
		.tid = IWL_MAX_TID_COUNT,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	lockdep_assert_held(&mvm->mutex);

	if (!iwl_mvm_has_new_tx_api(mvm)) {
		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			queue = mvm->probe_queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			queue = mvm->p2p_dev_queue;
		else if (WARN(1, "Missing required TXQ for adding bcast STA\n"))
			return -EINVAL;

		bsta->tfd_queue_msk |= BIT(queue);

		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
	}

	if (vif->type == NL80211_IFTYPE_ADHOC)
		baddr = vif->bss_conf.bssid;

	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
		return -ENOSPC;

	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
					 mvmvif->id, mvmvif->color);
	if (ret)
		return ret;

	/*
	 * For 22000 firmware and on we cannot add a queue to a station
	 * unknown to the firmware, so enable the queue here - after the
	 * station was added.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
						IWL_MAX_TID_COUNT,
						wdg_timeout);

		if (vif->type == NL80211_IFTYPE_AP ||
		    vif->type == NL80211_IFTYPE_ADHOC)
			mvm->probe_queue = queue;
		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
			mvm->p2p_dev_queue = queue;
	}

	return 0;
}

static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
					  struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int queue;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true, 0);

	switch (vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		queue = mvm->probe_queue;
		break;
	case NL80211_IFTYPE_P2P_DEVICE:
		queue = mvm->p2p_dev_queue;
		break;
	default:
		WARN(1, "Can't free bcast queue on vif type %d\n",
		     vif->type);
		return;
	}

	iwl_mvm_disable_txq(mvm, NULL, queue, IWL_MAX_TID_COUNT, 0);
	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
}

/* Send the FW a request to remove the station from its internal data
 * structures, but DO NOT remove the entry from the local data structures.
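 *
 * A typical teardown pairing (mirroring iwl_mvm_rm_p2p_bcast_sta() further
 * down; illustration only):
 *
 *	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
 *	iwl_mvm_dealloc_bcast_sta(mvm, vif);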
 */
int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_free_bcast_sta_queues(mvm, vif);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");
	return ret;
}

int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	lockdep_assert_held(&mvm->mutex);

	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
					ieee80211_vif_type_p2p(vif),
					IWL_STA_GENERAL_PURPOSE);
}

/* Allocate a new station entry for the broadcast station to the given vif,
 * and send it to the FW.
 * Note that each P2P mac should have its own broadcast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the broadcast station is added
 */
int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
	if (ret)
		return ret;

	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);

	if (ret)
		iwl_mvm_dealloc_int_sta(mvm, bsta);

	return ret;
}

void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	int ret;

	lockdep_assert_held(&mvm->mutex);

	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);

	iwl_mvm_dealloc_bcast_sta(mvm, vif);

	return ret;
}

/*
 * Allocate a new station entry for the multicast station to the given vif,
 * and send it to the FW.
 * Note that each AP/GO mac should have its own multicast station.
 *
 * @mvm: the mvm component
 * @vif: the interface to which the multicast station is added
 */
int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
	const u8 *maddr = _maddr;
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = IWL_MVM_TX_FIFO_MCAST,
		.sta_id = msta->sta_id,
		.tid = 0,
		.aggregate = false,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
		    vif->type != NL80211_IFTYPE_ADHOC))
		return -ENOTSUPP;

	/*
	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
	 * invalid, so make sure we use the queue we want.
	 * Note that this is done here as we want to avoid making DQA
	 * changes in mac80211 layer.
	 */
	if (vif->type == NL80211_IFTYPE_ADHOC)
		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;

	/*
	 * While in previous FWs we had to exclude cab queue from TFD queue
	 * mask, now it is needed as any other queue.
	 */
	if (!iwl_mvm_has_new_tx_api(mvm) &&
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);
		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
	}
	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
					 mvmvif->id, mvmvif->color);
	if (ret) {
		iwl_mvm_dealloc_int_sta(mvm, msta);
		return ret;
	}

	/*
	 * Enable cab queue after the ADD_STA command is sent.
	 * This is needed for 22000 firmware which won't accept SCD_QUEUE_CFG
	 * command with unknown station id, and for FW that doesn't support
	 * station API since the cab queue is not included in the
	 * tfd_queue_mask.
	 */
	if (iwl_mvm_has_new_tx_api(mvm)) {
		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
						    0,
						    timeout);
		mvmvif->cab_queue = queue;
	} else if (!fw_has_api(&mvm->fw->ucode_capa,
			       IWL_UCODE_TLV_API_STA_TYPE))
		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
				   timeout);

	if (mvmvif->ap_wep_key) {
		u8 key_offset = iwl_mvm_set_fw_key_idx(mvm);

		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;

		ret = iwl_mvm_send_sta_key(mvm, mvmvif->mcast_sta.sta_id,
					   mvmvif->ap_wep_key, 1, 0, NULL, 0,
					   key_offset, 0);
		if (ret)
			return ret;
	}

	return 0;
}

/*
 * Send the FW a request to remove the station from its internal data
 * structures, and in addition remove it from the local data structure.
 */
int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	int ret;

	lockdep_assert_held(&mvm->mutex);

	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true, 0);

	iwl_mvm_disable_txq(mvm, NULL, mvmvif->cab_queue, 0, 0);

	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
	if (ret)
		IWL_WARN(mvm, "Failed sending remove station\n");

	return ret;
}

#define IWL_MAX_RX_BA_SESSIONS 16

static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
{
	struct iwl_mvm_delba_notif notif = {
		.metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA,
		.metadata.sync = 1,
		.delba.baid = baid,
	};
	iwl_mvm_sync_rx_queues_internal(mvm, (void *)&notif, sizeof(notif));
}

static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
				 struct iwl_mvm_baid_data *data)
{
	int i;

	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		int j;
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		spin_lock_bh(&reorder_buf->lock);
		if (likely(!reorder_buf->num_stored)) {
			spin_unlock_bh(&reorder_buf->lock);
			continue;
		}

		/*
		 * This shouldn't happen in regular DELBA since the internal
		 * delBA notification should trigger a release of all frames in
		 * the reorder buffer.
		 */
		WARN_ON(1);

		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_purge(&entries[j].e.frames);
		/*
		 * Prevent timer re-arm. This prevents a very far fetched case
		 * where we timed out on the notification. There may be prior
		 * RX frames pending in the RX queue before the notification
		 * that might get processed between now and the actual deletion
		 * and we would re-arm the timer although we are deleting the
		 * reorder buffer.
		 */
		reorder_buf->removed = true;
		spin_unlock_bh(&reorder_buf->lock);
		del_timer_sync(&reorder_buf->reorder_timer);
	}
}

static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
					struct iwl_mvm_baid_data *data,
					u16 ssn, u16 buf_size)
{
	int i;

	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
		struct iwl_mvm_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mvm_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];
		int j;

		reorder_buf->num_stored = 0;
		reorder_buf->head_sn = ssn;
		reorder_buf->buf_size = buf_size;
		/* rx reorder timer */
		timer_setup(&reorder_buf->reorder_timer,
			    iwl_mvm_reorder_timer_expired, 0);
		spin_lock_init(&reorder_buf->lock);
		reorder_buf->mvm = mvm;
		reorder_buf->queue = i;
		reorder_buf->valid = false;
		for (j = 0; j < reorder_buf->buf_size; j++)
			__skb_queue_head_init(&entries[j].e.frames);
	}
}

int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct iwl_mvm_baid_data *baid_data = NULL;
	int ret;
	u32 status;

	lockdep_assert_held(&mvm->mutex);

	if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) {
		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
		return -ENOSPC;
	}

	if (iwl_mvm_has_new_rx_api(mvm) && start) {
		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);

		/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
		/*
		 * The division below will be OK if either the cache line size
		 * can be divided by the entry size (ALIGN will round up) or
		 * if the entry size can be divided by the cache line size, in
		 * which case the ALIGN() will do nothing.
		 */
		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

		/*
		 * Upward align the reorder buffer size to fill an entire cache
		 * line for each queue, to avoid sharing cache lines between
		 * different queues.
		 */
		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

		/*
		 * Allocate here so if allocation fails we can bail out early
		 * before starting the BA session in the firmware
		 */
		baid_data = kzalloc(sizeof(*baid_data) +
				    mvm->trans->num_rx_queues *
				    reorder_buf_size,
				    GFP_KERNEL);
		if (!baid_data)
			return -ENOMEM;

		/*
		 * This division is why we need the above BUILD_BUG_ON(),
		 * if that doesn't hold then this will not be right.
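		 *
		 * A worked example with illustrative numbers (not the real
		 * struct sizes): sizeof(entries[0]) == 32 together with
		 * SMP_CACHE_BYTES == 64 passes the BUILD_BUG_ON() above
		 * since 64 % 32 == 0, and for a BA window of buf_size == 25:
		 *
		 *	reorder_buf_size  = 25 * 32  = 800
		 *	ALIGN(800, 64)               = 832
		 *	entries_per_queue = 832 / 32 = 26
		 *
		 * so each queue owns 26 cache-line-aligned entry slots even
		 * though only 25 are addressable, and no two queues share a
		 * cache line.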
2513 */ 2514 baid_data->entries_per_queue = 2515 reorder_buf_size / sizeof(baid_data->entries[0]); 2516 } 2517 2518 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2519 cmd.sta_id = mvm_sta->sta_id; 2520 cmd.add_modify = STA_MODE_MODIFY; 2521 if (start) { 2522 cmd.add_immediate_ba_tid = (u8) tid; 2523 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 2524 cmd.rx_ba_window = cpu_to_le16(buf_size); 2525 } else { 2526 cmd.remove_immediate_ba_tid = (u8) tid; 2527 } 2528 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 2529 STA_MODIFY_REMOVE_BA_TID; 2530 2531 status = ADD_STA_SUCCESS; 2532 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2533 iwl_mvm_add_sta_cmd_size(mvm), 2534 &cmd, &status); 2535 if (ret) 2536 goto out_free; 2537 2538 switch (status & IWL_ADD_STA_STATUS_MASK) { 2539 case ADD_STA_SUCCESS: 2540 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 2541 start ? "start" : "stopp"); 2542 break; 2543 case ADD_STA_IMMEDIATE_BA_FAILURE: 2544 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 2545 ret = -ENOSPC; 2546 break; 2547 default: 2548 ret = -EIO; 2549 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 2550 start ? "start" : "stopp", status); 2551 break; 2552 } 2553 2554 if (ret) 2555 goto out_free; 2556 2557 if (start) { 2558 u8 baid; 2559 2560 mvm->rx_ba_sessions++; 2561 2562 if (!iwl_mvm_has_new_rx_api(mvm)) 2563 return 0; 2564 2565 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { 2566 ret = -EINVAL; 2567 goto out_free; 2568 } 2569 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> 2570 IWL_ADD_STA_BAID_SHIFT); 2571 baid_data->baid = baid; 2572 baid_data->timeout = timeout; 2573 baid_data->last_rx = jiffies; 2574 baid_data->rcu_ptr = &mvm->baid_map[baid]; 2575 timer_setup(&baid_data->session_timer, 2576 iwl_mvm_rx_agg_session_expired, 0); 2577 baid_data->mvm = mvm; 2578 baid_data->tid = tid; 2579 baid_data->sta_id = mvm_sta->sta_id; 2580 2581 mvm_sta->tid_to_baid[tid] = baid; 2582 if (timeout) 2583 mod_timer(&baid_data->session_timer, 2584 TU_TO_EXP_TIME(timeout * 2)); 2585 2586 iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size); 2587 /* 2588 * protect the BA data with RCU to cover a case where our 2589 * internal RX sync mechanism will timeout (not that it's 2590 * supposed to happen) and we will free the session data while 2591 * RX is being processed in parallel 2592 */ 2593 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 2594 mvm_sta->sta_id, tid, baid); 2595 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 2596 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 2597 } else { 2598 u8 baid = mvm_sta->tid_to_baid[tid]; 2599 2600 if (mvm->rx_ba_sessions > 0) 2601 /* check that restart flow didn't zero the counter */ 2602 mvm->rx_ba_sessions--; 2603 if (!iwl_mvm_has_new_rx_api(mvm)) 2604 return 0; 2605 2606 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 2607 return -EINVAL; 2608 2609 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 2610 if (WARN_ON(!baid_data)) 2611 return -EINVAL; 2612 2613 /* synchronize all rx queues so we can safely delete */ 2614 iwl_mvm_free_reorder(mvm, baid_data); 2615 del_timer_sync(&baid_data->session_timer); 2616 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 2617 kfree_rcu(baid_data, rcu_head); 2618 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 2619 } 2620 return 0; 2621 2622 out_free: 2623 kfree(baid_data); 2624 return ret; 2625 } 2626 2627 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 2628 int tid, u8 queue, bool start) 2629 { 2630 struct iwl_mvm_sta *mvm_sta = 
iwl_mvm_sta_from_mac80211(sta); 2631 struct iwl_mvm_add_sta_cmd cmd = {}; 2632 int ret; 2633 u32 status; 2634 2635 lockdep_assert_held(&mvm->mutex); 2636 2637 if (start) { 2638 mvm_sta->tfd_queue_msk |= BIT(queue); 2639 mvm_sta->tid_disable_agg &= ~BIT(tid); 2640 } else { 2641 /* In DQA-mode the queue isn't removed on agg termination */ 2642 mvm_sta->tid_disable_agg |= BIT(tid); 2643 } 2644 2645 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 2646 cmd.sta_id = mvm_sta->sta_id; 2647 cmd.add_modify = STA_MODE_MODIFY; 2648 if (!iwl_mvm_has_new_tx_api(mvm)) 2649 cmd.modify_mask = STA_MODIFY_QUEUES; 2650 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 2651 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 2652 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 2653 2654 status = ADD_STA_SUCCESS; 2655 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 2656 iwl_mvm_add_sta_cmd_size(mvm), 2657 &cmd, &status); 2658 if (ret) 2659 return ret; 2660 2661 switch (status & IWL_ADD_STA_STATUS_MASK) { 2662 case ADD_STA_SUCCESS: 2663 break; 2664 default: 2665 ret = -EIO; 2666 IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n", 2667 start ? "start" : "stopp", status); 2668 break; 2669 } 2670 2671 return ret; 2672 } 2673 2674 const u8 tid_to_mac80211_ac[] = { 2675 IEEE80211_AC_BE, 2676 IEEE80211_AC_BK, 2677 IEEE80211_AC_BK, 2678 IEEE80211_AC_BE, 2679 IEEE80211_AC_VI, 2680 IEEE80211_AC_VI, 2681 IEEE80211_AC_VO, 2682 IEEE80211_AC_VO, 2683 IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */ 2684 }; 2685 2686 static const u8 tid_to_ucode_ac[] = { 2687 AC_BE, 2688 AC_BK, 2689 AC_BK, 2690 AC_BE, 2691 AC_VI, 2692 AC_VI, 2693 AC_VO, 2694 AC_VO, 2695 }; 2696 2697 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2698 struct ieee80211_sta *sta, u16 tid, u16 *ssn) 2699 { 2700 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2701 struct iwl_mvm_tid_data *tid_data; 2702 u16 normalized_ssn; 2703 u16 txq_id; 2704 int ret; 2705 2706 if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT)) 2707 return -EINVAL; 2708 2709 if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED && 2710 mvmsta->tid_data[tid].state != IWL_AGG_OFF) { 2711 IWL_ERR(mvm, 2712 "Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n", 2713 mvmsta->tid_data[tid].state); 2714 return -ENXIO; 2715 } 2716 2717 lockdep_assert_held(&mvm->mutex); 2718 2719 if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE && 2720 iwl_mvm_has_new_tx_api(mvm)) { 2721 u8 ac = tid_to_mac80211_ac[tid]; 2722 2723 ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid); 2724 if (ret) 2725 return ret; 2726 } 2727 2728 spin_lock_bh(&mvmsta->lock); 2729 2730 /* possible race condition - we entered D0i3 while starting agg */ 2731 if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) { 2732 spin_unlock_bh(&mvmsta->lock); 2733 IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n"); 2734 return -EIO; 2735 } 2736 2737 /* 2738 * Note the possible cases: 2739 * 1. An enabled TXQ - TXQ needs to become agg'ed 2740 * 2. 
The TXQ hasn't yet been enabled, so find a free one and mark
	 *	it as reserved
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (txq_id == IWL_MVM_INVALID_QUEUE) {
		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
					      IWL_MVM_DQA_MIN_DATA_QUEUE,
					      IWL_MVM_DQA_MAX_DATA_QUEUE);
		if (ret < 0) {
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto out;
		}

		txq_id = ret;

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
		ret = -ENXIO;
		IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
			txq_id, IWL_MAX_HW_QUEUES - 1);
		goto out;

	} else if (unlikely(mvm->queue_info[txq_id].status ==
			    IWL_MVM_QUEUE_SHARED)) {
		ret = -ENXIO;
		IWL_DEBUG_TX_QUEUES(mvm,
				    "Can't start tid %d agg on shared queue!\n",
				    tid);
		goto out;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	/*
	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
	 * to align the wrap around of ssn so we compare relevant values.
	 */
	normalized_ssn = tid_data->ssn;
	if (mvm->trans->cfg->gen2)
		normalized_ssn &= 0xff;

	if (normalized_ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

out:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	enum iwl_mvm_queue_status queue_status;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	/*
	 * When FW supports TLC_OFFLOAD, it also implements Tx aggregation
	 * manager, so this function should never be called in this case.
	 */
	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
		return -EINVAL;

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		/*
		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
		 * would have failed, so if we are here there is no need to
		 * allocate a queue.
		 * However, if the aggregation size is different from the
		 * default size, the scheduler should be reconfigured.
		 * We cannot do this with the new TX API, so return unsupported
		 * for now, until it is offloaded to firmware.
		 * Note that if the SCD default value changes - this condition
		 * should be updated as well.
		 */
		if (buf_size < IWL_FRAME_LIMIT)
			return -ENOTSUPP;

		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
		goto out;
	}

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	queue_status = mvm->queue_info[queue].status;

	/* Maybe there is no need to even alloc a queue... */
	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
		alloc_queue = false;

	/*
	 * Only reconfig the SCD for the queue if the window size has
	 * changed from current (become smaller)
	 */
	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
		/*
		 * If reconfiguring an existing queue, it first must be
		 * drained
		 */
		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
						     BIT(queue));
		if (ret) {
			IWL_ERR(mvm,
				"Error draining queue before reconfig\n");
			return ret;
		}

		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
					   mvmsta->sta_id, tid,
					   buf_size, ssn);
		if (ret) {
			IWL_ERR(mvm,
				"Error reconfiguring TXQ #%d\n", queue);
			return ret;
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
				   &cfg, wdg_timeout);

	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
	if (queue_status != IWL_MVM_QUEUE_SHARED) {
		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
		if (ret)
			return -EIO;
	}

	/* No need to mark as reserved */
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

out:
	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq, false);
}

static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
					struct iwl_mvm_sta *mvmsta,
					struct iwl_mvm_tid_data *tid_data)
{
	u16 txq_id = tid_data->txq_id;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return;

	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been allocated
	 * through iwl_mvm_enable_txq, so we can just mark it back as free.
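	 *
	 * For reference, a rough sketch of the non-TVQM queue status
	 * lifecycle as implemented across this file (a summary, not an
	 * exhaustive state machine):
	 *
	 *	FREE     --(iwl_mvm_sta_tx_agg_start)--------> RESERVED
	 *	RESERVED --(iwl_mvm_sta_tx_agg_oper)---------> READY
	 *	READY    --(queue disable / unreserve paths)-> FREE
	 *
	 * Below we only take the RESERVED -> FREE shortcut.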
2953 */ 2954 if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) { 2955 mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE; 2956 tid_data->txq_id = IWL_MVM_INVALID_QUEUE; 2957 } 2958 } 2959 2960 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 2961 struct ieee80211_sta *sta, u16 tid) 2962 { 2963 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 2964 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 2965 u16 txq_id; 2966 int err; 2967 2968 /* 2969 * If mac80211 is cleaning its state, then say that we finished since 2970 * our state has been cleared anyway. 2971 */ 2972 if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 2973 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 2974 return 0; 2975 } 2976 2977 spin_lock_bh(&mvmsta->lock); 2978 2979 txq_id = tid_data->txq_id; 2980 2981 IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n", 2982 mvmsta->sta_id, tid, txq_id, tid_data->state); 2983 2984 mvmsta->agg_tids &= ~BIT(tid); 2985 2986 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 2987 2988 switch (tid_data->state) { 2989 case IWL_AGG_ON: 2990 tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 2991 2992 IWL_DEBUG_TX_QUEUES(mvm, 2993 "ssn = %d, next_recl = %d\n", 2994 tid_data->ssn, tid_data->next_reclaimed); 2995 2996 tid_data->ssn = 0xffff; 2997 tid_data->state = IWL_AGG_OFF; 2998 spin_unlock_bh(&mvmsta->lock); 2999 3000 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3001 3002 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3003 return 0; 3004 case IWL_AGG_STARTING: 3005 case IWL_EMPTYING_HW_QUEUE_ADDBA: 3006 /* 3007 * The agg session has been stopped before it was set up. This 3008 * can happen when the AddBA timer times out for example. 3009 */ 3010 3011 /* No barriers since we are under mutex */ 3012 lockdep_assert_held(&mvm->mutex); 3013 3014 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid); 3015 tid_data->state = IWL_AGG_OFF; 3016 err = 0; 3017 break; 3018 default: 3019 IWL_ERR(mvm, 3020 "Stopping AGG while state not ON or starting for %d on %d (%d)\n", 3021 mvmsta->sta_id, tid, tid_data->state); 3022 IWL_ERR(mvm, 3023 "\ttid_data->txq_id = %d\n", tid_data->txq_id); 3024 err = -EINVAL; 3025 } 3026 3027 spin_unlock_bh(&mvmsta->lock); 3028 3029 return err; 3030 } 3031 3032 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif, 3033 struct ieee80211_sta *sta, u16 tid) 3034 { 3035 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3036 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 3037 u16 txq_id; 3038 enum iwl_mvm_agg_state old_state; 3039 3040 /* 3041 * First set the agg state to OFF to avoid calling 3042 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty. 
3043 */ 3044 spin_lock_bh(&mvmsta->lock); 3045 txq_id = tid_data->txq_id; 3046 IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n", 3047 mvmsta->sta_id, tid, txq_id, tid_data->state); 3048 old_state = tid_data->state; 3049 tid_data->state = IWL_AGG_OFF; 3050 mvmsta->agg_tids &= ~BIT(tid); 3051 spin_unlock_bh(&mvmsta->lock); 3052 3053 iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data); 3054 3055 if (old_state >= IWL_AGG_ON) { 3056 iwl_mvm_drain_sta(mvm, mvmsta, true); 3057 3058 if (iwl_mvm_has_new_tx_api(mvm)) { 3059 if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id, 3060 BIT(tid), 0)) 3061 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3062 iwl_trans_wait_txq_empty(mvm->trans, txq_id); 3063 } else { 3064 if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0)) 3065 IWL_ERR(mvm, "Couldn't flush the AGG queue\n"); 3066 iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id)); 3067 } 3068 3069 iwl_mvm_drain_sta(mvm, mvmsta, false); 3070 3071 iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false); 3072 } 3073 3074 return 0; 3075 } 3076 3077 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm) 3078 { 3079 int i, max = -1, max_offs = -1; 3080 3081 lockdep_assert_held(&mvm->mutex); 3082 3083 /* Pick the unused key offset with the highest 'deleted' 3084 * counter. Every time a key is deleted, all the counters 3085 * are incremented and the one that was just deleted is 3086 * reset to zero. Thus, the highest counter is the one 3087 * that was deleted longest ago. Pick that one. 3088 */ 3089 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3090 if (test_bit(i, mvm->fw_key_table)) 3091 continue; 3092 if (mvm->fw_key_deleted[i] > max) { 3093 max = mvm->fw_key_deleted[i]; 3094 max_offs = i; 3095 } 3096 } 3097 3098 if (max_offs < 0) 3099 return STA_KEY_IDX_INVALID; 3100 3101 return max_offs; 3102 } 3103 3104 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm, 3105 struct ieee80211_vif *vif, 3106 struct ieee80211_sta *sta) 3107 { 3108 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3109 3110 if (sta) 3111 return iwl_mvm_sta_from_mac80211(sta); 3112 3113 /* 3114 * The device expects GTKs for station interfaces to be 3115 * installed as GTKs for the AP station. If we have no 3116 * station ID, then use AP's station ID. 3117 */ 3118 if (vif->type == NL80211_IFTYPE_STATION && 3119 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3120 u8 sta_id = mvmvif->ap_sta_id; 3121 3122 sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id], 3123 lockdep_is_held(&mvm->mutex)); 3124 3125 /* 3126 * It is possible that the 'sta' parameter is NULL, 3127 * for example when a GTK is removed - the sta_id will then 3128 * be the AP ID, and no station was passed by mac80211. 
3129 */ 3130 if (IS_ERR_OR_NULL(sta)) 3131 return NULL; 3132 3133 return iwl_mvm_sta_from_mac80211(sta); 3134 } 3135 3136 return NULL; 3137 } 3138 3139 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm, 3140 u32 sta_id, 3141 struct ieee80211_key_conf *key, bool mcast, 3142 u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags, 3143 u8 key_offset, bool mfp) 3144 { 3145 union { 3146 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3147 struct iwl_mvm_add_sta_key_cmd cmd; 3148 } u = {}; 3149 __le16 key_flags; 3150 int ret; 3151 u32 status; 3152 u16 keyidx; 3153 u64 pn = 0; 3154 int i, size; 3155 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3156 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3157 3158 if (sta_id == IWL_MVM_INVALID_STA) 3159 return -EINVAL; 3160 3161 keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) & 3162 STA_KEY_FLG_KEYID_MSK; 3163 key_flags = cpu_to_le16(keyidx); 3164 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP); 3165 3166 switch (key->cipher) { 3167 case WLAN_CIPHER_SUITE_TKIP: 3168 key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP); 3169 if (new_api) { 3170 memcpy((void *)&u.cmd.tx_mic_key, 3171 &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY], 3172 IWL_MIC_KEY_SIZE); 3173 3174 memcpy((void *)&u.cmd.rx_mic_key, 3175 &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY], 3176 IWL_MIC_KEY_SIZE); 3177 pn = atomic64_read(&key->tx_pn); 3178 3179 } else { 3180 u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32; 3181 for (i = 0; i < 5; i++) 3182 u.cmd_v1.tkip_rx_ttak[i] = 3183 cpu_to_le16(tkip_p1k[i]); 3184 } 3185 memcpy(u.cmd.common.key, key->key, key->keylen); 3186 break; 3187 case WLAN_CIPHER_SUITE_CCMP: 3188 key_flags |= cpu_to_le16(STA_KEY_FLG_CCM); 3189 memcpy(u.cmd.common.key, key->key, key->keylen); 3190 if (new_api) 3191 pn = atomic64_read(&key->tx_pn); 3192 break; 3193 case WLAN_CIPHER_SUITE_WEP104: 3194 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES); 3195 /* fall through */ 3196 case WLAN_CIPHER_SUITE_WEP40: 3197 key_flags |= cpu_to_le16(STA_KEY_FLG_WEP); 3198 memcpy(u.cmd.common.key + 3, key->key, key->keylen); 3199 break; 3200 case WLAN_CIPHER_SUITE_GCMP_256: 3201 key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES); 3202 /* fall through */ 3203 case WLAN_CIPHER_SUITE_GCMP: 3204 key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP); 3205 memcpy(u.cmd.common.key, key->key, key->keylen); 3206 if (new_api) 3207 pn = atomic64_read(&key->tx_pn); 3208 break; 3209 default: 3210 key_flags |= cpu_to_le16(STA_KEY_FLG_EXT); 3211 memcpy(u.cmd.common.key, key->key, key->keylen); 3212 } 3213 3214 if (mcast) 3215 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3216 if (mfp) 3217 key_flags |= cpu_to_le16(STA_KEY_MFP); 3218 3219 u.cmd.common.key_offset = key_offset; 3220 u.cmd.common.key_flags = key_flags; 3221 u.cmd.common.sta_id = sta_id; 3222 3223 if (new_api) { 3224 u.cmd.transmit_seq_cnt = cpu_to_le64(pn); 3225 size = sizeof(u.cmd); 3226 } else { 3227 size = sizeof(u.cmd_v1); 3228 } 3229 3230 status = ADD_STA_SUCCESS; 3231 if (cmd_flags & CMD_ASYNC) 3232 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size, 3233 &u.cmd); 3234 else 3235 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, 3236 &u.cmd, &status); 3237 3238 switch (status) { 3239 case ADD_STA_SUCCESS: 3240 IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n"); 3241 break; 3242 default: 3243 ret = -EIO; 3244 IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n"); 3245 break; 3246 } 3247 3248 return ret; 3249 } 3250 3251 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm, 3252 struct ieee80211_key_conf *keyconf, 3253 u8 sta_id, bool remove_key) 3254 { 3255 struct 
iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {}; 3256 3257 /* verify the key details match the required command's expectations */ 3258 if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) || 3259 (keyconf->keyidx != 4 && keyconf->keyidx != 5) || 3260 (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC && 3261 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 && 3262 keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256))) 3263 return -EINVAL; 3264 3265 if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) && 3266 keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC)) 3267 return -EINVAL; 3268 3269 igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx); 3270 igtk_cmd.sta_id = cpu_to_le32(sta_id); 3271 3272 if (remove_key) { 3273 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID); 3274 } else { 3275 struct ieee80211_key_seq seq; 3276 const u8 *pn; 3277 3278 switch (keyconf->cipher) { 3279 case WLAN_CIPHER_SUITE_AES_CMAC: 3280 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM); 3281 break; 3282 case WLAN_CIPHER_SUITE_BIP_GMAC_128: 3283 case WLAN_CIPHER_SUITE_BIP_GMAC_256: 3284 igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP); 3285 break; 3286 default: 3287 return -EINVAL; 3288 } 3289 3290 memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen); 3291 if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) 3292 igtk_cmd.ctrl_flags |= 3293 cpu_to_le32(STA_KEY_FLG_KEY_32BYTES); 3294 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3295 pn = seq.aes_cmac.pn; 3296 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 3297 ((u64) pn[4] << 8) | 3298 ((u64) pn[3] << 16) | 3299 ((u64) pn[2] << 24) | 3300 ((u64) pn[1] << 32) | 3301 ((u64) pn[0] << 40)); 3302 } 3303 3304 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", 3305 remove_key ? "removing" : "installing", 3306 igtk_cmd.sta_id); 3307 3308 if (!iwl_mvm_has_new_rx_api(mvm)) { 3309 struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = { 3310 .ctrl_flags = igtk_cmd.ctrl_flags, 3311 .key_id = igtk_cmd.key_id, 3312 .sta_id = igtk_cmd.sta_id, 3313 .receive_seq_cnt = igtk_cmd.receive_seq_cnt 3314 }; 3315 3316 memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk, 3317 ARRAY_SIZE(igtk_cmd_v1.igtk)); 3318 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3319 sizeof(igtk_cmd_v1), &igtk_cmd_v1); 3320 } 3321 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 3322 sizeof(igtk_cmd), &igtk_cmd); 3323 } 3324 3325 3326 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 3327 struct ieee80211_vif *vif, 3328 struct ieee80211_sta *sta) 3329 { 3330 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3331 3332 if (sta) 3333 return sta->addr; 3334 3335 if (vif->type == NL80211_IFTYPE_STATION && 3336 mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) { 3337 u8 sta_id = mvmvif->ap_sta_id; 3338 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 3339 lockdep_is_held(&mvm->mutex)); 3340 return sta->addr; 3341 } 3342 3343 3344 return NULL; 3345 } 3346 3347 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3348 struct ieee80211_vif *vif, 3349 struct ieee80211_sta *sta, 3350 struct ieee80211_key_conf *keyconf, 3351 u8 key_offset, 3352 bool mcast) 3353 { 3354 int ret; 3355 const u8 *addr; 3356 struct ieee80211_key_seq seq; 3357 u16 p1k[5]; 3358 u32 sta_id; 3359 bool mfp = false; 3360 3361 if (sta) { 3362 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3363 3364 sta_id = mvm_sta->sta_id; 3365 mfp = sta->mfp; 3366 } else if (vif->type == NL80211_IFTYPE_AP && 3367 !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) { 3368 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3369 3370 sta_id = 
mvmvif->mcast_sta.sta_id; 3371 } else { 3372 IWL_ERR(mvm, "Failed to find station id\n"); 3373 return -EINVAL; 3374 } 3375 3376 switch (keyconf->cipher) { 3377 case WLAN_CIPHER_SUITE_TKIP: 3378 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 3379 /* get phase 1 key from mac80211 */ 3380 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 3381 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 3382 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3383 seq.tkip.iv32, p1k, 0, key_offset, 3384 mfp); 3385 break; 3386 case WLAN_CIPHER_SUITE_CCMP: 3387 case WLAN_CIPHER_SUITE_WEP40: 3388 case WLAN_CIPHER_SUITE_WEP104: 3389 case WLAN_CIPHER_SUITE_GCMP: 3390 case WLAN_CIPHER_SUITE_GCMP_256: 3391 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3392 0, NULL, 0, key_offset, mfp); 3393 break; 3394 default: 3395 ret = iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast, 3396 0, NULL, 0, key_offset, mfp); 3397 } 3398 3399 return ret; 3400 } 3401 3402 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 3403 struct ieee80211_key_conf *keyconf, 3404 bool mcast) 3405 { 3406 union { 3407 struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1; 3408 struct iwl_mvm_add_sta_key_cmd cmd; 3409 } u = {}; 3410 bool new_api = fw_has_api(&mvm->fw->ucode_capa, 3411 IWL_UCODE_TLV_API_TKIP_MIC_KEYS); 3412 __le16 key_flags; 3413 int ret, size; 3414 u32 status; 3415 3416 /* This is a valid situation for GTK removal */ 3417 if (sta_id == IWL_MVM_INVALID_STA) 3418 return 0; 3419 3420 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 3421 STA_KEY_FLG_KEYID_MSK); 3422 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 3423 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 3424 3425 if (mcast) 3426 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 3427 3428 /* 3429 * The fields assigned here are in the same location at the start 3430 * of the command, so we can do this union trick. 3431 */ 3432 u.cmd.common.key_flags = key_flags; 3433 u.cmd.common.key_offset = keyconf->hw_key_idx; 3434 u.cmd.common.sta_id = sta_id; 3435 3436 size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1); 3437 3438 status = ADD_STA_SUCCESS; 3439 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd, 3440 &status); 3441 3442 switch (status) { 3443 case ADD_STA_SUCCESS: 3444 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 3445 break; 3446 default: 3447 ret = -EIO; 3448 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 3449 break; 3450 } 3451 3452 return ret; 3453 } 3454 3455 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 3456 struct ieee80211_vif *vif, 3457 struct ieee80211_sta *sta, 3458 struct ieee80211_key_conf *keyconf, 3459 u8 key_offset) 3460 { 3461 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3462 struct iwl_mvm_sta *mvm_sta; 3463 u8 sta_id = IWL_MVM_INVALID_STA; 3464 int ret; 3465 static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0}; 3466 3467 lockdep_assert_held(&mvm->mutex); 3468 3469 if (vif->type != NL80211_IFTYPE_AP || 3470 keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) { 3471 /* Get the station id from the mvm local station table */ 3472 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3473 if (!mvm_sta) { 3474 IWL_ERR(mvm, "Failed to find station\n"); 3475 return -EINVAL; 3476 } 3477 sta_id = mvm_sta->sta_id; 3478 3479 /* 3480 * It is possible that the 'sta' parameter is NULL, and thus 3481 * there is a need to retrieve the sta from the local station 3482 * table. 
		 */
		if (!sta) {
			sta = rcu_dereference_protected(
				mvm->fw_id_to_mac_id[sta_id],
				lockdep_is_held(&mvm->mutex));
			if (IS_ERR_OR_NULL(sta)) {
				IWL_ERR(mvm, "Invalid station id\n");
				return -EINVAL;
			}
		}

		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
			return -EINVAL;
	} else {
		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

		sta_id = mvmvif->mcast_sta.sta_id;
	}

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use.  In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hardcode the indices (because the
	 * firmware hardcodes the PTK offset to 0).  In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
	    sta) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ? 
sta->addr : zero_addr, ret); 3554 return ret; 3555 } 3556 3557 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, 3558 struct ieee80211_vif *vif, 3559 struct ieee80211_sta *sta, 3560 struct ieee80211_key_conf *keyconf) 3561 { 3562 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3563 struct iwl_mvm_sta *mvm_sta; 3564 u8 sta_id = IWL_MVM_INVALID_STA; 3565 int ret, i; 3566 3567 lockdep_assert_held(&mvm->mutex); 3568 3569 /* Get the station from the mvm local station table */ 3570 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3571 if (mvm_sta) 3572 sta_id = mvm_sta->sta_id; 3573 else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast) 3574 sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id; 3575 3576 3577 IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n", 3578 keyconf->keyidx, sta_id); 3579 3580 if (mvm_sta && (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC || 3581 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 || 3582 keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)) 3583 return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true); 3584 3585 if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) { 3586 IWL_ERR(mvm, "offset %d not used in fw key table.\n", 3587 keyconf->hw_key_idx); 3588 return -ENOENT; 3589 } 3590 3591 /* track which key was deleted last */ 3592 for (i = 0; i < STA_KEY_MAX_NUM; i++) { 3593 if (mvm->fw_key_deleted[i] < U8_MAX) 3594 mvm->fw_key_deleted[i]++; 3595 } 3596 mvm->fw_key_deleted[keyconf->hw_key_idx] = 0; 3597 3598 if (sta && !mvm_sta) { 3599 IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n"); 3600 return 0; 3601 } 3602 3603 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast); 3604 if (ret) 3605 return ret; 3606 3607 /* delete WEP key twice to get rid of (now useless) offset */ 3608 if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 || 3609 keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) 3610 ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast); 3611 3612 return ret; 3613 } 3614 3615 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm, 3616 struct ieee80211_vif *vif, 3617 struct ieee80211_key_conf *keyconf, 3618 struct ieee80211_sta *sta, u32 iv32, 3619 u16 *phase1key) 3620 { 3621 struct iwl_mvm_sta *mvm_sta; 3622 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 3623 bool mfp = sta ? 
sta->mfp : false; 3624 3625 rcu_read_lock(); 3626 3627 mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta); 3628 if (WARN_ON_ONCE(!mvm_sta)) 3629 goto unlock; 3630 iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast, 3631 iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx, 3632 mfp); 3633 3634 unlock: 3635 rcu_read_unlock(); 3636 } 3637 3638 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm, 3639 struct ieee80211_sta *sta) 3640 { 3641 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3642 struct iwl_mvm_add_sta_cmd cmd = { 3643 .add_modify = STA_MODE_MODIFY, 3644 .sta_id = mvmsta->sta_id, 3645 .station_flags_msk = cpu_to_le32(STA_FLG_PS), 3646 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3647 }; 3648 int ret; 3649 3650 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3651 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3652 if (ret) 3653 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3654 } 3655 3656 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm, 3657 struct ieee80211_sta *sta, 3658 enum ieee80211_frame_release_type reason, 3659 u16 cnt, u16 tids, bool more_data, 3660 bool single_sta_queue) 3661 { 3662 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 3663 struct iwl_mvm_add_sta_cmd cmd = { 3664 .add_modify = STA_MODE_MODIFY, 3665 .sta_id = mvmsta->sta_id, 3666 .modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT, 3667 .sleep_tx_count = cpu_to_le16(cnt), 3668 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3669 }; 3670 int tid, ret; 3671 unsigned long _tids = tids; 3672 3673 /* convert TIDs to ACs - we don't support TSPEC so that's OK 3674 * Note that this field is reserved and unused by firmware not 3675 * supporting GO uAPSD, so it's safe to always do this. 3676 */ 3677 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) 3678 cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]); 3679 3680 /* If we're releasing frames from aggregation or dqa queues then check 3681 * if all the queues that we're releasing frames from, combined, have: 3682 * - more frames than the service period, in which case more_data 3683 * needs to be set 3684 * - fewer than 'cnt' frames, in which case we need to adjust the 3685 * firmware command (but do that unconditionally) 3686 */ 3687 if (single_sta_queue) { 3688 int remaining = cnt; 3689 int sleep_tx_count; 3690 3691 spin_lock_bh(&mvmsta->lock); 3692 for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) { 3693 struct iwl_mvm_tid_data *tid_data; 3694 u16 n_queued; 3695 3696 tid_data = &mvmsta->tid_data[tid]; 3697 3698 n_queued = iwl_mvm_tid_queued(mvm, tid_data); 3699 if (n_queued > remaining) { 3700 more_data = true; 3701 remaining = 0; 3702 break; 3703 } 3704 remaining -= n_queued; 3705 } 3706 sleep_tx_count = cnt - remaining; 3707 if (reason == IEEE80211_FRAME_RELEASE_UAPSD) 3708 mvmsta->sleep_tx_count = sleep_tx_count; 3709 spin_unlock_bh(&mvmsta->lock); 3710 3711 cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count); 3712 if (WARN_ON(cnt - remaining == 0)) { 3713 ieee80211_sta_eosp(sta); 3714 return; 3715 } 3716 } 3717 3718 /* Note: this is ignored by firmware not supporting GO uAPSD */ 3719 if (more_data) 3720 cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA; 3721 3722 if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) { 3723 mvmsta->next_status_eosp = true; 3724 cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL; 3725 } else { 3726 cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD; 3727 } 3728 3729 /* block the Tx queues until the FW updated the sleep Tx count */ 3730 iwl_trans_block_txq_ptrs(mvm->trans, true); 3731 3732 ret = 
iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 3733 CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK, 3734 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3735 if (ret) 3736 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3737 } 3738 3739 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, 3740 struct iwl_rx_cmd_buffer *rxb) 3741 { 3742 struct iwl_rx_packet *pkt = rxb_addr(rxb); 3743 struct iwl_mvm_eosp_notification *notif = (void *)pkt->data; 3744 struct ieee80211_sta *sta; 3745 u32 sta_id = le32_to_cpu(notif->sta_id); 3746 3747 if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT)) 3748 return; 3749 3750 rcu_read_lock(); 3751 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 3752 if (!IS_ERR_OR_NULL(sta)) 3753 ieee80211_sta_eosp(sta); 3754 rcu_read_unlock(); 3755 } 3756 3757 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm, 3758 struct iwl_mvm_sta *mvmsta, bool disable) 3759 { 3760 struct iwl_mvm_add_sta_cmd cmd = { 3761 .add_modify = STA_MODE_MODIFY, 3762 .sta_id = mvmsta->sta_id, 3763 .station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3764 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3765 .mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color), 3766 }; 3767 int ret; 3768 3769 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC, 3770 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3771 if (ret) 3772 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3773 } 3774 3775 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm, 3776 struct ieee80211_sta *sta, 3777 bool disable) 3778 { 3779 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3780 3781 spin_lock_bh(&mvm_sta->lock); 3782 3783 if (mvm_sta->disable_tx == disable) { 3784 spin_unlock_bh(&mvm_sta->lock); 3785 return; 3786 } 3787 3788 mvm_sta->disable_tx = disable; 3789 3790 /* Tell mac80211 to start/stop queuing tx for this station */ 3791 ieee80211_sta_block_awake(mvm->hw, sta, disable); 3792 3793 iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable); 3794 3795 spin_unlock_bh(&mvm_sta->lock); 3796 } 3797 3798 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm, 3799 struct iwl_mvm_vif *mvmvif, 3800 struct iwl_mvm_int_sta *sta, 3801 bool disable) 3802 { 3803 u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color); 3804 struct iwl_mvm_add_sta_cmd cmd = { 3805 .add_modify = STA_MODE_MODIFY, 3806 .sta_id = sta->sta_id, 3807 .station_flags = disable ? 
cpu_to_le32(STA_FLG_DISABLE_TX) : 0, 3808 .station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX), 3809 .mac_id_n_color = cpu_to_le32(id), 3810 }; 3811 int ret; 3812 3813 ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, 0, 3814 iwl_mvm_add_sta_cmd_size(mvm), &cmd); 3815 if (ret) 3816 IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret); 3817 } 3818 3819 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm, 3820 struct iwl_mvm_vif *mvmvif, 3821 bool disable) 3822 { 3823 struct ieee80211_sta *sta; 3824 struct iwl_mvm_sta *mvm_sta; 3825 int i; 3826 3827 lockdep_assert_held(&mvm->mutex); 3828 3829 /* Block/unblock all the stations of the given mvmvif */ 3830 for (i = 0; i < ARRAY_SIZE(mvm->fw_id_to_mac_id); i++) { 3831 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i], 3832 lockdep_is_held(&mvm->mutex)); 3833 if (IS_ERR_OR_NULL(sta)) 3834 continue; 3835 3836 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 3837 if (mvm_sta->mac_id_n_color != 3838 FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color)) 3839 continue; 3840 3841 iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable); 3842 } 3843 3844 if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) 3845 return; 3846 3847 /* Need to block/unblock also multicast station */ 3848 if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA) 3849 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3850 &mvmvif->mcast_sta, disable); 3851 3852 /* 3853 * Only unblock the broadcast station (FW blocks it for immediate 3854 * quiet, not the driver) 3855 */ 3856 if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA) 3857 iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif, 3858 &mvmvif->bcast_sta, disable); 3859 } 3860 3861 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 3862 { 3863 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 3864 struct iwl_mvm_sta *mvmsta; 3865 3866 rcu_read_lock(); 3867 3868 mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id); 3869 3870 if (!WARN_ON(!mvmsta)) 3871 iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true); 3872 3873 rcu_read_unlock(); 3874 } 3875 3876 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data) 3877 { 3878 u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number); 3879 3880 /* 3881 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need 3882 * to align the wrap around of ssn so we compare relevant values. 3883 */ 3884 if (mvm->trans->cfg->gen2) 3885 sn &= 0xff; 3886 3887 return ieee80211_sn_sub(sn, tid_data->next_reclaimed); 3888 } 3889
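
/*
 * A worked example of the gen2 normalization in iwl_mvm_tid_queued() above,
 * with illustrative numbers only: if the driver's 12-bit SN is 0x1f5 while
 * the 8-bit next_reclaimed reads 0xf0, then after sn &= 0xff the function
 * returns ieee80211_sn_sub(0xf5, 0xf0) == 5 queued frames, whereas comparing
 * the raw 0x1f5 against 0xf0 would over-count by 0x100.
 */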