1 /****************************************************************************** 2 * 3 * This file is provided under a dual BSD/GPLv2 license. When using or 4 * redistributing this file, you may do so under either license. 5 * 6 * GPL LICENSE SUMMARY 7 * 8 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 9 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 10 * Copyright(c) 2016 Intel Deutschland GmbH 11 * 12 * This program is free software; you can redistribute it and/or modify 13 * it under the terms of version 2 of the GNU General Public License as 14 * published by the Free Software Foundation. 15 * 16 * This program is distributed in the hope that it will be useful, but 17 * WITHOUT ANY WARRANTY; without even the implied warranty of 18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU 19 * General Public License for more details. 20 * 21 * You should have received a copy of the GNU General Public License 22 * along with this program; if not, write to the Free Software 23 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, 24 * USA 25 * 26 * The full GNU General Public License is included in this distribution 27 * in the file called COPYING. 28 * 29 * Contact Information: 30 * Intel Linux Wireless <linuxwifi@intel.com> 31 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 32 * 33 * BSD LICENSE 34 * 35 * Copyright(c) 2012 - 2015 Intel Corporation. All rights reserved. 36 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH 37 * Copyright(c) 2016 Intel Deutschland GmbH 38 * All rights reserved. 39 * 40 * Redistribution and use in source and binary forms, with or without 41 * modification, are permitted provided that the following conditions 42 * are met: 43 * 44 * * Redistributions of source code must retain the above copyright 45 * notice, this list of conditions and the following disclaimer. 46 * * Redistributions in binary form must reproduce the above copyright 47 * notice, this list of conditions and the following disclaimer in 48 * the documentation and/or other materials provided with the 49 * distribution. 50 * * Neither the name Intel Corporation nor the names of its 51 * contributors may be used to endorse or promote products derived 52 * from this software without specific prior written permission. 53 * 54 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS 55 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT 56 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR 57 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT 58 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, 59 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 60 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 61 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 62 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 63 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 64 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 65 * 66 *****************************************************************************/ 67 #include <net/mac80211.h> 68 69 #include "mvm.h" 70 #include "sta.h" 71 #include "rs.h" 72 73 /* 74 * New version of ADD_STA_sta command added new fields at the end of the 75 * structure, so sending the size of the relevant API's structure is enough to 76 * support both API versions. 
77 */ 78 static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm) 79 { 80 return iwl_mvm_has_new_rx_api(mvm) ? 81 sizeof(struct iwl_mvm_add_sta_cmd) : 82 sizeof(struct iwl_mvm_add_sta_cmd_v7); 83 } 84 85 static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm, 86 enum nl80211_iftype iftype) 87 { 88 int sta_id; 89 u32 reserved_ids = 0; 90 91 BUILD_BUG_ON(IWL_MVM_STATION_COUNT > 32); 92 WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)); 93 94 lockdep_assert_held(&mvm->mutex); 95 96 /* d0i3/d3 assumes the AP's sta_id (of sta vif) is 0. reserve it. */ 97 if (iftype != NL80211_IFTYPE_STATION) 98 reserved_ids = BIT(0); 99 100 /* Don't take rcu_read_lock() since we are protected by mvm->mutex */ 101 for (sta_id = 0; sta_id < IWL_MVM_STATION_COUNT; sta_id++) { 102 if (BIT(sta_id) & reserved_ids) 103 continue; 104 105 if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 106 lockdep_is_held(&mvm->mutex))) 107 return sta_id; 108 } 109 return IWL_MVM_STATION_COUNT; 110 } 111 112 /* send station add/update command to firmware */ 113 int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 114 bool update, unsigned int flags) 115 { 116 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 117 struct iwl_mvm_add_sta_cmd add_sta_cmd = { 118 .sta_id = mvm_sta->sta_id, 119 .mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color), 120 .add_modify = update ? 1 : 0, 121 .station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK | 122 STA_FLG_MIMO_EN_MSK), 123 .tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg), 124 }; 125 int ret; 126 u32 status; 127 u32 agg_size = 0, mpdu_dens = 0; 128 129 if (!update || (flags & STA_MODIFY_QUEUES)) { 130 add_sta_cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 131 memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN); 132 133 if (flags & STA_MODIFY_QUEUES) 134 add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES; 135 } 136 137 switch (sta->bandwidth) { 138 case IEEE80211_STA_RX_BW_160: 139 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ); 140 /* fall through */ 141 case IEEE80211_STA_RX_BW_80: 142 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ); 143 /* fall through */ 144 case IEEE80211_STA_RX_BW_40: 145 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ); 146 /* fall through */ 147 case IEEE80211_STA_RX_BW_20: 148 if (sta->ht_cap.ht_supported) 149 add_sta_cmd.station_flags |= 150 cpu_to_le32(STA_FLG_FAT_EN_20MHZ); 151 break; 152 } 153 154 switch (sta->rx_nss) { 155 case 1: 156 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); 157 break; 158 case 2: 159 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2); 160 break; 161 case 3 ... 
8: 162 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3); 163 break; 164 } 165 166 switch (sta->smps_mode) { 167 case IEEE80211_SMPS_AUTOMATIC: 168 case IEEE80211_SMPS_NUM_MODES: 169 WARN_ON(1); 170 break; 171 case IEEE80211_SMPS_STATIC: 172 /* override NSS */ 173 add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK); 174 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO); 175 break; 176 case IEEE80211_SMPS_DYNAMIC: 177 add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT); 178 break; 179 case IEEE80211_SMPS_OFF: 180 /* nothing */ 181 break; 182 } 183 184 if (sta->ht_cap.ht_supported) { 185 add_sta_cmd.station_flags_msk |= 186 cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK | 187 STA_FLG_AGG_MPDU_DENS_MSK); 188 189 mpdu_dens = sta->ht_cap.ampdu_density; 190 } 191 192 if (sta->vht_cap.vht_supported) { 193 agg_size = sta->vht_cap.cap & 194 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK; 195 agg_size >>= 196 IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT; 197 } else if (sta->ht_cap.ht_supported) { 198 agg_size = sta->ht_cap.ampdu_factor; 199 } 200 201 add_sta_cmd.station_flags |= 202 cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT); 203 add_sta_cmd.station_flags |= 204 cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT); 205 206 status = ADD_STA_SUCCESS; 207 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 208 iwl_mvm_add_sta_cmd_size(mvm), 209 &add_sta_cmd, &status); 210 if (ret) 211 return ret; 212 213 switch (status & IWL_ADD_STA_STATUS_MASK) { 214 case ADD_STA_SUCCESS: 215 IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n"); 216 break; 217 default: 218 ret = -EIO; 219 IWL_ERR(mvm, "ADD_STA failed\n"); 220 break; 221 } 222 223 return ret; 224 } 225 226 static void iwl_mvm_rx_agg_session_expired(unsigned long data) 227 { 228 struct iwl_mvm_baid_data __rcu **rcu_ptr = (void *)data; 229 struct iwl_mvm_baid_data *ba_data; 230 struct ieee80211_sta *sta; 231 struct iwl_mvm_sta *mvm_sta; 232 unsigned long timeout; 233 234 rcu_read_lock(); 235 236 ba_data = rcu_dereference(*rcu_ptr); 237 238 if (WARN_ON(!ba_data)) 239 goto unlock; 240 241 if (!ba_data->timeout) 242 goto unlock; 243 244 timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2); 245 if (time_is_after_jiffies(timeout)) { 246 mod_timer(&ba_data->session_timer, timeout); 247 goto unlock; 248 } 249 250 /* Timer expired */ 251 sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]); 252 mvm_sta = iwl_mvm_sta_from_mac80211(sta); 253 ieee80211_stop_rx_ba_session_offl(mvm_sta->vif, 254 sta->addr, ba_data->tid); 255 unlock: 256 rcu_read_unlock(); 257 } 258 259 static int iwl_mvm_tdls_sta_init(struct iwl_mvm *mvm, 260 struct ieee80211_sta *sta) 261 { 262 unsigned long used_hw_queues; 263 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 264 unsigned int wdg_timeout = 265 iwl_mvm_get_wd_timeout(mvm, NULL, true, false); 266 u32 ac; 267 268 lockdep_assert_held(&mvm->mutex); 269 270 used_hw_queues = iwl_mvm_get_used_hw_queues(mvm, NULL); 271 272 /* Find available queues, and allocate them to the ACs */ 273 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 274 u8 queue = find_first_zero_bit(&used_hw_queues, 275 mvm->first_agg_queue); 276 277 if (queue >= mvm->first_agg_queue) { 278 IWL_ERR(mvm, "Failed to allocate STA queue\n"); 279 return -EBUSY; 280 } 281 282 __set_bit(queue, &used_hw_queues); 283 mvmsta->hw_queue[ac] = queue; 284 } 285 286 /* Found a place for all queues - enable them */ 287 for (ac = 0; ac < IEEE80211_NUM_ACS; ac++) { 288 iwl_mvm_enable_ac_txq(mvm, 
mvmsta->hw_queue[ac], 289 mvmsta->hw_queue[ac], 290 iwl_mvm_ac_to_tx_fifo[ac], 0, 291 wdg_timeout); 292 mvmsta->tfd_queue_msk |= BIT(mvmsta->hw_queue[ac]); 293 } 294 295 return 0; 296 } 297 298 static void iwl_mvm_tdls_sta_deinit(struct iwl_mvm *mvm, 299 struct ieee80211_sta *sta) 300 { 301 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 302 unsigned long sta_msk; 303 int i; 304 305 lockdep_assert_held(&mvm->mutex); 306 307 /* disable the TDLS STA-specific queues */ 308 sta_msk = mvmsta->tfd_queue_msk; 309 for_each_set_bit(i, &sta_msk, sizeof(sta_msk) * BITS_PER_BYTE) 310 iwl_mvm_disable_txq(mvm, i, i, IWL_MAX_TID_COUNT, 0); 311 } 312 313 /* Disable aggregations for a bitmap of TIDs for a given station */ 314 static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue, 315 unsigned long disable_agg_tids, 316 bool remove_queue) 317 { 318 struct iwl_mvm_add_sta_cmd cmd = {}; 319 struct ieee80211_sta *sta; 320 struct iwl_mvm_sta *mvmsta; 321 u32 status; 322 u8 sta_id; 323 int ret; 324 325 spin_lock_bh(&mvm->queue_info_lock); 326 sta_id = mvm->queue_info[queue].ra_sta_id; 327 spin_unlock_bh(&mvm->queue_info_lock); 328 329 rcu_read_lock(); 330 331 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 332 333 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { 334 rcu_read_unlock(); 335 return -EINVAL; 336 } 337 338 mvmsta = iwl_mvm_sta_from_mac80211(sta); 339 340 mvmsta->tid_disable_agg |= disable_agg_tids; 341 342 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); 343 cmd.sta_id = mvmsta->sta_id; 344 cmd.add_modify = STA_MODE_MODIFY; 345 cmd.modify_mask = STA_MODIFY_QUEUES; 346 if (disable_agg_tids) 347 cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX; 348 if (remove_queue) 349 cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL; 350 cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk); 351 cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg); 352 353 rcu_read_unlock(); 354 355 /* Notify FW of queue removal from the STA queues */ 356 status = ADD_STA_SUCCESS; 357 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 358 iwl_mvm_add_sta_cmd_size(mvm), 359 &cmd, &status); 360 361 return ret; 362 } 363 364 static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue) 365 { 366 struct ieee80211_sta *sta; 367 struct iwl_mvm_sta *mvmsta; 368 unsigned long tid_bitmap; 369 unsigned long agg_tids = 0; 370 s8 sta_id; 371 int tid; 372 373 lockdep_assert_held(&mvm->mutex); 374 375 spin_lock_bh(&mvm->queue_info_lock); 376 sta_id = mvm->queue_info[queue].ra_sta_id; 377 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 378 spin_unlock_bh(&mvm->queue_info_lock); 379 380 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 381 lockdep_is_held(&mvm->mutex)); 382 383 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) 384 return -EINVAL; 385 386 mvmsta = iwl_mvm_sta_from_mac80211(sta); 387 388 spin_lock_bh(&mvmsta->lock); 389 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 390 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) 391 agg_tids |= BIT(tid); 392 } 393 spin_unlock_bh(&mvmsta->lock); 394 395 return agg_tids; 396 } 397 398 /* 399 * Remove a queue from a station's resources. 400 * Note that this only marks as free. 
It DOESN'T delete a BA agreement, and 401 * doesn't disable the queue 402 */ 403 static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue) 404 { 405 struct ieee80211_sta *sta; 406 struct iwl_mvm_sta *mvmsta; 407 unsigned long tid_bitmap; 408 unsigned long disable_agg_tids = 0; 409 u8 sta_id; 410 int tid; 411 412 lockdep_assert_held(&mvm->mutex); 413 414 spin_lock_bh(&mvm->queue_info_lock); 415 sta_id = mvm->queue_info[queue].ra_sta_id; 416 tid_bitmap = mvm->queue_info[queue].tid_bitmap; 417 spin_unlock_bh(&mvm->queue_info_lock); 418 419 rcu_read_lock(); 420 421 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]); 422 423 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) { 424 rcu_read_unlock(); 425 return 0; 426 } 427 428 mvmsta = iwl_mvm_sta_from_mac80211(sta); 429 430 spin_lock_bh(&mvmsta->lock); 431 /* Unmap MAC queues and TIDs from this queue */ 432 for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) { 433 if (mvmsta->tid_data[tid].state == IWL_AGG_ON) 434 disable_agg_tids |= BIT(tid); 435 mvmsta->tid_data[tid].txq_id = IEEE80211_INVAL_HW_QUEUE; 436 } 437 438 mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */ 439 spin_unlock_bh(&mvmsta->lock); 440 441 rcu_read_unlock(); 442 443 spin_lock_bh(&mvm->queue_info_lock); 444 /* Unmap MAC queues and TIDs from this queue */ 445 mvm->queue_info[queue].hw_queue_to_mac80211 = 0; 446 mvm->queue_info[queue].hw_queue_refcount = 0; 447 mvm->queue_info[queue].tid_bitmap = 0; 448 spin_unlock_bh(&mvm->queue_info_lock); 449 450 return disable_agg_tids; 451 } 452 453 static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm, 454 unsigned long tfd_queue_mask, u8 ac) 455 { 456 int queue = 0; 457 u8 ac_to_queue[IEEE80211_NUM_ACS]; 458 int i; 459 460 lockdep_assert_held(&mvm->queue_info_lock); 461 462 memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue)); 463 464 /* See what ACs the existing queues for this STA have */ 465 for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) { 466 /* Only DATA queues can be shared */ 467 if (i < IWL_MVM_DQA_MIN_DATA_QUEUE && 468 i != IWL_MVM_DQA_BSS_CLIENT_QUEUE) 469 continue; 470 471 ac_to_queue[mvm->queue_info[i].mac80211_ac] = i; 472 } 473 474 /* 475 * The queue to share is chosen only from DATA queues as follows (in 476 * descending priority): 477 * 1. An AC_BE queue 478 * 2. Same AC queue 479 * 3. Highest AC queue that is lower than new AC 480 * 4. 
Any existing AC (there always is at least 1 DATA queue) 481 */ 482 483 /* Priority 1: An AC_BE queue */ 484 if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE) 485 queue = ac_to_queue[IEEE80211_AC_BE]; 486 /* Priority 2: Same AC queue */ 487 else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE) 488 queue = ac_to_queue[ac]; 489 /* Priority 3a: If new AC is VO and VI exists - use VI */ 490 else if (ac == IEEE80211_AC_VO && 491 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) 492 queue = ac_to_queue[IEEE80211_AC_VI]; 493 /* Priority 3b: No BE so only AC less than the new one is BK */ 494 else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE) 495 queue = ac_to_queue[IEEE80211_AC_BK]; 496 /* Priority 4a: No BE nor BK - use VI if exists */ 497 else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE) 498 queue = ac_to_queue[IEEE80211_AC_VI]; 499 /* Priority 4b: No BE, BK nor VI - use VO if exists */ 500 else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE) 501 queue = ac_to_queue[IEEE80211_AC_VO]; 502 503 /* Make sure queue found (or not) is legal */ 504 if (!((queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE && 505 queue <= IWL_MVM_DQA_MAX_MGMT_QUEUE) || 506 (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE && 507 queue <= IWL_MVM_DQA_MAX_DATA_QUEUE) || 508 (queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE))) { 509 IWL_ERR(mvm, "No DATA queues available to share\n"); 510 queue = -ENOSPC; 511 } 512 513 return queue; 514 } 515 516 /* 517 * If a given queue has a higher AC than the TID stream that is being added to 518 * it, the queue needs to be redirected to the lower AC. This function does that 519 * in such a case, otherwise - if no redirection required - it does nothing, 520 * unless the %force param is true. 521 */ 522 static int iwl_mvm_scd_queue_redirect(struct iwl_mvm *mvm, int queue, int tid, 523 int ac, int ssn, unsigned int wdg_timeout, 524 bool force) 525 { 526 struct iwl_scd_txq_cfg_cmd cmd = { 527 .scd_queue = queue, 528 .enable = 0, 529 }; 530 bool shared_queue; 531 unsigned long mq; 532 int ret; 533 534 /* 535 * If the AC is lower than current one - FIFO needs to be redirected to 536 * the lowest one of the streams in the queue. Check if this is needed 537 * here. 538 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is with 539 * value 3 and VO with value 0, so to check if ac X is lower than ac Y 540 * we need to check if the numerical value of X is LARGER than of Y. 
541 */ 542 spin_lock_bh(&mvm->queue_info_lock); 543 if (ac <= mvm->queue_info[queue].mac80211_ac && !force) { 544 spin_unlock_bh(&mvm->queue_info_lock); 545 546 IWL_DEBUG_TX_QUEUES(mvm, 547 "No redirection needed on TXQ #%d\n", 548 queue); 549 return 0; 550 } 551 552 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 553 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac]; 554 mq = mvm->queue_info[queue].hw_queue_to_mac80211; 555 shared_queue = (mvm->queue_info[queue].hw_queue_refcount > 1); 556 spin_unlock_bh(&mvm->queue_info_lock); 557 558 IWL_DEBUG_TX_QUEUES(mvm, "Redirecting shared TXQ #%d to FIFO #%d\n", 559 queue, iwl_mvm_ac_to_tx_fifo[ac]); 560 561 /* Stop MAC queues and wait for this queue to empty */ 562 iwl_mvm_stop_mac_queues(mvm, mq); 563 ret = iwl_trans_wait_tx_queue_empty(mvm->trans, BIT(queue)); 564 if (ret) { 565 IWL_ERR(mvm, "Error draining queue %d before reconfig\n", 566 queue); 567 ret = -EIO; 568 goto out; 569 } 570 571 /* Before redirecting the queue we need to de-activate it */ 572 iwl_trans_txq_disable(mvm->trans, queue, false); 573 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd); 574 if (ret) 575 IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue, 576 ret); 577 578 /* Make sure the SCD wrptr is correctly set before reconfiguring */ 579 iwl_trans_txq_enable(mvm->trans, queue, iwl_mvm_ac_to_tx_fifo[ac], 580 cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, 581 ssn, wdg_timeout); 582 583 /* TODO: Work-around SCD bug when moving back by multiples of 0x40 */ 584 585 /* Redirect to lower AC */ 586 iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac], 587 cmd.sta_id, tid, LINK_QUAL_AGG_FRAME_LIMIT_DEF, 588 ssn); 589 590 /* Update AC marking of the queue */ 591 spin_lock_bh(&mvm->queue_info_lock); 592 mvm->queue_info[queue].mac80211_ac = ac; 593 spin_unlock_bh(&mvm->queue_info_lock); 594 595 /* 596 * Mark queue as shared in transport if shared 597 * Note this has to be done after queue enablement because enablement 598 * can also set this value, and there is no indication there to shared 599 * queues 600 */ 601 if (shared_queue) 602 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 603 604 out: 605 /* Continue using the MAC queues */ 606 iwl_mvm_start_mac_queues(mvm, mq); 607 608 return ret; 609 } 610 611 static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm, 612 struct ieee80211_sta *sta, u8 ac, int tid, 613 struct ieee80211_hdr *hdr) 614 { 615 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 616 struct iwl_trans_txq_scd_cfg cfg = { 617 .fifo = iwl_mvm_ac_to_tx_fifo[ac], 618 .sta_id = mvmsta->sta_id, 619 .tid = tid, 620 .frame_limit = IWL_FRAME_LIMIT, 621 }; 622 unsigned int wdg_timeout = 623 iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false); 624 u8 mac_queue = mvmsta->vif->hw_queue[ac]; 625 int queue = -1; 626 bool using_inactive_queue = false; 627 unsigned long disable_agg_tids = 0; 628 enum iwl_mvm_agg_state queue_state; 629 bool shared_queue = false; 630 int ssn; 631 unsigned long tfd_queue_mask; 632 int ret; 633 634 lockdep_assert_held(&mvm->mutex); 635 636 spin_lock_bh(&mvmsta->lock); 637 tfd_queue_mask = mvmsta->tfd_queue_msk; 638 spin_unlock_bh(&mvmsta->lock); 639 640 spin_lock_bh(&mvm->queue_info_lock); 641 642 /* 643 * Non-QoS, QoS NDP and MGMT frames should go to a MGMT queue, if one 644 * exists 645 */ 646 if (!ieee80211_is_data_qos(hdr->frame_control) || 647 ieee80211_is_qos_nullfunc(hdr->frame_control)) { 648 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 649 
IWL_MVM_DQA_MIN_MGMT_QUEUE, 650 IWL_MVM_DQA_MAX_MGMT_QUEUE); 651 if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE) 652 IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n", 653 queue); 654 655 /* If no such queue is found, we'll use a DATA queue instead */ 656 } 657 658 if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) && 659 (mvm->queue_info[mvmsta->reserved_queue].status == 660 IWL_MVM_QUEUE_RESERVED || 661 mvm->queue_info[mvmsta->reserved_queue].status == 662 IWL_MVM_QUEUE_INACTIVE)) { 663 queue = mvmsta->reserved_queue; 664 mvm->queue_info[queue].reserved = true; 665 IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue); 666 } 667 668 if (queue < 0) 669 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 670 IWL_MVM_DQA_MIN_DATA_QUEUE, 671 IWL_MVM_DQA_MAX_DATA_QUEUE); 672 673 /* 674 * Check if this queue is already allocated but inactive. 675 * In such a case, we'll need to first free this queue before enabling 676 * it again, so we'll mark it as reserved to make sure no new traffic 677 * arrives on it 678 */ 679 if (queue > 0 && 680 mvm->queue_info[queue].status == IWL_MVM_QUEUE_INACTIVE) { 681 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 682 using_inactive_queue = true; 683 IWL_DEBUG_TX_QUEUES(mvm, 684 "Re-assigning TXQ %d: sta_id=%d, tid=%d\n", 685 queue, mvmsta->sta_id, tid); 686 } 687 688 /* No free queue - we'll have to share */ 689 if (queue <= 0) { 690 queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac); 691 if (queue > 0) { 692 shared_queue = true; 693 mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED; 694 } 695 } 696 697 /* 698 * Mark TXQ as ready, even though it hasn't been fully configured yet, 699 * to make sure no one else takes it. 700 * This will allow avoiding re-acquiring the lock at the end of the 701 * configuration. On error we'll mark it back as free. 702 */ 703 if ((queue > 0) && !shared_queue) 704 mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY; 705 706 spin_unlock_bh(&mvm->queue_info_lock); 707 708 /* This shouldn't happen - out of queues */ 709 if (WARN_ON(queue <= 0)) { 710 IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n", 711 tid, cfg.sta_id); 712 return -ENOSPC; 713 } 714 715 /* 716 * Actual en/disablement of aggregations is through the ADD_STA HCMD, 717 * but for configuring the SCD to send A-MPDUs we need to mark the queue 718 * as aggregatable. 
719 * Mark all DATA queues as allowing to be aggregated at some point 720 */ 721 cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE || 722 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE); 723 724 /* 725 * If this queue was previously inactive (idle) - we need to free it 726 * first 727 */ 728 if (using_inactive_queue) { 729 struct iwl_scd_txq_cfg_cmd cmd = { 730 .scd_queue = queue, 731 .enable = 0, 732 }; 733 u8 ac; 734 735 disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue); 736 737 spin_lock_bh(&mvm->queue_info_lock); 738 ac = mvm->queue_info[queue].mac80211_ac; 739 cmd.sta_id = mvm->queue_info[queue].ra_sta_id; 740 cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[ac]; 741 spin_unlock_bh(&mvm->queue_info_lock); 742 743 /* Disable the queue */ 744 iwl_mvm_invalidate_sta_queue(mvm, queue, disable_agg_tids, 745 true); 746 iwl_trans_txq_disable(mvm->trans, queue, false); 747 ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), 748 &cmd); 749 if (ret) { 750 IWL_ERR(mvm, 751 "Failed to free inactive queue %d (ret=%d)\n", 752 queue, ret); 753 754 /* Re-mark the inactive queue as inactive */ 755 spin_lock_bh(&mvm->queue_info_lock); 756 mvm->queue_info[queue].status = IWL_MVM_QUEUE_INACTIVE; 757 spin_unlock_bh(&mvm->queue_info_lock); 758 759 return ret; 760 } 761 } 762 763 IWL_DEBUG_TX_QUEUES(mvm, 764 "Allocating %squeue #%d to sta %d on tid %d\n", 765 shared_queue ? "shared " : "", queue, 766 mvmsta->sta_id, tid); 767 768 if (shared_queue) { 769 /* Disable any open aggs on this queue */ 770 disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue); 771 772 if (disable_agg_tids) { 773 IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n", 774 queue); 775 iwl_mvm_invalidate_sta_queue(mvm, queue, 776 disable_agg_tids, false); 777 } 778 } 779 780 ssn = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 781 iwl_mvm_enable_txq(mvm, queue, mac_queue, ssn, &cfg, 782 wdg_timeout); 783 784 /* 785 * Mark queue as shared in transport if shared 786 * Note this has to be done after queue enablement because enablement 787 * can also set this value, and there is no indication there to shared 788 * queues 789 */ 790 if (shared_queue) 791 iwl_trans_txq_set_shared_mode(mvm->trans, queue, true); 792 793 spin_lock_bh(&mvmsta->lock); 794 mvmsta->tid_data[tid].txq_id = queue; 795 mvmsta->tid_data[tid].is_tid_active = true; 796 mvmsta->tfd_queue_msk |= BIT(queue); 797 queue_state = mvmsta->tid_data[tid].state; 798 799 if (mvmsta->reserved_queue == queue) 800 mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE; 801 spin_unlock_bh(&mvmsta->lock); 802 803 if (!shared_queue) { 804 ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES); 805 if (ret) 806 goto out_err; 807 808 /* If we need to re-enable aggregations... 
*/ 809 if (queue_state == IWL_AGG_ON) { 810 ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true); 811 if (ret) 812 goto out_err; 813 } 814 } else { 815 /* Redirect queue, if needed */ 816 ret = iwl_mvm_scd_queue_redirect(mvm, queue, tid, ac, ssn, 817 wdg_timeout, false); 818 if (ret) 819 goto out_err; 820 } 821 822 return 0; 823 824 out_err: 825 iwl_mvm_disable_txq(mvm, queue, mac_queue, tid, 0); 826 827 return ret; 828 } 829 830 static inline u8 iwl_mvm_tid_to_ac_queue(int tid) 831 { 832 if (tid == IWL_MAX_TID_COUNT) 833 return IEEE80211_AC_VO; /* MGMT */ 834 835 return tid_to_mac80211_ac[tid]; 836 } 837 838 static void iwl_mvm_tx_deferred_stream(struct iwl_mvm *mvm, 839 struct ieee80211_sta *sta, int tid) 840 { 841 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 842 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid]; 843 struct sk_buff *skb; 844 struct ieee80211_hdr *hdr; 845 struct sk_buff_head deferred_tx; 846 u8 mac_queue; 847 bool no_queue = false; /* Marks if there is a problem with the queue */ 848 u8 ac; 849 850 lockdep_assert_held(&mvm->mutex); 851 852 skb = skb_peek(&tid_data->deferred_tx_frames); 853 if (!skb) 854 return; 855 hdr = (void *)skb->data; 856 857 ac = iwl_mvm_tid_to_ac_queue(tid); 858 mac_queue = IEEE80211_SKB_CB(skb)->hw_queue; 859 860 if (tid_data->txq_id == IEEE80211_INVAL_HW_QUEUE && 861 iwl_mvm_sta_alloc_queue(mvm, sta, ac, tid, hdr)) { 862 IWL_ERR(mvm, 863 "Can't alloc TXQ for sta %d tid %d - dropping frame\n", 864 mvmsta->sta_id, tid); 865 866 /* 867 * Mark queue as problematic so later the deferred traffic is 868 * freed, as we can do nothing with it 869 */ 870 no_queue = true; 871 } 872 873 __skb_queue_head_init(&deferred_tx); 874 875 /* Disable bottom-halves when entering TX path */ 876 local_bh_disable(); 877 spin_lock(&mvmsta->lock); 878 skb_queue_splice_init(&tid_data->deferred_tx_frames, &deferred_tx); 879 spin_unlock(&mvmsta->lock); 880 881 while ((skb = __skb_dequeue(&deferred_tx))) 882 if (no_queue || iwl_mvm_tx_skb(mvm, skb, sta)) 883 ieee80211_free_txskb(mvm->hw, skb); 884 local_bh_enable(); 885 886 /* Wake queue */ 887 iwl_mvm_start_mac_queues(mvm, BIT(mac_queue)); 888 } 889 890 void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk) 891 { 892 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, 893 add_stream_wk); 894 struct ieee80211_sta *sta; 895 struct iwl_mvm_sta *mvmsta; 896 unsigned long deferred_tid_traffic; 897 int sta_id, tid; 898 899 /* Check inactivity of queues */ 900 iwl_mvm_inactivity_check(mvm); 901 902 mutex_lock(&mvm->mutex); 903 904 /* Go over all stations with deferred traffic */ 905 for_each_set_bit(sta_id, mvm->sta_deferred_frames, 906 IWL_MVM_STATION_COUNT) { 907 clear_bit(sta_id, mvm->sta_deferred_frames); 908 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 909 lockdep_is_held(&mvm->mutex)); 910 if (IS_ERR_OR_NULL(sta)) 911 continue; 912 913 mvmsta = iwl_mvm_sta_from_mac80211(sta); 914 deferred_tid_traffic = mvmsta->deferred_traffic_tid_map; 915 916 for_each_set_bit(tid, &deferred_tid_traffic, 917 IWL_MAX_TID_COUNT + 1) 918 iwl_mvm_tx_deferred_stream(mvm, sta, tid); 919 } 920 921 mutex_unlock(&mvm->mutex); 922 } 923 924 static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm, 925 struct ieee80211_sta *sta, 926 enum nl80211_iftype vif_type) 927 { 928 struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); 929 int queue; 930 931 /* 932 * Check for inactive queues, so we don't reach a situation where we 933 * can't add a STA due to a shortage in queues that doesn't really exist 
934 */ 935 iwl_mvm_inactivity_check(mvm); 936 937 spin_lock_bh(&mvm->queue_info_lock); 938 939 /* Make sure we have free resources for this STA */ 940 if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls && 941 !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].hw_queue_refcount && 942 (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status == 943 IWL_MVM_QUEUE_FREE)) 944 queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE; 945 else 946 queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id, 947 IWL_MVM_DQA_MIN_DATA_QUEUE, 948 IWL_MVM_DQA_MAX_DATA_QUEUE); 949 if (queue < 0) { 950 spin_unlock_bh(&mvm->queue_info_lock); 951 IWL_ERR(mvm, "No available queues for new station\n"); 952 return -ENOSPC; 953 } 954 mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED; 955 956 spin_unlock_bh(&mvm->queue_info_lock); 957 958 mvmsta->reserved_queue = queue; 959 960 IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n", 961 queue, mvmsta->sta_id); 962 963 return 0; 964 } 965 966 int iwl_mvm_add_sta(struct iwl_mvm *mvm, 967 struct ieee80211_vif *vif, 968 struct ieee80211_sta *sta) 969 { 970 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 971 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 972 struct iwl_mvm_rxq_dup_data *dup_data; 973 int i, ret, sta_id; 974 975 lockdep_assert_held(&mvm->mutex); 976 977 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) 978 sta_id = iwl_mvm_find_free_sta_id(mvm, 979 ieee80211_vif_type_p2p(vif)); 980 else 981 sta_id = mvm_sta->sta_id; 982 983 if (sta_id == IWL_MVM_STATION_COUNT) 984 return -ENOSPC; 985 986 spin_lock_init(&mvm_sta->lock); 987 988 mvm_sta->sta_id = sta_id; 989 mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id, 990 mvmvif->color); 991 mvm_sta->vif = vif; 992 mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF; 993 mvm_sta->tx_protection = 0; 994 mvm_sta->tt_tx_protection = false; 995 996 /* HW restart, don't assume the memory has been zeroed */ 997 atomic_set(&mvm->pending_frames[sta_id], 0); 998 mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */ 999 mvm_sta->tfd_queue_msk = 0; 1000 1001 /* 1002 * Allocate new queues for a TDLS station, unless we're in DQA mode, 1003 * and then they'll be allocated dynamically 1004 */ 1005 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) { 1006 ret = iwl_mvm_tdls_sta_init(mvm, sta); 1007 if (ret) 1008 return ret; 1009 } else if (!iwl_mvm_is_dqa_supported(mvm)) { 1010 for (i = 0; i < IEEE80211_NUM_ACS; i++) 1011 if (vif->hw_queue[i] != IEEE80211_INVAL_HW_QUEUE) 1012 mvm_sta->tfd_queue_msk |= BIT(vif->hw_queue[i]); 1013 } 1014 1015 /* for HW restart - reset everything but the sequence number */ 1016 for (i = 0; i <= IWL_MAX_TID_COUNT; i++) { 1017 u16 seq = mvm_sta->tid_data[i].seq_number; 1018 memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i])); 1019 mvm_sta->tid_data[i].seq_number = seq; 1020 1021 if (!iwl_mvm_is_dqa_supported(mvm)) 1022 continue; 1023 1024 /* 1025 * Mark all queues for this STA as unallocated and defer TX 1026 * frames until the queue is allocated 1027 */ 1028 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; 1029 skb_queue_head_init(&mvm_sta->tid_data[i].deferred_tx_frames); 1030 } 1031 mvm_sta->deferred_traffic_tid_map = 0; 1032 mvm_sta->agg_tids = 0; 1033 1034 if (iwl_mvm_has_new_rx_api(mvm) && 1035 !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1036 dup_data = kcalloc(mvm->trans->num_rx_queues, 1037 sizeof(*dup_data), 1038 GFP_KERNEL); 1039 if (!dup_data) 1040 return -ENOMEM; 1041 mvm_sta->dup_data = dup_data; 1042 } 1043 1044 if 
(iwl_mvm_is_dqa_supported(mvm)) { 1045 ret = iwl_mvm_reserve_sta_stream(mvm, sta, 1046 ieee80211_vif_type_p2p(vif)); 1047 if (ret) 1048 goto err; 1049 } 1050 1051 ret = iwl_mvm_sta_send_to_fw(mvm, sta, false, 0); 1052 if (ret) 1053 goto err; 1054 1055 if (vif->type == NL80211_IFTYPE_STATION) { 1056 if (!sta->tdls) { 1057 WARN_ON(mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT); 1058 mvmvif->ap_sta_id = sta_id; 1059 } else { 1060 WARN_ON(mvmvif->ap_sta_id == IWL_MVM_STATION_COUNT); 1061 } 1062 } 1063 1064 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta); 1065 1066 return 0; 1067 1068 err: 1069 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) 1070 iwl_mvm_tdls_sta_deinit(mvm, sta); 1071 return ret; 1072 } 1073 1074 int iwl_mvm_update_sta(struct iwl_mvm *mvm, 1075 struct ieee80211_vif *vif, 1076 struct ieee80211_sta *sta) 1077 { 1078 return iwl_mvm_sta_send_to_fw(mvm, sta, true, 0); 1079 } 1080 1081 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta, 1082 bool drain) 1083 { 1084 struct iwl_mvm_add_sta_cmd cmd = {}; 1085 int ret; 1086 u32 status; 1087 1088 lockdep_assert_held(&mvm->mutex); 1089 1090 cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color); 1091 cmd.sta_id = mvmsta->sta_id; 1092 cmd.add_modify = STA_MODE_MODIFY; 1093 cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0; 1094 cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW); 1095 1096 status = ADD_STA_SUCCESS; 1097 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1098 iwl_mvm_add_sta_cmd_size(mvm), 1099 &cmd, &status); 1100 if (ret) 1101 return ret; 1102 1103 switch (status & IWL_ADD_STA_STATUS_MASK) { 1104 case ADD_STA_SUCCESS: 1105 IWL_DEBUG_INFO(mvm, "Frames for staid %d will drained in fw\n", 1106 mvmsta->sta_id); 1107 break; 1108 default: 1109 ret = -EIO; 1110 IWL_ERR(mvm, "Couldn't drain frames for staid %d\n", 1111 mvmsta->sta_id); 1112 break; 1113 } 1114 1115 return ret; 1116 } 1117 1118 /* 1119 * Remove a station from the FW table. Before sending the command to remove 1120 * the station validate that the station is indeed known to the driver (sanity 1121 * only). 1122 */ 1123 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id) 1124 { 1125 struct ieee80211_sta *sta; 1126 struct iwl_mvm_rm_sta_cmd rm_sta_cmd = { 1127 .sta_id = sta_id, 1128 }; 1129 int ret; 1130 1131 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1132 lockdep_is_held(&mvm->mutex)); 1133 1134 /* Note: internal stations are marked as error values */ 1135 if (!sta) { 1136 IWL_ERR(mvm, "Invalid station id\n"); 1137 return -EINVAL; 1138 } 1139 1140 ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0, 1141 sizeof(rm_sta_cmd), &rm_sta_cmd); 1142 if (ret) { 1143 IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id); 1144 return ret; 1145 } 1146 1147 return 0; 1148 } 1149 1150 void iwl_mvm_sta_drained_wk(struct work_struct *wk) 1151 { 1152 struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm, sta_drained_wk); 1153 u8 sta_id; 1154 1155 /* 1156 * The mutex is needed because of the SYNC cmd, but not only: if the 1157 * work would run concurrently with iwl_mvm_rm_sta, it would run before 1158 * iwl_mvm_rm_sta sets the station as busy, and exit. Then 1159 * iwl_mvm_rm_sta would set the station as busy, and nobody will clean 1160 * that later. 
1161 */ 1162 mutex_lock(&mvm->mutex); 1163 1164 for_each_set_bit(sta_id, mvm->sta_drained, IWL_MVM_STATION_COUNT) { 1165 int ret; 1166 struct ieee80211_sta *sta = 1167 rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 1168 lockdep_is_held(&mvm->mutex)); 1169 1170 /* 1171 * This station is in use or RCU-removed; the latter happens in 1172 * managed mode, where mac80211 removes the station before we 1173 * can remove it from firmware (we can only do that after the 1174 * MAC is marked unassociated), and possibly while the deauth 1175 * frame to disconnect from the AP is still queued. Then, the 1176 * station pointer is -ENOENT when the last skb is reclaimed. 1177 */ 1178 if (!IS_ERR(sta) || PTR_ERR(sta) == -ENOENT) 1179 continue; 1180 1181 if (PTR_ERR(sta) == -EINVAL) { 1182 IWL_ERR(mvm, "Drained sta %d, but it is internal?\n", 1183 sta_id); 1184 continue; 1185 } 1186 1187 if (!sta) { 1188 IWL_ERR(mvm, "Drained sta %d, but it was NULL?\n", 1189 sta_id); 1190 continue; 1191 } 1192 1193 WARN_ON(PTR_ERR(sta) != -EBUSY); 1194 /* This station was removed and we waited until it got drained, 1195 * we can now proceed and remove it. 1196 */ 1197 ret = iwl_mvm_rm_sta_common(mvm, sta_id); 1198 if (ret) { 1199 IWL_ERR(mvm, 1200 "Couldn't remove sta %d after it was drained\n", 1201 sta_id); 1202 continue; 1203 } 1204 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); 1205 clear_bit(sta_id, mvm->sta_drained); 1206 1207 if (mvm->tfd_drained[sta_id]) { 1208 unsigned long i, msk = mvm->tfd_drained[sta_id]; 1209 1210 for_each_set_bit(i, &msk, sizeof(msk) * BITS_PER_BYTE) 1211 iwl_mvm_disable_txq(mvm, i, i, 1212 IWL_MAX_TID_COUNT, 0); 1213 1214 mvm->tfd_drained[sta_id] = 0; 1215 IWL_DEBUG_TDLS(mvm, "Drained sta %d, with queues %ld\n", 1216 sta_id, msk); 1217 } 1218 } 1219 1220 mutex_unlock(&mvm->mutex); 1221 } 1222 1223 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm, 1224 struct ieee80211_vif *vif, 1225 struct iwl_mvm_sta *mvm_sta) 1226 { 1227 int ac; 1228 int i; 1229 1230 lockdep_assert_held(&mvm->mutex); 1231 1232 for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) { 1233 if (mvm_sta->tid_data[i].txq_id == IEEE80211_INVAL_HW_QUEUE) 1234 continue; 1235 1236 ac = iwl_mvm_tid_to_ac_queue(i); 1237 iwl_mvm_disable_txq(mvm, mvm_sta->tid_data[i].txq_id, 1238 vif->hw_queue[ac], i, 0); 1239 mvm_sta->tid_data[i].txq_id = IEEE80211_INVAL_HW_QUEUE; 1240 } 1241 } 1242 1243 int iwl_mvm_rm_sta(struct iwl_mvm *mvm, 1244 struct ieee80211_vif *vif, 1245 struct ieee80211_sta *sta) 1246 { 1247 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1248 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1249 int ret; 1250 1251 lockdep_assert_held(&mvm->mutex); 1252 1253 if (iwl_mvm_has_new_rx_api(mvm)) 1254 kfree(mvm_sta->dup_data); 1255 1256 if ((vif->type == NL80211_IFTYPE_STATION && 1257 mvmvif->ap_sta_id == mvm_sta->sta_id) || 1258 iwl_mvm_is_dqa_supported(mvm)){ 1259 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); 1260 if (ret) 1261 return ret; 1262 /* flush its queues here since we are freeing mvm_sta */ 1263 ret = iwl_mvm_flush_tx_path(mvm, mvm_sta->tfd_queue_msk, 0); 1264 if (ret) 1265 return ret; 1266 ret = iwl_trans_wait_tx_queue_empty(mvm->trans, 1267 mvm_sta->tfd_queue_msk); 1268 if (ret) 1269 return ret; 1270 ret = iwl_mvm_drain_sta(mvm, mvm_sta, false); 1271 1272 /* If DQA is supported - the queues can be disabled now */ 1273 if (iwl_mvm_is_dqa_supported(mvm)) 1274 iwl_mvm_disable_sta_queues(mvm, vif, mvm_sta); 1275 1276 if (vif->type == NL80211_IFTYPE_STATION && 1277 
mvmvif->ap_sta_id == mvm_sta->sta_id) { 1278 /* if associated - we can't remove the AP STA now */ 1279 if (vif->bss_conf.assoc) 1280 return ret; 1281 1282 /* unassoc - go ahead - remove the AP STA now */ 1283 mvmvif->ap_sta_id = IWL_MVM_STATION_COUNT; 1284 1285 /* clear d0i3_ap_sta_id if no longer relevant */ 1286 if (mvm->d0i3_ap_sta_id == mvm_sta->sta_id) 1287 mvm->d0i3_ap_sta_id = IWL_MVM_STATION_COUNT; 1288 } 1289 } 1290 1291 /* 1292 * This shouldn't happen - the TDLS channel switch should be canceled 1293 * before the STA is removed. 1294 */ 1295 if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == mvm_sta->sta_id)) { 1296 mvm->tdls_cs.peer.sta_id = IWL_MVM_STATION_COUNT; 1297 cancel_delayed_work(&mvm->tdls_cs.dwork); 1298 } 1299 1300 /* 1301 * Make sure that the tx response code sees the station as -EBUSY and 1302 * calls the drain worker. 1303 */ 1304 spin_lock_bh(&mvm_sta->lock); 1305 /* 1306 * There are frames pending on the AC queues for this station. 1307 * We need to wait until all the frames are drained... 1308 */ 1309 if (atomic_read(&mvm->pending_frames[mvm_sta->sta_id])) { 1310 rcu_assign_pointer(mvm->fw_id_to_mac_id[mvm_sta->sta_id], 1311 ERR_PTR(-EBUSY)); 1312 spin_unlock_bh(&mvm_sta->lock); 1313 1314 /* disable TDLS sta queues on drain complete */ 1315 if (sta->tdls) { 1316 mvm->tfd_drained[mvm_sta->sta_id] = 1317 mvm_sta->tfd_queue_msk; 1318 IWL_DEBUG_TDLS(mvm, "Draining TDLS sta %d\n", 1319 mvm_sta->sta_id); 1320 } 1321 1322 ret = iwl_mvm_drain_sta(mvm, mvm_sta, true); 1323 } else { 1324 spin_unlock_bh(&mvm_sta->lock); 1325 1326 if (!iwl_mvm_is_dqa_supported(mvm) && sta->tdls) 1327 iwl_mvm_tdls_sta_deinit(mvm, sta); 1328 1329 ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id); 1330 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL); 1331 } 1332 1333 return ret; 1334 } 1335 1336 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm, 1337 struct ieee80211_vif *vif, 1338 u8 sta_id) 1339 { 1340 int ret = iwl_mvm_rm_sta_common(mvm, sta_id); 1341 1342 lockdep_assert_held(&mvm->mutex); 1343 1344 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL); 1345 return ret; 1346 } 1347 1348 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm, 1349 struct iwl_mvm_int_sta *sta, 1350 u32 qmask, enum nl80211_iftype iftype) 1351 { 1352 if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) { 1353 sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype); 1354 if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_STATION_COUNT)) 1355 return -ENOSPC; 1356 } 1357 1358 sta->tfd_queue_msk = qmask; 1359 1360 /* put a non-NULL value so iterating over the stations won't stop */ 1361 rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL)); 1362 return 0; 1363 } 1364 1365 static void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, 1366 struct iwl_mvm_int_sta *sta) 1367 { 1368 RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL); 1369 memset(sta, 0, sizeof(struct iwl_mvm_int_sta)); 1370 sta->sta_id = IWL_MVM_STATION_COUNT; 1371 } 1372 1373 static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm, 1374 struct iwl_mvm_int_sta *sta, 1375 const u8 *addr, 1376 u16 mac_id, u16 color) 1377 { 1378 struct iwl_mvm_add_sta_cmd cmd; 1379 int ret; 1380 u32 status; 1381 1382 lockdep_assert_held(&mvm->mutex); 1383 1384 memset(&cmd, 0, sizeof(cmd)); 1385 cmd.sta_id = sta->sta_id; 1386 cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id, 1387 color)); 1388 1389 cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk); 1390 cmd.tid_disable_tx = cpu_to_le16(0xffff); 1391 1392 if (addr) 1393 memcpy(cmd.addr, addr, ETH_ALEN); 
1394 1395 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1396 iwl_mvm_add_sta_cmd_size(mvm), 1397 &cmd, &status); 1398 if (ret) 1399 return ret; 1400 1401 switch (status & IWL_ADD_STA_STATUS_MASK) { 1402 case ADD_STA_SUCCESS: 1403 IWL_DEBUG_INFO(mvm, "Internal station added.\n"); 1404 return 0; 1405 default: 1406 ret = -EIO; 1407 IWL_ERR(mvm, "Add internal station failed, status=0x%x\n", 1408 status); 1409 break; 1410 } 1411 return ret; 1412 } 1413 1414 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm) 1415 { 1416 unsigned int wdg_timeout = iwlmvm_mod_params.tfd_q_hang_detect ? 1417 mvm->cfg->base_params->wd_timeout : 1418 IWL_WATCHDOG_DISABLED; 1419 int ret; 1420 1421 lockdep_assert_held(&mvm->mutex); 1422 1423 /* Map Aux queue to fifo - needs to happen before adding Aux station */ 1424 if (!iwl_mvm_is_dqa_supported(mvm)) 1425 iwl_mvm_enable_ac_txq(mvm, mvm->aux_queue, mvm->aux_queue, 1426 IWL_MVM_TX_FIFO_MCAST, 0, wdg_timeout); 1427 1428 /* Allocate aux station and assign to it the aux queue */ 1429 ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue), 1430 NL80211_IFTYPE_UNSPECIFIED); 1431 if (ret) 1432 return ret; 1433 1434 if (iwl_mvm_is_dqa_supported(mvm)) { 1435 struct iwl_trans_txq_scd_cfg cfg = { 1436 .fifo = IWL_MVM_TX_FIFO_MCAST, 1437 .sta_id = mvm->aux_sta.sta_id, 1438 .tid = IWL_MAX_TID_COUNT, 1439 .aggregate = false, 1440 .frame_limit = IWL_FRAME_LIMIT, 1441 }; 1442 1443 iwl_mvm_enable_txq(mvm, mvm->aux_queue, mvm->aux_queue, 0, &cfg, 1444 wdg_timeout); 1445 } 1446 1447 ret = iwl_mvm_add_int_sta_common(mvm, &mvm->aux_sta, NULL, 1448 MAC_INDEX_AUX, 0); 1449 1450 if (ret) 1451 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 1452 return ret; 1453 } 1454 1455 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1456 { 1457 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1458 1459 lockdep_assert_held(&mvm->mutex); 1460 return iwl_mvm_add_int_sta_common(mvm, &mvm->snif_sta, vif->addr, 1461 mvmvif->id, 0); 1462 } 1463 1464 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1465 { 1466 int ret; 1467 1468 lockdep_assert_held(&mvm->mutex); 1469 1470 ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id); 1471 if (ret) 1472 IWL_WARN(mvm, "Failed sending remove station\n"); 1473 1474 return ret; 1475 } 1476 1477 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm) 1478 { 1479 iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta); 1480 } 1481 1482 void iwl_mvm_del_aux_sta(struct iwl_mvm *mvm) 1483 { 1484 lockdep_assert_held(&mvm->mutex); 1485 1486 iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta); 1487 } 1488 1489 /* 1490 * Send the add station command for the vif's broadcast station. 1491 * Assumes that the station was already allocated. 1492 * 1493 * @mvm: the mvm component 1494 * @vif: the interface to which the broadcast station is added 1495 * @bsta: the broadcast station to add. 
1496 */ 1497 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1498 { 1499 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1500 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 1501 static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF}; 1502 const u8 *baddr = _baddr; 1503 1504 lockdep_assert_held(&mvm->mutex); 1505 1506 if (iwl_mvm_is_dqa_supported(mvm)) { 1507 struct iwl_trans_txq_scd_cfg cfg = { 1508 .fifo = IWL_MVM_TX_FIFO_VO, 1509 .sta_id = mvmvif->bcast_sta.sta_id, 1510 .tid = IWL_MAX_TID_COUNT, 1511 .aggregate = false, 1512 .frame_limit = IWL_FRAME_LIMIT, 1513 }; 1514 unsigned int wdg_timeout = 1515 iwl_mvm_get_wd_timeout(mvm, vif, false, false); 1516 int queue; 1517 1518 if ((vif->type == NL80211_IFTYPE_AP) && 1519 (mvmvif->bcast_sta.tfd_queue_msk & 1520 BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE))) 1521 queue = IWL_MVM_DQA_AP_PROBE_RESP_QUEUE; 1522 else if ((vif->type == NL80211_IFTYPE_P2P_DEVICE) && 1523 (mvmvif->bcast_sta.tfd_queue_msk & 1524 BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE))) 1525 queue = IWL_MVM_DQA_P2P_DEVICE_QUEUE; 1526 else if (WARN(1, "Missed required TXQ for adding bcast STA\n")) 1527 return -EINVAL; 1528 1529 iwl_mvm_enable_txq(mvm, queue, vif->hw_queue[0], 0, &cfg, 1530 wdg_timeout); 1531 } 1532 1533 if (vif->type == NL80211_IFTYPE_ADHOC) 1534 baddr = vif->bss_conf.bssid; 1535 1536 if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_STATION_COUNT)) 1537 return -ENOSPC; 1538 1539 return iwl_mvm_add_int_sta_common(mvm, bsta, baddr, 1540 mvmvif->id, mvmvif->color); 1541 } 1542 1543 /* Send the FW a request to remove the station from it's internal data 1544 * structures, but DO NOT remove the entry from the local data structures. */ 1545 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1546 { 1547 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1548 int ret; 1549 1550 lockdep_assert_held(&mvm->mutex); 1551 1552 ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id); 1553 if (ret) 1554 IWL_WARN(mvm, "Failed sending remove station\n"); 1555 return ret; 1556 } 1557 1558 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1559 { 1560 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1561 u32 qmask = 0; 1562 1563 lockdep_assert_held(&mvm->mutex); 1564 1565 if (!iwl_mvm_is_dqa_supported(mvm)) 1566 qmask = iwl_mvm_mac_get_queues_mask(vif); 1567 1568 if (vif->type == NL80211_IFTYPE_AP) { 1569 /* 1570 * The firmware defines the TFD queue mask to only be relevant 1571 * for *unicast* queues, so the multicast (CAB) queue shouldn't 1572 * be included. 1573 */ 1574 qmask &= ~BIT(vif->cab_queue); 1575 1576 if (iwl_mvm_is_dqa_supported(mvm)) 1577 qmask |= BIT(IWL_MVM_DQA_AP_PROBE_RESP_QUEUE); 1578 } else if (iwl_mvm_is_dqa_supported(mvm) && 1579 vif->type == NL80211_IFTYPE_P2P_DEVICE) { 1580 qmask |= BIT(IWL_MVM_DQA_P2P_DEVICE_QUEUE); 1581 } 1582 1583 return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, qmask, 1584 ieee80211_vif_type_p2p(vif)); 1585 } 1586 1587 /* Allocate a new station entry for the broadcast station to the given vif, 1588 * and send it to the FW. 1589 * Note that each P2P mac should have its own broadcast station. 1590 * 1591 * @mvm: the mvm component 1592 * @vif: the interface to which the broadcast station is added 1593 * @bsta: the broadcast station to add. 
*/ 1594 int iwl_mvm_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1595 { 1596 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1597 struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta; 1598 int ret; 1599 1600 lockdep_assert_held(&mvm->mutex); 1601 1602 ret = iwl_mvm_alloc_bcast_sta(mvm, vif); 1603 if (ret) 1604 return ret; 1605 1606 ret = iwl_mvm_send_add_bcast_sta(mvm, vif); 1607 1608 if (ret) 1609 iwl_mvm_dealloc_int_sta(mvm, bsta); 1610 1611 return ret; 1612 } 1613 1614 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1615 { 1616 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 1617 1618 iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta); 1619 } 1620 1621 /* 1622 * Send the FW a request to remove the station from it's internal data 1623 * structures, and in addition remove it from the local data structure. 1624 */ 1625 int iwl_mvm_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif) 1626 { 1627 int ret; 1628 1629 lockdep_assert_held(&mvm->mutex); 1630 1631 ret = iwl_mvm_send_rm_bcast_sta(mvm, vif); 1632 1633 iwl_mvm_dealloc_bcast_sta(mvm, vif); 1634 1635 return ret; 1636 } 1637 1638 #define IWL_MAX_RX_BA_SESSIONS 16 1639 1640 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid) 1641 { 1642 struct iwl_mvm_delba_notif notif = { 1643 .metadata.type = IWL_MVM_RXQ_NOTIF_DEL_BA, 1644 .metadata.sync = 1, 1645 .delba.baid = baid, 1646 }; 1647 iwl_mvm_sync_rx_queues_internal(mvm, (void *)¬if, sizeof(notif)); 1648 }; 1649 1650 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, 1651 struct iwl_mvm_baid_data *data) 1652 { 1653 int i; 1654 1655 iwl_mvm_sync_rxq_del_ba(mvm, data->baid); 1656 1657 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 1658 int j; 1659 struct iwl_mvm_reorder_buffer *reorder_buf = 1660 &data->reorder_buf[i]; 1661 1662 spin_lock_bh(&reorder_buf->lock); 1663 if (likely(!reorder_buf->num_stored)) { 1664 spin_unlock_bh(&reorder_buf->lock); 1665 continue; 1666 } 1667 1668 /* 1669 * This shouldn't happen in regular DELBA since the internal 1670 * delBA notification should trigger a release of all frames in 1671 * the reorder buffer. 1672 */ 1673 WARN_ON(1); 1674 1675 for (j = 0; j < reorder_buf->buf_size; j++) 1676 __skb_queue_purge(&reorder_buf->entries[j]); 1677 /* 1678 * Prevent timer re-arm. This prevents a very far fetched case 1679 * where we timed out on the notification. There may be prior 1680 * RX frames pending in the RX queue before the notification 1681 * that might get processed between now and the actual deletion 1682 * and we would re-arm the timer although we are deleting the 1683 * reorder buffer. 
1684 */ 1685 reorder_buf->removed = true; 1686 spin_unlock_bh(&reorder_buf->lock); 1687 del_timer_sync(&reorder_buf->reorder_timer); 1688 } 1689 } 1690 1691 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, 1692 u32 sta_id, 1693 struct iwl_mvm_baid_data *data, 1694 u16 ssn, u8 buf_size) 1695 { 1696 int i; 1697 1698 for (i = 0; i < mvm->trans->num_rx_queues; i++) { 1699 struct iwl_mvm_reorder_buffer *reorder_buf = 1700 &data->reorder_buf[i]; 1701 int j; 1702 1703 reorder_buf->num_stored = 0; 1704 reorder_buf->head_sn = ssn; 1705 reorder_buf->buf_size = buf_size; 1706 /* rx reorder timer */ 1707 reorder_buf->reorder_timer.function = 1708 iwl_mvm_reorder_timer_expired; 1709 reorder_buf->reorder_timer.data = (unsigned long)reorder_buf; 1710 init_timer(&reorder_buf->reorder_timer); 1711 spin_lock_init(&reorder_buf->lock); 1712 reorder_buf->mvm = mvm; 1713 reorder_buf->queue = i; 1714 reorder_buf->sta_id = sta_id; 1715 for (j = 0; j < reorder_buf->buf_size; j++) 1716 __skb_queue_head_init(&reorder_buf->entries[j]); 1717 } 1718 } 1719 1720 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 1721 int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) 1722 { 1723 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1724 struct iwl_mvm_add_sta_cmd cmd = {}; 1725 struct iwl_mvm_baid_data *baid_data = NULL; 1726 int ret; 1727 u32 status; 1728 1729 lockdep_assert_held(&mvm->mutex); 1730 1731 if (start && mvm->rx_ba_sessions >= IWL_MAX_RX_BA_SESSIONS) { 1732 IWL_WARN(mvm, "Not enough RX BA SESSIONS\n"); 1733 return -ENOSPC; 1734 } 1735 1736 if (iwl_mvm_has_new_rx_api(mvm) && start) { 1737 /* 1738 * Allocate here so if allocation fails we can bail out early 1739 * before starting the BA session in the firmware 1740 */ 1741 baid_data = kzalloc(sizeof(*baid_data) + 1742 mvm->trans->num_rx_queues * 1743 sizeof(baid_data->reorder_buf[0]), 1744 GFP_KERNEL); 1745 if (!baid_data) 1746 return -ENOMEM; 1747 } 1748 1749 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 1750 cmd.sta_id = mvm_sta->sta_id; 1751 cmd.add_modify = STA_MODE_MODIFY; 1752 if (start) { 1753 cmd.add_immediate_ba_tid = (u8) tid; 1754 cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); 1755 cmd.rx_ba_window = cpu_to_le16((u16)buf_size); 1756 } else { 1757 cmd.remove_immediate_ba_tid = (u8) tid; 1758 } 1759 cmd.modify_mask = start ? STA_MODIFY_ADD_BA_TID : 1760 STA_MODIFY_REMOVE_BA_TID; 1761 1762 status = ADD_STA_SUCCESS; 1763 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1764 iwl_mvm_add_sta_cmd_size(mvm), 1765 &cmd, &status); 1766 if (ret) 1767 goto out_free; 1768 1769 switch (status & IWL_ADD_STA_STATUS_MASK) { 1770 case ADD_STA_SUCCESS: 1771 IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n", 1772 start ? "start" : "stopp"); 1773 break; 1774 case ADD_STA_IMMEDIATE_BA_FAILURE: 1775 IWL_WARN(mvm, "RX BA Session refused by fw\n"); 1776 ret = -ENOSPC; 1777 break; 1778 default: 1779 ret = -EIO; 1780 IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n", 1781 start ? 
"start" : "stopp", status); 1782 break; 1783 } 1784 1785 if (ret) 1786 goto out_free; 1787 1788 if (start) { 1789 u8 baid; 1790 1791 mvm->rx_ba_sessions++; 1792 1793 if (!iwl_mvm_has_new_rx_api(mvm)) 1794 return 0; 1795 1796 if (WARN_ON(!(status & IWL_ADD_STA_BAID_VALID_MASK))) { 1797 ret = -EINVAL; 1798 goto out_free; 1799 } 1800 baid = (u8)((status & IWL_ADD_STA_BAID_MASK) >> 1801 IWL_ADD_STA_BAID_SHIFT); 1802 baid_data->baid = baid; 1803 baid_data->timeout = timeout; 1804 baid_data->last_rx = jiffies; 1805 init_timer(&baid_data->session_timer); 1806 baid_data->session_timer.function = 1807 iwl_mvm_rx_agg_session_expired; 1808 baid_data->session_timer.data = 1809 (unsigned long)&mvm->baid_map[baid]; 1810 baid_data->mvm = mvm; 1811 baid_data->tid = tid; 1812 baid_data->sta_id = mvm_sta->sta_id; 1813 1814 mvm_sta->tid_to_baid[tid] = baid; 1815 if (timeout) 1816 mod_timer(&baid_data->session_timer, 1817 TU_TO_EXP_TIME(timeout * 2)); 1818 1819 iwl_mvm_init_reorder_buffer(mvm, mvm_sta->sta_id, 1820 baid_data, ssn, buf_size); 1821 /* 1822 * protect the BA data with RCU to cover a case where our 1823 * internal RX sync mechanism will timeout (not that it's 1824 * supposed to happen) and we will free the session data while 1825 * RX is being processed in parallel 1826 */ 1827 IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n", 1828 mvm_sta->sta_id, tid, baid); 1829 WARN_ON(rcu_access_pointer(mvm->baid_map[baid])); 1830 rcu_assign_pointer(mvm->baid_map[baid], baid_data); 1831 } else { 1832 u8 baid = mvm_sta->tid_to_baid[tid]; 1833 1834 if (mvm->rx_ba_sessions > 0) 1835 /* check that restart flow didn't zero the counter */ 1836 mvm->rx_ba_sessions--; 1837 if (!iwl_mvm_has_new_rx_api(mvm)) 1838 return 0; 1839 1840 if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID)) 1841 return -EINVAL; 1842 1843 baid_data = rcu_access_pointer(mvm->baid_map[baid]); 1844 if (WARN_ON(!baid_data)) 1845 return -EINVAL; 1846 1847 /* synchronize all rx queues so we can safely delete */ 1848 iwl_mvm_free_reorder(mvm, baid_data); 1849 del_timer_sync(&baid_data->session_timer); 1850 RCU_INIT_POINTER(mvm->baid_map[baid], NULL); 1851 kfree_rcu(baid_data, rcu_head); 1852 IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid); 1853 } 1854 return 0; 1855 1856 out_free: 1857 kfree(baid_data); 1858 return ret; 1859 } 1860 1861 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, 1862 int tid, u8 queue, bool start) 1863 { 1864 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 1865 struct iwl_mvm_add_sta_cmd cmd = {}; 1866 int ret; 1867 u32 status; 1868 1869 lockdep_assert_held(&mvm->mutex); 1870 1871 if (start) { 1872 mvm_sta->tfd_queue_msk |= BIT(queue); 1873 mvm_sta->tid_disable_agg &= ~BIT(tid); 1874 } else { 1875 /* In DQA-mode the queue isn't removed on agg termination */ 1876 if (!iwl_mvm_is_dqa_supported(mvm)) 1877 mvm_sta->tfd_queue_msk &= ~BIT(queue); 1878 mvm_sta->tid_disable_agg |= BIT(tid); 1879 } 1880 1881 cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color); 1882 cmd.sta_id = mvm_sta->sta_id; 1883 cmd.add_modify = STA_MODE_MODIFY; 1884 cmd.modify_mask = STA_MODIFY_QUEUES | STA_MODIFY_TID_DISABLE_TX; 1885 cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk); 1886 cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg); 1887 1888 status = ADD_STA_SUCCESS; 1889 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA, 1890 iwl_mvm_add_sta_cmd_size(mvm), 1891 &cmd, &status); 1892 if (ret) 1893 return ret; 1894 1895 switch (status & IWL_ADD_STA_STATUS_MASK) { 1896 case ADD_STA_SUCCESS: 1897 
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
			start ? "start" : "stopp", status);
		break;
	}

	return ret;
}

const u8 tid_to_mac80211_ac[] = {
	IEEE80211_AC_BE,
	IEEE80211_AC_BK,
	IEEE80211_AC_BK,
	IEEE80211_AC_BE,
	IEEE80211_AC_VI,
	IEEE80211_AC_VI,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO,
	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
};

static const u8 tid_to_ucode_ac[] = {
	AC_BE,
	AC_BK,
	AC_BK,
	AC_BE,
	AC_VI,
	AC_VI,
	AC_VO,
	AC_VO,
};

int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data;
	int txq_id;
	int ret;

	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	if (mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
		IWL_ERR(mvm, "Start AGG when state is not IWL_AGG_OFF %d!\n",
			mvmsta->tid_data[tid].state);
		return -ENXIO;
	}

	lockdep_assert_held(&mvm->mutex);

	spin_lock_bh(&mvmsta->lock);

	/* possible race condition - we entered D0i3 while starting agg */
	if (test_bit(IWL_MVM_STATUS_IN_D0I3, &mvm->status)) {
		spin_unlock_bh(&mvmsta->lock);
		IWL_ERR(mvm, "Entered D0i3 while starting Tx agg\n");
		return -EIO;
	}

	spin_lock_bh(&mvm->queue_info_lock);

	/*
	 * Note the possible cases:
	 *  1. In DQA mode with an enabled TXQ - TXQ needs to become agg'ed
	 *  2. Non-DQA mode: the TXQ hasn't yet been enabled, so find a free
	 *     one and mark it as reserved
	 *  3.
	 *     In DQA mode, but no traffic yet on this TID: same treatment as
	 *     in non-DQA mode, since the TXQ hasn't yet been allocated
	 */
	txq_id = mvmsta->tid_data[tid].txq_id;
	if (!iwl_mvm_is_dqa_supported(mvm) ||
	    mvm->queue_info[txq_id].status != IWL_MVM_QUEUE_READY) {
		txq_id = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						 mvm->first_agg_queue,
						 mvm->last_agg_queue);
		if (txq_id < 0) {
			ret = txq_id;
			spin_unlock_bh(&mvm->queue_info_lock);
			IWL_ERR(mvm, "Failed to allocate agg queue\n");
			goto release_locks;
		}

		/* TXQ hasn't yet been enabled, so mark it only as reserved */
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
	}
	spin_unlock_bh(&mvm->queue_info_lock);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "AGG for tid %d will be on queue #%d\n",
			    tid, txq_id);

	tid_data = &mvmsta->tid_data[tid];
	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
	tid_data->txq_id = txq_id;
	*ssn = tid_data->ssn;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
			    tid_data->next_reclaimed);

	if (tid_data->ssn == tid_data->next_reclaimed) {
		tid_data->state = IWL_AGG_STARTING;
		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
	} else {
		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
	}

	ret = 0;

release_locks:
	spin_unlock_bh(&mvmsta->lock);

	return ret;
}

int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid, u8 buf_size,
			    bool amsdu)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
	int queue, ret;
	bool alloc_queue = true;
	u16 ssn;

	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = buf_size,
		.aggregate = true,
	};

	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
		     != IWL_MAX_TID_COUNT);

	buf_size = min_t(int, buf_size, LINK_QUAL_AGG_FRAME_LIMIT_DEF);

	spin_lock_bh(&mvmsta->lock);
	ssn = tid_data->ssn;
	queue = tid_data->txq_id;
	tid_data->state = IWL_AGG_ON;
	mvmsta->agg_tids |= BIT(tid);
	tid_data->ssn = 0xffff;
	tid_data->amsdu_in_ampdu_allowed = amsdu;
	spin_unlock_bh(&mvmsta->lock);

	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	/* In DQA mode, the existing queue might need to be reconfigured */
	if (iwl_mvm_is_dqa_supported(mvm)) {
		spin_lock_bh(&mvm->queue_info_lock);
		/* Maybe there is no need to even alloc a queue...
		 */
		if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
			alloc_queue = false;
		spin_unlock_bh(&mvm->queue_info_lock);

		/*
		 * Only reconfig the SCD for the queue if the window size has
		 * changed from current (become smaller)
		 */
		if (!alloc_queue && buf_size < mvmsta->max_agg_bufsize) {
			/*
			 * If reconfiguring an existing queue, it first must be
			 * drained
			 */
			ret = iwl_trans_wait_tx_queue_empty(mvm->trans,
							    BIT(queue));
			if (ret) {
				IWL_ERR(mvm,
					"Error draining queue before reconfig\n");
				return ret;
			}

			ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
						   mvmsta->sta_id, tid,
						   buf_size, ssn);
			if (ret) {
				IWL_ERR(mvm,
					"Error reconfiguring TXQ #%d\n", queue);
				return ret;
			}
		}
	}

	if (alloc_queue)
		iwl_mvm_enable_txq(mvm, queue,
				   vif->hw_queue[tid_to_mac80211_ac[tid]], ssn,
				   &cfg, wdg_timeout);

	ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
	if (ret)
		return -EIO;

	/* No need to mark as reserved */
	spin_lock_bh(&mvm->queue_info_lock);
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
	spin_unlock_bh(&mvm->queue_info_lock);

	/*
	 * Even though in theory the peer could have different
	 * aggregation reorder buffer sizes for different sessions,
	 * our ucode doesn't allow for that and has a global limit
	 * for each station. Therefore, use the minimum of all the
	 * aggregation sessions and our default value.
	 */
	mvmsta->max_agg_bufsize =
		min(mvmsta->max_agg_bufsize, buf_size);
	mvmsta->lq_sta.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;

	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
		     sta->addr, tid);

	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.lq, false);
}

int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			    struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	int err;

	/*
	 * If mac80211 is cleaning its state, then say that we finished since
	 * our state has been cleared anyway.
	 */
	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		return 0;
	}

	spin_lock_bh(&mvmsta->lock);

	txq_id = tid_data->txq_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);

	mvmsta->agg_tids &= ~BIT(tid);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	switch (tid_data->state) {
	case IWL_AGG_ON:
		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "ssn = %d, next_recl = %d\n",
				    tid_data->ssn, tid_data->next_reclaimed);

		/* There are still packets for this RA / TID in the HW */
		if (tid_data->ssn != tid_data->next_reclaimed) {
			tid_data->state = IWL_EMPTYING_HW_QUEUE_DELBA;
			err = 0;
			break;
		}

		tid_data->ssn = 0xffff;
		tid_data->state = IWL_AGG_OFF;
		spin_unlock_bh(&mvmsta->lock);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, txq_id, mac_queue, tid, 0);
		}
		return 0;
	case IWL_AGG_STARTING:
	case IWL_EMPTYING_HW_QUEUE_ADDBA:
		/*
		 * The agg session has been stopped before it was set up. This
		 * can happen when the AddBA timer times out for example.
		 */

		/* No barriers since we are under mutex */
		lockdep_assert_held(&mvm->mutex);

		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
		tid_data->state = IWL_AGG_OFF;
		err = 0;
		break;
	default:
		IWL_ERR(mvm,
			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
			mvmsta->sta_id, tid, tid_data->state);
		IWL_ERR(mvm,
			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
		err = -EINVAL;
	}

	spin_unlock_bh(&mvmsta->lock);

	return err;
}

int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta, u16 tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
	u16 txq_id;
	enum iwl_mvm_agg_state old_state;

	/*
	 * First set the agg state to OFF to avoid calling
	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
	 */
	spin_lock_bh(&mvmsta->lock);
	txq_id = tid_data->txq_id;
	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
			    mvmsta->sta_id, tid, txq_id, tid_data->state);
	old_state = tid_data->state;
	tid_data->state = IWL_AGG_OFF;
	mvmsta->agg_tids &= ~BIT(tid);
	spin_unlock_bh(&mvmsta->lock);

	spin_lock_bh(&mvm->queue_info_lock);
	/*
	 * The TXQ is marked as reserved only if no traffic came through yet.
	 * This means no traffic has been sent on this TID (agg'd or not), so
	 * we no longer have use for the queue. It hasn't even been
	 * allocated through iwl_mvm_enable_txq, so we can just mark it back as
	 * free.
	 */
	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED)
		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
	spin_unlock_bh(&mvm->queue_info_lock);

	if (old_state >= IWL_AGG_ON) {
		iwl_mvm_drain_sta(mvm, mvmsta, true);
		if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id), 0))
			IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
		iwl_trans_wait_tx_queue_empty(mvm->trans,
					      mvmsta->tfd_queue_msk);
		iwl_mvm_drain_sta(mvm, mvmsta, false);

		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);

		if (!iwl_mvm_is_dqa_supported(mvm)) {
			int mac_queue = vif->hw_queue[tid_to_mac80211_ac[tid]];

			iwl_mvm_disable_txq(mvm, tid_data->txq_id, mac_queue,
					    tid, 0);
		}
	}

	return 0;
}

static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
{
	int i, max = -1, max_offs = -1;

	lockdep_assert_held(&mvm->mutex);

	/* Pick the unused key offset with the highest 'deleted'
	 * counter. Every time a key is deleted, all the counters
	 * are incremented and the one that was just deleted is
	 * reset to zero. Thus, the highest counter is the one
	 * that was deleted longest ago. Pick that one.
	 */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (test_bit(i, mvm->fw_key_table))
			continue;
		if (mvm->fw_key_deleted[i] > max) {
			max = mvm->fw_key_deleted[i];
			max_offs = i;
		}
	}

	if (max_offs < 0)
		return STA_KEY_IDX_INVALID;

	return max_offs;
}

static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
					       struct ieee80211_vif *vif,
					       struct ieee80211_sta *sta)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);

	if (sta)
		return iwl_mvm_sta_from_mac80211(sta);

	/*
	 * The device expects GTKs for station interfaces to be
	 * installed as GTKs for the AP station. If we have no
	 * station ID, then use AP's station ID.
	 */
	if (vif->type == NL80211_IFTYPE_STATION &&
	    mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) {
		u8 sta_id = mvmvif->ap_sta_id;

		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
					    lockdep_is_held(&mvm->mutex));

		/*
		 * It is possible that the 'sta' parameter is NULL,
		 * for example when a GTK is removed - the sta_id will then
		 * be the AP ID, and no station was passed by mac80211.
		 */
		if (IS_ERR_OR_NULL(sta))
			return NULL;

		return iwl_mvm_sta_from_mac80211(sta);
	}

	return NULL;
}

static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
				struct iwl_mvm_sta *mvm_sta,
				struct ieee80211_key_conf *keyconf, bool mcast,
				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
				u8 key_offset)
{
	struct iwl_mvm_add_sta_key_cmd cmd = {};
	__le16 key_flags;
	int ret;
	u32 status;
	u16 keyidx;
	int i;
	u8 sta_id = mvm_sta->sta_id;

	keyidx = (keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
		 STA_KEY_FLG_KEYID_MSK;
	key_flags = cpu_to_le16(keyidx);
	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);

	switch (keyconf->cipher) {
	case WLAN_CIPHER_SUITE_TKIP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
		cmd.tkip_rx_tsc_byte2 = tkip_iv32;
		for (i = 0; i < 5; i++)
			cmd.tkip_rx_ttak[i] = cpu_to_le16(tkip_p1k[i]);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_WEP104:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_WEP40:
		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
		memcpy(cmd.key + 3, keyconf->key, keyconf->keylen);
		break;
	case WLAN_CIPHER_SUITE_GCMP_256:
		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
		/* fall through */
	case WLAN_CIPHER_SUITE_GCMP:
		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
		break;
	default:
		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
		memcpy(cmd.key, keyconf->key, keyconf->keylen);
	}

	if (mcast)
		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);

	cmd.key_offset = key_offset;
	cmd.key_flags = key_flags;
	cmd.sta_id = sta_id;

	status = ADD_STA_SUCCESS;
	if (cmd_flags & CMD_ASYNC)
		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC,
					   sizeof(cmd), &cmd);
	else
		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd),
						  &cmd, &status);

	switch (status) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
		break;
	}

	return ret;
}

static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
				 struct ieee80211_key_conf *keyconf,
				 u8 sta_id, bool remove_key)
{
	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};

	/* verify the key details match the required command's expectations */
	if (WARN_ON((keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC) ||
		    (keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
		    (keyconf->keyidx != 4 && keyconf->keyidx != 5)))
		return -EINVAL;

	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
	igtk_cmd.sta_id = cpu_to_le32(sta_id);

	if (remove_key) {
		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
	} else {
		struct ieee80211_key_seq seq;
		const u8 *pn;

		switch (keyconf->cipher) {
		case WLAN_CIPHER_SUITE_AES_CMAC:
			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
			break;
		default:
			return -EINVAL;
		}

		memcpy(igtk_cmd.IGTK, keyconf->key, keyconf->keylen);
		ieee80211_get_key_rx_seq(keyconf, 0,
&seq); 2439 pn = seq.aes_cmac.pn; 2440 igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) | 2441 ((u64) pn[4] << 8) | 2442 ((u64) pn[3] << 16) | 2443 ((u64) pn[2] << 24) | 2444 ((u64) pn[1] << 32) | 2445 ((u64) pn[0] << 40)); 2446 } 2447 2448 IWL_DEBUG_INFO(mvm, "%s igtk for sta %u\n", 2449 remove_key ? "removing" : "installing", 2450 igtk_cmd.sta_id); 2451 2452 return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0, 2453 sizeof(igtk_cmd), &igtk_cmd); 2454 } 2455 2456 2457 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm, 2458 struct ieee80211_vif *vif, 2459 struct ieee80211_sta *sta) 2460 { 2461 struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); 2462 2463 if (sta) 2464 return sta->addr; 2465 2466 if (vif->type == NL80211_IFTYPE_STATION && 2467 mvmvif->ap_sta_id != IWL_MVM_STATION_COUNT) { 2468 u8 sta_id = mvmvif->ap_sta_id; 2469 sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id], 2470 lockdep_is_held(&mvm->mutex)); 2471 return sta->addr; 2472 } 2473 2474 2475 return NULL; 2476 } 2477 2478 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 2479 struct ieee80211_vif *vif, 2480 struct ieee80211_sta *sta, 2481 struct ieee80211_key_conf *keyconf, 2482 u8 key_offset, 2483 bool mcast) 2484 { 2485 struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); 2486 int ret; 2487 const u8 *addr; 2488 struct ieee80211_key_seq seq; 2489 u16 p1k[5]; 2490 2491 switch (keyconf->cipher) { 2492 case WLAN_CIPHER_SUITE_TKIP: 2493 addr = iwl_mvm_get_mac_addr(mvm, vif, sta); 2494 /* get phase 1 key from mac80211 */ 2495 ieee80211_get_key_rx_seq(keyconf, 0, &seq); 2496 ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k); 2497 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 2498 seq.tkip.iv32, p1k, 0, key_offset); 2499 break; 2500 case WLAN_CIPHER_SUITE_CCMP: 2501 case WLAN_CIPHER_SUITE_WEP40: 2502 case WLAN_CIPHER_SUITE_WEP104: 2503 case WLAN_CIPHER_SUITE_GCMP: 2504 case WLAN_CIPHER_SUITE_GCMP_256: 2505 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 2506 0, NULL, 0, key_offset); 2507 break; 2508 default: 2509 ret = iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast, 2510 0, NULL, 0, key_offset); 2511 } 2512 2513 return ret; 2514 } 2515 2516 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id, 2517 struct ieee80211_key_conf *keyconf, 2518 bool mcast) 2519 { 2520 struct iwl_mvm_add_sta_key_cmd cmd = {}; 2521 __le16 key_flags; 2522 int ret; 2523 u32 status; 2524 2525 key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) & 2526 STA_KEY_FLG_KEYID_MSK); 2527 key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP); 2528 key_flags |= cpu_to_le16(STA_KEY_NOT_VALID); 2529 2530 if (mcast) 2531 key_flags |= cpu_to_le16(STA_KEY_MULTICAST); 2532 2533 cmd.key_flags = key_flags; 2534 cmd.key_offset = keyconf->hw_key_idx; 2535 cmd.sta_id = sta_id; 2536 2537 status = ADD_STA_SUCCESS; 2538 ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, sizeof(cmd), 2539 &cmd, &status); 2540 2541 switch (status) { 2542 case ADD_STA_SUCCESS: 2543 IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n"); 2544 break; 2545 default: 2546 ret = -EIO; 2547 IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n"); 2548 break; 2549 } 2550 2551 return ret; 2552 } 2553 2554 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm, 2555 struct ieee80211_vif *vif, 2556 struct ieee80211_sta *sta, 2557 struct ieee80211_key_conf *keyconf, 2558 u8 key_offset) 2559 { 2560 bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE); 2561 struct iwl_mvm_sta *mvm_sta; 2562 
	u8 sta_id;
	int ret;
	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};

	lockdep_assert_held(&mvm->mutex);

	/* Get the station id from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (!mvm_sta) {
		IWL_ERR(mvm, "Failed to find station\n");
		return -EINVAL;
	}
	sta_id = mvm_sta->sta_id;

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC) {
		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
		goto end;
	}

	/*
	 * It is possible that the 'sta' parameter is NULL, and thus
	 * there is a need to retrieve the sta from the local station table.
	 */
	if (!sta) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta)) {
			IWL_ERR(mvm, "Invalid station id\n");
			return -EINVAL;
		}
	}

	if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
		return -EINVAL;

	/* If the key_offset is not pre-assigned, we need to find a
	 * new offset to use. In normal cases, the offset is not
	 * pre-assigned, but during HW_RESTART we want to reuse the
	 * same indices, so we pass them when this function is called.
	 *
	 * In D3 entry, we need to hard-code the indices (because the
	 * firmware hardcodes the PTK offset to 0). In this case, we
	 * need to make sure we don't overwrite the hw_key_idx in the
	 * keyconf structure, because otherwise we cannot configure
	 * the original ones back when resuming.
	 */
	if (key_offset == STA_KEY_IDX_INVALID) {
		key_offset = iwl_mvm_set_fw_key_idx(mvm);
		if (key_offset == STA_KEY_IDX_INVALID)
			return -ENOSPC;
		keyconf->hw_key_idx = key_offset;
	}

	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
	if (ret)
		goto end;

	/*
	 * For WEP, the same key is used for multicast and unicast. Upload it
	 * again, using the same key offset, and now pointing the other one
	 * to the same key slot (offset).
	 * If this fails, remove the original as well.
	 */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) {
		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
					    key_offset, !mcast);
		if (ret) {
			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
			goto end;
		}
	}

	__set_bit(key_offset, mvm->fw_key_table);

end:
	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
		      sta ?
		      sta->addr : zero_addr, ret);
	return ret;
}

int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
			   struct ieee80211_vif *vif,
			   struct ieee80211_sta *sta,
			   struct ieee80211_key_conf *keyconf)
{
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
	struct iwl_mvm_sta *mvm_sta;
	u8 sta_id = IWL_MVM_STATION_COUNT;
	int ret, i;

	lockdep_assert_held(&mvm->mutex);

	/* Get the station from the mvm local station table */
	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);

	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
		      keyconf->keyidx, sta_id);

	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC)
		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);

	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
			keyconf->hw_key_idx);
		return -ENOENT;
	}

	/* track which key was deleted last */
	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
		if (mvm->fw_key_deleted[i] < U8_MAX)
			mvm->fw_key_deleted[i]++;
	}
	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;

	if (!mvm_sta) {
		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
		return 0;
	}

	sta_id = mvm_sta->sta_id;

	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
	if (ret)
		return ret;

	/* delete WEP key twice to get rid of (now useless) offset */
	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);

	return ret;
}

void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
			     struct ieee80211_vif *vif,
			     struct ieee80211_key_conf *keyconf,
			     struct ieee80211_sta *sta, u32 iv32,
			     u16 *phase1key)
{
	struct iwl_mvm_sta *mvm_sta;
	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);

	rcu_read_lock();

	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
	if (WARN_ON_ONCE(!mvm_sta))
		goto unlock;
	iwl_mvm_send_sta_key(mvm, mvm_sta, keyconf, mcast,
			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx);

unlock:
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
				struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       enum ieee80211_frame_release_type reason,
				       u16 cnt, u16 tids, bool more_data,
				       bool agg)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
		.sleep_tx_count = cpu_to_le16(cnt),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int tid, ret;
	unsigned long _tids = tids;

	/* convert TIDs to ACs - we don't
	 * support TSPEC so that's OK.
	 * Note that this field is reserved and unused by firmware not
	 * supporting GO uAPSD, so it's safe to always do this.
	 */
	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);

	/* If we're releasing frames from aggregation queues then check if all
	 * the queues combined that we're releasing frames from have
	 *  - more frames than the service period, in which case more_data
	 *    needs to be set
	 *  - fewer than 'cnt' frames, in which case we need to adjust the
	 *    firmware command (but do that unconditionally)
	 */
	if (agg) {
		int remaining = cnt;
		int sleep_tx_count;

		spin_lock_bh(&mvmsta->lock);
		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
			struct iwl_mvm_tid_data *tid_data;
			u16 n_queued;

			tid_data = &mvmsta->tid_data[tid];
			if (WARN(tid_data->state != IWL_AGG_ON &&
				 tid_data->state != IWL_EMPTYING_HW_QUEUE_DELBA,
				 "TID %d state is %d\n",
				 tid, tid_data->state)) {
				spin_unlock_bh(&mvmsta->lock);
				ieee80211_sta_eosp(sta);
				return;
			}

			n_queued = iwl_mvm_tid_queued(tid_data);
			if (n_queued > remaining) {
				more_data = true;
				remaining = 0;
				break;
			}
			remaining -= n_queued;
		}
		sleep_tx_count = cnt - remaining;
		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
			mvmsta->sleep_tx_count = sleep_tx_count;
		spin_unlock_bh(&mvmsta->lock);

		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
		if (WARN_ON(cnt - remaining == 0)) {
			ieee80211_sta_eosp(sta);
			return;
		}
	}

	/* Note: this is ignored by firmware not supporting GO uAPSD */
	if (more_data)
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_MOREDATA);

	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
		mvmsta->next_status_eosp = true;
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_PS_POLL);
	} else {
		cmd.sleep_state_flags |= cpu_to_le16(STA_SLEEP_STATE_UAPSD);
	}

	/* block the Tx queues until the FW updated the sleep Tx count */
	iwl_trans_block_txq_ptrs(mvm->trans, true);

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
			   struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
	struct ieee80211_sta *sta;
	u32 sta_id = le32_to_cpu(notif->sta_id);

	if (WARN_ON_ONCE(sta_id >= IWL_MVM_STATION_COUNT))
		return;

	rcu_read_lock();
	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
	if (!IS_ERR_OR_NULL(sta))
		ieee80211_sta_eosp(sta);
	rcu_read_unlock();
}

void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
				   struct iwl_mvm_sta *mvmsta, bool disable)
{
	struct iwl_mvm_add_sta_cmd cmd = {
		.add_modify = STA_MODE_MODIFY,
		.sta_id = mvmsta->sta_id,
		.station_flags = disable ?
				 cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
	};
	int ret;

	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
}

void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      bool disable)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvm_sta->lock);

	if (mvm_sta->disable_tx == disable) {
		spin_unlock_bh(&mvm_sta->lock);
		return;
	}

	mvm_sta->disable_tx = disable;

	/*
	 * Tell mac80211 to start/stop queuing tx for this station,
	 * but don't stop queuing if there are still pending frames
	 * for this station.
	 */
	if (disable || !atomic_read(&mvm->pending_frames[mvm_sta->sta_id]))
		ieee80211_sta_block_awake(mvm->hw, sta, disable);

	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);

	spin_unlock_bh(&mvm_sta->lock);
}

void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
				       struct iwl_mvm_vif *mvmvif,
				       bool disable)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	int i;

	lockdep_assert_held(&mvm->mutex);

	/* Block/unblock all the stations of the given mvmvif */
	for (i = 0; i < IWL_MVM_STATION_COUNT; i++) {
		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[i],
						lockdep_is_held(&mvm->mutex));
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
		if (mvm_sta->mac_id_n_color !=
		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
			continue;

		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
	}
}

void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
{
	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
	struct iwl_mvm_sta *mvmsta;

	rcu_read_lock();

	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);

	if (!WARN_ON(!mvmsta))
		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);

	rcu_read_unlock();
}