// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2012-2015, 2018-2022 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <net/mac80211.h>
#if defined(__FreeBSD__)
#include <linux/cache.h>
#endif

#include "mvm.h"
#include "sta.h"
#include "rs.h"

/*
 * The new version of the ADD_STA command added new fields at the end of the
 * structure, so sending the size of the relevant API's structure is enough to
 * support both API versions.
 */
static inline int iwl_mvm_add_sta_cmd_size(struct iwl_mvm *mvm)
{
	if (iwl_mvm_has_new_rx_api(mvm) ||
	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		return sizeof(struct iwl_mvm_add_sta_cmd);
	else
		return sizeof(struct iwl_mvm_add_sta_cmd_v7);
}

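/*
 * Find the first unused entry in fw_id_to_mac_id[]. sta_id 0 is kept
 * reserved on non-station interfaces, since d0i3/d3 assume the AP's sta_id
 * (of a station vif) is 0. Returns IWL_MVM_INVALID_STA if all station IDs
 * are in use.
 */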
static int iwl_mvm_find_free_sta_id(struct iwl_mvm *mvm,
				    enum nl80211_iftype iftype)
{
	int sta_id;
	u32 reserved_ids = 0;

	BUILD_BUG_ON(IWL_MVM_STATION_COUNT_MAX > 32);
	WARN_ON_ONCE(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status));

	lockdep_assert_held(&mvm->mutex);

	/* d0i3/d3 assume the AP's sta_id (of a station vif) is 0; reserve it */
	if (iftype != NL80211_IFTYPE_STATION)
		reserved_ids = BIT(0);

	/* Don't take rcu_read_lock() since we are protected by mvm->mutex */
	for (sta_id = 0; sta_id < mvm->fw->ucode_capa.num_stations; sta_id++) {
		if (BIT(sta_id) & reserved_ids)
			continue;

		if (!rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					       lockdep_is_held(&mvm->mutex)))
			return sta_id;
	}
	return IWL_MVM_INVALID_STA;
}

/* send station add/update command to firmware */
int iwl_mvm_sta_send_to_fw(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			   bool update, unsigned int flags)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_add_sta_cmd add_sta_cmd = {
		.sta_id = mvm_sta->sta_id,
		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
		.add_modify = update ? 1 : 0,
		.station_flags_msk = cpu_to_le32(STA_FLG_FAT_EN_MSK |
						 STA_FLG_MIMO_EN_MSK |
						 STA_FLG_RTS_MIMO_PROT),
		.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg),
	};
	int ret;
	u32 status;
	u32 agg_size = 0, mpdu_dens = 0;

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		add_sta_cmd.station_type = mvm_sta->sta_type;

	if (!update || (flags & STA_MODIFY_QUEUES)) {
		memcpy(&add_sta_cmd.addr, sta->addr, ETH_ALEN);

		if (!iwl_mvm_has_new_tx_api(mvm)) {
			add_sta_cmd.tfd_queue_msk =
				cpu_to_le32(mvm_sta->tfd_queue_msk);

			if (flags & STA_MODIFY_QUEUES)
				add_sta_cmd.modify_mask |= STA_MODIFY_QUEUES;
		} else {
			WARN_ON(flags & STA_MODIFY_QUEUES);
		}
	}

	switch (sta->deflink.bandwidth) {
	case IEEE80211_STA_RX_BW_320:
	case IEEE80211_STA_RX_BW_160:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_160MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_80:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_80MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_40:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_FAT_EN_40MHZ);
		fallthrough;
	case IEEE80211_STA_RX_BW_20:
		if (sta->deflink.ht_cap.ht_supported)
			add_sta_cmd.station_flags |=
				cpu_to_le32(STA_FLG_FAT_EN_20MHZ);
		break;
	}

	switch (sta->deflink.rx_nss) {
	case 1:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case 2:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO2);
		break;
	case 3 ... 8:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_MIMO3);
		break;
	}

	switch (sta->smps_mode) {
	case IEEE80211_SMPS_AUTOMATIC:
	case IEEE80211_SMPS_NUM_MODES:
		WARN_ON(1);
		break;
	case IEEE80211_SMPS_STATIC:
		/* override NSS */
		add_sta_cmd.station_flags &= ~cpu_to_le32(STA_FLG_MIMO_EN_MSK);
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_MIMO_EN_SISO);
		break;
	case IEEE80211_SMPS_DYNAMIC:
		add_sta_cmd.station_flags |= cpu_to_le32(STA_FLG_RTS_MIMO_PROT);
		break;
	case IEEE80211_SMPS_OFF:
		/* nothing */
		break;
	}

	if (sta->deflink.ht_cap.ht_supported) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = sta->deflink.ht_cap.ampdu_density;
	}

	if (mvm_sta->vif->bss_conf.chandef.chan->band == NL80211_BAND_6GHZ) {
		add_sta_cmd.station_flags_msk |=
			cpu_to_le32(STA_FLG_MAX_AGG_SIZE_MSK |
				    STA_FLG_AGG_MPDU_DENS_MSK);

		mpdu_dens = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
					  IEEE80211_HE_6GHZ_CAP_MIN_MPDU_START);
		agg_size = le16_get_bits(sta->deflink.he_6ghz_capa.capa,
					 IEEE80211_HE_6GHZ_CAP_MAX_AMPDU_LEN_EXP);
	} else if (sta->deflink.vht_cap.vht_supported) {
		agg_size = sta->deflink.vht_cap.cap &
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK;
		agg_size >>=
			IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_SHIFT;
	} else if (sta->deflink.ht_cap.ht_supported) {
		agg_size = sta->deflink.ht_cap.ampdu_factor;
	}

	/* 802.11ax D6.0 10.12.2 A-MPDU length limit rules:
	 * A STA indicates the maximum length of the A-MPDU pre-EOF padding
	 * that it can receive in an HE PPDU in the Maximum A-MPDU Length
	 * Exponent field in its HT Capabilities, VHT Capabilities,
	 * and HE 6 GHz Band Capabilities elements (if present) and the
	 * Maximum AMPDU Length Exponent Extension field in its HE
	 * Capabilities element.
	 */
	if (sta->deflink.he_cap.has_he)
		agg_size += u8_get_bits(sta->deflink.he_cap.he_cap_elem.mac_cap_info[3],
					IEEE80211_HE_MAC_CAP3_MAX_AMPDU_LEN_EXP_MASK);

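	/*
	 * Worked example (illustrative; the numeric encoding of the
	 * STA_FLG_MAX_AGG_SIZE_* values lives in the fw/api headers): a VHT
	 * peer advertising the maximum VHT exponent of 7 (2^(13+7)-1 bytes,
	 * i.e. ~1 MiB A-MPDUs) plus an HE extension of 2 ends up with
	 * agg_size = 9. Assuming STA_FLG_MAX_AGG_SIZE_4M encodes that same
	 * exponent, the clamp below leaves it unchanged, while anything
	 * larger is cut down to the 4M the FW supports.
	 */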
	/* Limit to the max A-MPDU size supported by the FW */
	if (agg_size > (STA_FLG_MAX_AGG_SIZE_4M >> STA_FLG_MAX_AGG_SIZE_SHIFT))
		agg_size = (STA_FLG_MAX_AGG_SIZE_4M >>
			    STA_FLG_MAX_AGG_SIZE_SHIFT);

	add_sta_cmd.station_flags |=
		cpu_to_le32(agg_size << STA_FLG_MAX_AGG_SIZE_SHIFT);
	add_sta_cmd.station_flags |=
		cpu_to_le32(mpdu_dens << STA_FLG_AGG_MPDU_DENS_SHIFT);
	if (mvm_sta->sta_state >= IEEE80211_STA_ASSOC)
		add_sta_cmd.assoc_id = cpu_to_le16(sta->aid);

	if (sta->wme) {
		add_sta_cmd.modify_mask |= STA_MODIFY_UAPSD_ACS;

		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BK)
			add_sta_cmd.uapsd_acs |= BIT(AC_BK);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_BE)
			add_sta_cmd.uapsd_acs |= BIT(AC_BE);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VI)
			add_sta_cmd.uapsd_acs |= BIT(AC_VI);
		if (sta->uapsd_queues & IEEE80211_WMM_IE_STA_QOSINFO_AC_VO)
			add_sta_cmd.uapsd_acs |= BIT(AC_VO);
		add_sta_cmd.uapsd_acs |= add_sta_cmd.uapsd_acs << 4;
		add_sta_cmd.sp_length = sta->max_sp ? sta->max_sp * 2 : 128;
	}

	status = ADD_STA_SUCCESS;
	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &add_sta_cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_ASSOC(mvm, "ADD_STA PASSED\n");
		break;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "ADD_STA failed\n");
		break;
	}

	return ret;
}

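/*
 * Timer callback for RX BA session expiry. If the session saw RX within
 * twice the negotiated timeout (in TUs), the timer is simply re-armed;
 * otherwise the expired session is reported to mac80211 via
 * ieee80211_rx_ba_timer_expired().
 */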
static void iwl_mvm_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mvm_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mvm_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mvm_baid_data *ba_data;
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvm_sta;
	unsigned long timeout;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);

	if (WARN_ON(!ba_data))
		goto unlock;

	if (!ba_data->timeout)
		goto unlock;

	timeout = ba_data->last_rx + TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* Timer expired */
	sta = rcu_dereference(ba_data->mvm->fw_id_to_mac_id[ba_data->sta_id]);

	/*
	 * sta should be valid unless the following happens:
	 * The firmware asserts, which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_mac_id pointer table. Mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (!sta)
		goto unlock;

	mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	ieee80211_rx_ba_timer_expired(mvm_sta->vif,
				      sta->addr, ba_data->tid);
unlock:
	rcu_read_unlock();
}

/* Disable aggregations for a bitmap of TIDs for a given station */
static int iwl_mvm_invalidate_sta_queue(struct iwl_mvm *mvm, int queue,
					unsigned long disable_agg_tids,
					bool remove_queue)
{
	struct iwl_mvm_add_sta_cmd cmd = {};
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u32 status;
	u8 sta_id;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return -EINVAL;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	mvmsta->tid_disable_agg |= disable_agg_tids;

	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
	cmd.sta_id = mvmsta->sta_id;
	cmd.add_modify = STA_MODE_MODIFY;
	cmd.modify_mask = STA_MODIFY_QUEUES;
	if (disable_agg_tids)
		cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
	if (remove_queue)
		cmd.modify_mask |= STA_MODIFY_QUEUE_REMOVAL;
	cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

	rcu_read_unlock();

	/* Notify FW of queue removal from the STA queues */
	status = ADD_STA_SUCCESS;
	return iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					   iwl_mvm_add_sta_cmd_size(mvm),
					   &cmd, &status);
}

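/*
 * Tear down a TX queue. On the new TX path this frees the transport queue
 * (optionally sending SCD_QUEUE_CONFIG_CMD first) and sets *queueptr to
 * IWL_MVM_INVALID_QUEUE; on the old path it clears the TID's bit and only
 * fully disables the queue once no TIDs remain mapped to it.
 *
 * Minimal usage sketch (hypothetical caller, for illustration only):
 *
 *	u16 q = mvmsta->tid_data[tid].txq_id;
 *
 *	ret = iwl_mvm_disable_txq(mvm, sta, &q, tid);
 *	// with the new TX API, q is now IWL_MVM_INVALID_QUEUE
 */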
static int iwl_mvm_disable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       u16 *queueptr, u8 tid)
{
	int queue = *queueptr;
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm)) {
		if (mvm->sta_remove_requires_queue_remove) {
			u32 cmd_id = WIDE_ID(DATA_PATH_GROUP,
					     SCD_QUEUE_CONFIG_CMD);
			struct iwl_scd_queue_cfg_cmd remove_cmd = {
				.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
				.u.remove.queue = cpu_to_le32(queue),
			};

			ret = iwl_mvm_send_cmd_pdu(mvm, cmd_id, 0,
						   sizeof(remove_cmd),
						   &remove_cmd);
		} else {
			ret = 0;
		}

		iwl_trans_txq_free(mvm->trans, queue);
		*queueptr = IWL_MVM_INVALID_QUEUE;

		return ret;
	}

	if (WARN_ON(mvm->queue_info[queue].tid_bitmap == 0))
		return 0;

	mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

	cmd.action = mvm->queue_info[queue].tid_bitmap ?
		SCD_CFG_ENABLE_QUEUE : SCD_CFG_DISABLE_QUEUE;
	if (cmd.action == SCD_CFG_DISABLE_QUEUE)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_FREE;

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Disabling TXQ #%d tids=0x%x\n",
			    queue,
			    mvm->queue_info[queue].tid_bitmap);

	/* If the queue is still enabled, there is nothing left to do here */
	if (cmd.action == SCD_CFG_ENABLE_QUEUE)
		return 0;

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tid = mvm->queue_info[queue].txq_tid;

	/* Make sure queue info is correct even though we overwrite it */
	WARN(mvm->queue_info[queue].tid_bitmap,
	     "TXQ #%d info out-of-sync - tids=0x%x\n",
	     queue, mvm->queue_info[queue].tid_bitmap);

	/* If we are here, the queue is freed and we can zero out these vals */
	mvm->queue_info[queue].tid_bitmap = 0;

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	/* Even if this TXQ was reserved for a STA, mark it as not reserved */
	mvm->queue_info[queue].reserved = false;

	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0,
				   sizeof(struct iwl_scd_txq_cfg_cmd), &cmd);

	if (ret)
		IWL_ERR(mvm, "Failed to disable queue %d (ret=%d)\n",
			queue, ret);
	return ret;
}

static int iwl_mvm_get_queue_agg_tids(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return -EINVAL;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			agg_tids |= BIT(tid);
	}
	spin_unlock_bh(&mvmsta->lock);

	return agg_tids;
}

/*
 * Remove a queue from a station's resources.
 * Note that this only marks the queue as free. It DOESN'T delete the BA
 * agreement, and doesn't disable the queue.
 */
static int iwl_mvm_remove_sta_queue_marking(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	unsigned long tid_bitmap;
	unsigned long disable_agg_tids = 0;
	u8 sta_id;
	int tid;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	rcu_read_lock();

	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta))) {
		rcu_read_unlock();
		return 0;
	}

	mvmsta = iwl_mvm_sta_from_mac80211(sta);

	spin_lock_bh(&mvmsta->lock);
	/* Unmap MAC queues and TIDs from this queue */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		if (mvmsta->tid_data[tid].state == IWL_AGG_ON)
			disable_agg_tids |= BIT(tid);
		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;

		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
	}

	mvmsta->tfd_queue_msk &= ~BIT(queue); /* Don't use this queue anymore */
	spin_unlock_bh(&mvmsta->lock);

	rcu_read_unlock();

	/*
	 * The TX path may have been using this TXQ_ID from the tid_data,
	 * so make sure it's no longer running so that we can safely reuse
	 * this TXQ later. We've set all the TIDs to IWL_MVM_INVALID_QUEUE
	 * above, but nothing guarantees we've stopped using them. Thus,
	 * without this, we could get to iwl_mvm_disable_txq() and remove
	 * the queue while still sending frames to it.
	 */
	synchronize_net();

	return disable_agg_tids;
}

static int iwl_mvm_free_inactive_queue(struct iwl_mvm *mvm, int queue,
				       struct ieee80211_sta *old_sta,
				       u8 new_sta_id)
{
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id, tid;
	unsigned long disable_agg_tids = 0;
	bool same_sta;
	u16 queue_tmp = queue;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid = mvm->queue_info[queue].txq_tid;

	same_sta = sta_id == new_sta_id;

	mvmsta = iwl_mvm_sta_from_staid_protected(mvm, sta_id);
	if (WARN_ON(!mvmsta))
		return -EINVAL;

	disable_agg_tids = iwl_mvm_remove_sta_queue_marking(mvm, queue);
	/* Disable the queue */
	if (disable_agg_tids)
		iwl_mvm_invalidate_sta_queue(mvm, queue,
					     disable_agg_tids, false);

	ret = iwl_mvm_disable_txq(mvm, old_sta, &queue_tmp, tid);
	if (ret) {
		IWL_ERR(mvm,
			"Failed to free inactive queue %d (ret=%d)\n",
			queue, ret);

		return ret;
	}

	/* If TXQ is allocated to another STA, update removal in FW */
	if (!same_sta)
		iwl_mvm_invalidate_sta_queue(mvm, queue, 0, true);

	return 0;
}

static int iwl_mvm_get_shared_queue(struct iwl_mvm *mvm,
				    unsigned long tfd_queue_mask, u8 ac)
{
	int queue = 0;
	u8 ac_to_queue[IEEE80211_NUM_ACS];
	int i;

	/*
	 * This protects us against grabbing a queue that's being reconfigured
	 * by the inactivity checker.
	 */
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	memset(&ac_to_queue, IEEE80211_INVAL_HW_QUEUE, sizeof(ac_to_queue));

	/* See what ACs the existing queues for this STA have */
	for_each_set_bit(i, &tfd_queue_mask, IWL_MVM_DQA_MAX_DATA_QUEUE) {
		/* Only DATA queues can be shared */
		if (i < IWL_MVM_DQA_MIN_DATA_QUEUE &&
		    i != IWL_MVM_DQA_BSS_CLIENT_QUEUE)
			continue;

		ac_to_queue[mvm->queue_info[i].mac80211_ac] = i;
	}

	/*
	 * The queue to share is chosen only from DATA queues as follows (in
	 * descending priority):
	 * 1. An AC_BE queue
	 * 2. Same AC queue
	 * 3. Highest AC queue that is lower than new AC
	 * 4. Any existing AC (there always is at least 1 DATA queue)
	 */

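	/*
	 * Example: if the new TID's AC is VO and the STA currently has DATA
	 * queues only for VI and BK, rules 1 and 2 both fail, and rule 3a
	 * below picks the VI queue.
	 */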
	/* Priority 1: An AC_BE queue */
	if (ac_to_queue[IEEE80211_AC_BE] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BE];
	/* Priority 2: Same AC queue */
	else if (ac_to_queue[ac] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[ac];
	/* Priority 3a: If new AC is VO and VI exists - use VI */
	else if (ac == IEEE80211_AC_VO &&
		 ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 3b: No BE, so the only AC lower than the new one is BK */
	else if (ac_to_queue[IEEE80211_AC_BK] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_BK];
	/* Priority 4a: No BE nor BK - use VI if it exists */
	else if (ac_to_queue[IEEE80211_AC_VI] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VI];
	/* Priority 4b: No BE, BK nor VI - use VO if it exists */
	else if (ac_to_queue[IEEE80211_AC_VO] != IEEE80211_INVAL_HW_QUEUE)
		queue = ac_to_queue[IEEE80211_AC_VO];

	/* Make sure the queue found (or not) is legal */
	if (!iwl_mvm_is_dqa_data_queue(mvm, queue) &&
	    !iwl_mvm_is_dqa_mgmt_queue(mvm, queue) &&
	    (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)) {
		IWL_ERR(mvm, "No DATA queues available to share\n");
		return -ENOSPC;
	}

	return queue;
}

/* Re-configure the SCD for a queue that has already been configured */
static int iwl_mvm_reconfig_scd(struct iwl_mvm *mvm, int queue, int fifo,
				int sta_id, int tid, int frame_limit, u16 ssn)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = frame_limit,
		.sta_id = sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = fifo,
		.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			      queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE),
		.tid = tid,
	};
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	if (WARN(mvm->queue_info[queue].tid_bitmap == 0,
		 "Trying to reconfig unallocated queue %d\n", queue))
		return -ENXIO;

	IWL_DEBUG_TX_QUEUES(mvm, "Reconfig SCD for TXQ #%d\n", queue);

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	WARN_ONCE(ret, "Failed to re-configure queue %d on FIFO %d, ret=%d\n",
		  queue, fifo, ret);

	return ret;
}

/*
 * If a given queue has a higher AC than the TID stream that is being compared
 * to, the queue needs to be redirected to the lower AC. This function does
 * that in such a case; otherwise, if no redirection is required, it does
 * nothing, unless the %force param is true.
 */
static int iwl_mvm_redirect_queue(struct iwl_mvm *mvm, int queue, int tid,
				  int ac, int ssn, unsigned int wdg_timeout,
				  bool force, struct iwl_mvm_txq *txq)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_DISABLE_QUEUE,
	};
	bool shared_queue;
	int ret;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -EINVAL;

	/*
	 * If the AC is lower than the current one - the FIFO needs to be
	 * redirected to the lowest one of the streams in the queue. Check if
	 * this is needed here.
	 * Notice that the enum ieee80211_ac_numbers is "flipped", so BK is
	 * with value 3 and VO with value 0, so to check if ac X is lower than
	 * ac Y we need to check if the numerical value of X is LARGER than
	 * of Y.
	 */
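	/*
	 * For example, with VO=0, VI=1, BE=2, BK=3: adding a BK (3) TID to a
	 * queue currently marked as BE (2) gives 3 > 2, so the queue's FIFO
	 * must be redirected down to BK; adding a VI (1) TID to that same
	 * queue gives 1 <= 2 and no redirection happens.
	 */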
	if (ac <= mvm->queue_info[queue].mac80211_ac && !force) {
		IWL_DEBUG_TX_QUEUES(mvm,
				    "No redirection needed on TXQ #%d\n",
				    queue);
		return 0;
	}

	cmd.sta_id = mvm->queue_info[queue].ra_sta_id;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[mvm->queue_info[queue].mac80211_ac];
	cmd.tid = mvm->queue_info[queue].txq_tid;
	shared_queue = hweight16(mvm->queue_info[queue].tid_bitmap) > 1;

	IWL_DEBUG_TX_QUEUES(mvm, "Redirecting TXQ #%d to FIFO #%d\n",
			    queue, iwl_mvm_ac_to_tx_fifo[ac]);

	/* Stop the queue and wait for it to empty */
	txq->stopped = true;

	ret = iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(queue));
	if (ret) {
		IWL_ERR(mvm, "Error draining queue %d before reconfig\n",
			queue);
		ret = -EIO;
		goto out;
	}

	/* Before redirecting the queue we need to de-activate it */
	iwl_trans_txq_disable(mvm->trans, queue, false);
	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret)
		IWL_ERR(mvm, "Failed SCD disable TXQ %d (ret=%d)\n", queue,
			ret);

	/* Make sure the SCD wrptr is correctly set before reconfiguring */
	iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn, NULL, wdg_timeout);

	/* Update the TID "owner" of the queue */
	mvm->queue_info[queue].txq_tid = tid;

	/* TODO: Work-around SCD bug when moving back by multiples of 0x40 */

	/* Redirect to lower AC */
	iwl_mvm_reconfig_scd(mvm, queue, iwl_mvm_ac_to_tx_fifo[ac],
			     cmd.sta_id, tid, IWL_FRAME_LIMIT, ssn);

	/* Update AC marking of the queue */
	mvm->queue_info[queue].mac80211_ac = ac;

	/*
	 * Mark the queue as shared in the transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

out:
	/* Continue using the queue */
	txq->stopped = false;

	return ret;
}

static int iwl_mvm_find_free_queue(struct iwl_mvm *mvm, u8 sta_id,
				   u8 minq, u8 maxq)
{
	int i;

	lockdep_assert_held(&mvm->mutex);

	if (WARN(maxq >= mvm->trans->trans_cfg->base_params->num_of_queues,
		 "max queue %d >= num_of_queues (%d)", maxq,
		 mvm->trans->trans_cfg->base_params->num_of_queues))
		maxq = mvm->trans->trans_cfg->base_params->num_of_queues - 1;

	/* This should not be hit with new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return -ENOSPC;

	/* Start by looking for a free queue */
	for (i = minq; i <= maxq; i++)
		if (mvm->queue_info[i].tid_bitmap == 0 &&
		    mvm->queue_info[i].status == IWL_MVM_QUEUE_FREE)
			return i;

	return -ENOSPC;
}

static int iwl_mvm_tvqm_enable_txq(struct iwl_mvm *mvm,
				   u8 sta_id, u8 tid, unsigned int timeout)
{
	int queue, size;

	if (tid == IWL_MAX_TID_COUNT) {
		tid = IWL_MGMT_TID;
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mvm->trans->cfg->min_txq_size);
	} else {
		struct ieee80211_sta *sta;

		rcu_read_lock();
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/* this queue isn't used for traffic (cab_queue) */
		if (IS_ERR_OR_NULL(sta)) {
			size = IWL_MGMT_QUEUE_SIZE;
		} else if (sta->deflink.he_cap.has_he) {
			/* support for a 256-frame BA window */
			size = IWL_DEFAULT_QUEUE_SIZE_HE;
		} else {
			size = IWL_DEFAULT_QUEUE_SIZE;
		}

		rcu_read_unlock();
	}

	/* take the min with the number of bc tbl entries allowed */
	size = min_t(u32, size, mvm->trans->txqs.bc_tbl_size / sizeof(u16));

	/* size needs to be a power of 2 for calculating read/write pointers */
	size = rounddown_pow_of_two(size);

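	/*
	 * For example, an HE peer starts from IWL_DEFAULT_QUEUE_SIZE_HE; if
	 * the byte-count table allows fewer entries, that smaller value wins,
	 * and the result is then rounded down to a power of two. The loop
	 * below keeps halving the size on allocation failure, giving up once
	 * it would drop below 16 entries.
	 */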
	do {
		queue = iwl_trans_txq_alloc(mvm->trans, 0, BIT(sta_id),
					    tid, size, timeout);

		if (queue < 0)
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Failed allocating TXQ of size %d for sta %d tid %d, ret: %d\n",
					    size, sta_id, tid, queue);
		size /= 2;
	} while (queue < 0 && size >= 16);

	if (queue < 0)
		return queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Enabling TXQ #%d for sta %d tid %d\n",
			    queue, sta_id, tid);

	return queue;
}

static int iwl_mvm_sta_alloc_queue_tvqm(struct iwl_mvm *mvm,
					struct ieee80211_sta *sta, u8 ac,
					int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_mvm_txq *mvmtxq =
		iwl_mvm_txq_from_tid(sta, tid);
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;

	lockdep_assert_held(&mvm->mutex);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating queue for sta %d on tid %d\n",
			    mvmsta->sta_id, tid);
	queue = iwl_mvm_tvqm_enable_txq(mvm, mvmsta->sta_id, tid, wdg_timeout);
	if (queue < 0)
		return queue;

	mvmtxq->txq_id = queue;
	mvm->tvqm_info[queue].txq_tid = tid;
	mvm->tvqm_info[queue].sta_id = mvmsta->sta_id;

	IWL_DEBUG_TX_QUEUES(mvm, "Allocated queue is %d\n", queue);

	spin_lock_bh(&mvmsta->lock);
	mvmsta->tid_data[tid].txq_id = queue;
	spin_unlock_bh(&mvmsta->lock);

	return 0;
}

static bool iwl_mvm_update_txq_mapping(struct iwl_mvm *mvm,
				       struct ieee80211_sta *sta,
				       int queue, u8 sta_id, u8 tid)
{
	bool enable_queue = true;

	/* Make sure this TID isn't already enabled */
	if (mvm->queue_info[queue].tid_bitmap & BIT(tid)) {
		IWL_ERR(mvm, "Trying to enable TXQ %d with existing TID %d\n",
			queue, tid);
		return false;
	}

	/* Update mappings and refcounts */
	if (mvm->queue_info[queue].tid_bitmap)
		enable_queue = false;

	mvm->queue_info[queue].tid_bitmap |= BIT(tid);
	mvm->queue_info[queue].ra_sta_id = sta_id;

	if (enable_queue) {
		if (tid != IWL_MAX_TID_COUNT)
			mvm->queue_info[queue].mac80211_ac =
				tid_to_mac80211_ac[tid];
		else
			mvm->queue_info[queue].mac80211_ac = IEEE80211_AC_VO;

		mvm->queue_info[queue].txq_tid = tid;
	}

	if (sta) {
		struct iwl_mvm_txq *mvmtxq =
			iwl_mvm_txq_from_tid(sta, tid);

		mvmtxq->txq_id = queue;
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Enabling TXQ #%d tids=0x%x\n",
			    queue, mvm->queue_info[queue].tid_bitmap);

	return enable_queue;
}

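/*
 * Enable a TXQ on the old TX path (!iwl_mvm_has_new_tx_api()) and map it to
 * the given STA/TID via iwl_mvm_update_txq_mapping(). Returns %true if the
 * hardware had to bump the SSN, in which case the caller must advance its
 * own sequence number as well (see iwl_mvm_sta_alloc_queue(), which then
 * adds 0x10 to the TID's seq_number).
 */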
static bool iwl_mvm_enable_txq(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
			       int queue, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_ENABLE_QUEUE,
		.window = cfg->frame_limit,
		.sta_id = cfg->sta_id,
		.ssn = cpu_to_le16(ssn),
		.tx_fifo = cfg->fifo,
		.aggregate = cfg->aggregate,
		.tid = cfg->tid,
	};
	bool inc_ssn;

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Send the enabling command if we need to */
	if (!iwl_mvm_update_txq_mapping(mvm, sta, queue, cfg->sta_id, cfg->tid))
		return false;

	inc_ssn = iwl_trans_txq_enable_cfg(mvm->trans, queue, ssn,
					   NULL, wdg_timeout);
	if (inc_ssn)
		le16_add_cpu(&cmd.ssn, 1);

	WARN(iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd),
	     "Failed to configure queue %d on FIFO %d\n", queue, cfg->fifo);

	return inc_ssn;
}

static void iwl_mvm_change_queue_tid(struct iwl_mvm *mvm, int queue)
{
	struct iwl_scd_txq_cfg_cmd cmd = {
		.scd_queue = queue,
		.action = SCD_CFG_UPDATE_QUEUE_TID,
	};
	int tid;
	unsigned long tid_bitmap;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	if (WARN(!tid_bitmap, "TXQ %d has no tids assigned to it\n", queue))
		return;

	/* Find any TID for the queue */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	cmd.tid = tid;
	cmd.tx_fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];

	ret = iwl_mvm_send_cmd_pdu(mvm, SCD_QUEUE_CFG, 0, sizeof(cmd), &cmd);
	if (ret) {
		IWL_ERR(mvm, "Failed to update owner of TXQ %d (ret=%d)\n",
			queue, ret);
		return;
	}

	mvm->queue_info[queue].txq_tid = tid;
	IWL_DEBUG_TX_QUEUES(mvm, "Changed TXQ %d ownership to tid %d\n",
			    queue, tid);
}

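/*
 * Reconfigure a formerly shared queue that has only a single TID left on it
 * back to an unshared queue: redirect it to the TID's own AC, re-enable TX
 * aggregation on that TID if it was on before, and mark the queue ready.
 */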
static void iwl_mvm_unshare_queue(struct iwl_mvm *mvm, int queue)
{
	struct ieee80211_sta *sta;
	struct iwl_mvm_sta *mvmsta;
	u8 sta_id;
	int tid = -1;
	unsigned long tid_bitmap;
	unsigned int wdg_timeout;
	int ssn;
	int ret = true;

	/* queue sharing is disabled on the new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return;

	lockdep_assert_held(&mvm->mutex);

	sta_id = mvm->queue_info[queue].ra_sta_id;
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* Find the TID for the queue, and make sure it is the only one on it */
	tid = find_first_bit(&tid_bitmap, IWL_MAX_TID_COUNT + 1);
	if (tid_bitmap != BIT(tid)) {
		IWL_ERR(mvm, "Failed to unshare q %d, active tids=0x%lx\n",
			queue, tid_bitmap);
		return;
	}

	IWL_DEBUG_TX_QUEUES(mvm, "Unsharing TXQ %d, keeping tid %d\n", queue,
			    tid);

	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
					lockdep_is_held(&mvm->mutex));

	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta)))
		return;

	mvmsta = iwl_mvm_sta_from_mac80211(sta);
	wdg_timeout = iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);

	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);

	ret = iwl_mvm_redirect_queue(mvm, queue, tid,
				     tid_to_mac80211_ac[tid], ssn,
				     wdg_timeout, true,
				     iwl_mvm_txq_from_tid(sta, tid));
	if (ret) {
		IWL_ERR(mvm, "Failed to redirect TXQ %d\n", queue);
		return;
	}

	/* If aggs should be turned back on - do it */
	if (mvmsta->tid_data[tid].state == IWL_AGG_ON) {
		struct iwl_mvm_add_sta_cmd cmd = {0};

		mvmsta->tid_disable_agg &= ~BIT(tid);

		cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
		cmd.sta_id = mvmsta->sta_id;
		cmd.add_modify = STA_MODE_MODIFY;
		cmd.modify_mask = STA_MODIFY_TID_DISABLE_TX;
		cmd.tfd_queue_msk = cpu_to_le32(mvmsta->tfd_queue_msk);
		cmd.tid_disable_tx = cpu_to_le16(mvmsta->tid_disable_agg);

		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
					   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
		if (!ret) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "TXQ #%d is now aggregated again\n",
					    queue);

			/* Mark queue internally as aggregating again */
			iwl_trans_txq_set_shared_mode(mvm->trans, queue, false);
		}
	}

	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
}

/*
 * Remove inactive TIDs of a given queue.
 * If all queue TIDs are inactive - mark the queue as inactive.
 * If only some of the queue's TIDs are inactive - unmap them from the queue.
 *
 * Returns %true if all TIDs were removed and the queue could be reused.
 */
static bool iwl_mvm_remove_inactive_tids(struct iwl_mvm *mvm,
					 struct iwl_mvm_sta *mvmsta, int queue,
					 unsigned long tid_bitmap,
					 unsigned long *unshare_queues,
					 unsigned long *changetid_queues)
{
	int tid;

	lockdep_assert_held(&mvmsta->lock);
	lockdep_assert_held(&mvm->mutex);

	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return false;

	/* Go over all non-active TIDs, incl. IWL_MAX_TID_COUNT (for mgmt) */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		/* If some TFDs are still queued - don't mark TID as inactive */
		if (iwl_mvm_tid_queued(mvm, &mvmsta->tid_data[tid]))
			tid_bitmap &= ~BIT(tid);

		/* Don't mark as inactive any TID that has an active BA */
		if (mvmsta->tid_data[tid].state != IWL_AGG_OFF)
			tid_bitmap &= ~BIT(tid);
	}

	/* If all TIDs in the queue are inactive - report it can be reused */
	if (tid_bitmap == mvm->queue_info[queue].tid_bitmap) {
		IWL_DEBUG_TX_QUEUES(mvm, "Queue %d is inactive\n", queue);
		return true;
	}

	/*
	 * If we are here, this is a shared queue and not all TIDs timed-out.
	 * Remove the ones that did.
	 */
	for_each_set_bit(tid, &tid_bitmap, IWL_MAX_TID_COUNT + 1) {
		u16 q_tid_bitmap;

		mvmsta->tid_data[tid].txq_id = IWL_MVM_INVALID_QUEUE;
		mvm->queue_info[queue].tid_bitmap &= ~BIT(tid);

		q_tid_bitmap = mvm->queue_info[queue].tid_bitmap;

		/*
		 * We need to take into account a situation in which a TXQ was
		 * allocated to TID x, and then turned shared by adding TIDs y
		 * and z. If TID x becomes inactive and is removed from the TXQ,
		 * ownership must be given to one of the remaining TIDs.
		 * This is mainly because if TID x continues - a new queue can't
		 * be allocated for it as long as it is an owner of another TXQ.
		 *
		 * Mark this queue in the right bitmap, we'll send the command
		 * to the firmware later.
		 */
		if (!(q_tid_bitmap & BIT(mvm->queue_info[queue].txq_tid)))
			set_bit(queue, changetid_queues);

		IWL_DEBUG_TX_QUEUES(mvm,
				    "Removing inactive TID %d from shared Q:%d\n",
				    tid, queue);
	}

	IWL_DEBUG_TX_QUEUES(mvm,
			    "TXQ #%d left with tid bitmap 0x%x\n", queue,
			    mvm->queue_info[queue].tid_bitmap);

	/*
	 * There may be different TIDs with the same mac queues, so make
	 * sure all TIDs have existing corresponding mac queues enabled
	 */
	tid_bitmap = mvm->queue_info[queue].tid_bitmap;

	/* If the queue is marked as shared - "unshare" it */
	if (hweight16(mvm->queue_info[queue].tid_bitmap) == 1 &&
	    mvm->queue_info[queue].status == IWL_MVM_QUEUE_SHARED) {
		IWL_DEBUG_TX_QUEUES(mvm, "Marking Q:%d for reconfig\n",
				    queue);
		set_bit(queue, unshare_queues);
	}

	return false;
}

/*
 * Check for inactivity - this includes checking if any queue
 * can be unshared and finding one (and only one) that can be
 * reused.
 * This function is also invoked as a sort of clean-up task,
 * in which case @alloc_for_sta is IWL_MVM_INVALID_STA.
 *
 * Returns the queue number, or -ENOSPC.
 */
static int iwl_mvm_inactivity_check(struct iwl_mvm *mvm, u8 alloc_for_sta)
{
	unsigned long now = jiffies;
	unsigned long unshare_queues = 0;
	unsigned long changetid_queues = 0;
	int i, ret, free_queue = -ENOSPC;
	struct ieee80211_sta *queue_owner = NULL;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return -ENOSPC;

	rcu_read_lock();

	/* we skip the CMD queue below by starting at 1 */
	BUILD_BUG_ON(IWL_MVM_DQA_CMD_QUEUE != 0);

	for (i = 1; i < IWL_MAX_HW_QUEUES; i++) {
		struct ieee80211_sta *sta;
		struct iwl_mvm_sta *mvmsta;
		u8 sta_id;
		int tid;
		unsigned long inactive_tid_bitmap = 0;
		unsigned long queue_tid_bitmap;

		queue_tid_bitmap = mvm->queue_info[i].tid_bitmap;
		if (!queue_tid_bitmap)
			continue;

		/* If TXQ isn't in active use anyway - nothing to do here... */
		if (mvm->queue_info[i].status != IWL_MVM_QUEUE_READY &&
		    mvm->queue_info[i].status != IWL_MVM_QUEUE_SHARED)
			continue;

		/* Check to see if there are inactive TIDs on this queue */
		for_each_set_bit(tid, &queue_tid_bitmap,
				 IWL_MAX_TID_COUNT + 1) {
			if (time_after(mvm->queue_info[i].last_frame_time[tid] +
				       IWL_MVM_DQA_QUEUE_TIMEOUT, now))
				continue;

			inactive_tid_bitmap |= BIT(tid);
		}

		/* If all TIDs are active - finish check on this queue */
		if (!inactive_tid_bitmap)
			continue;

		/*
		 * If we are here - the queue hasn't been served recently and
		 * is in use
		 */

		sta_id = mvm->queue_info[i].ra_sta_id;
		sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);

		/*
		 * If the STA doesn't exist anymore, it isn't an error. It could
		 * be that it was removed since getting the queues, and in this
		 * case it should've inactivated its queues anyway.
		 */
		if (IS_ERR_OR_NULL(sta))
			continue;

		mvmsta = iwl_mvm_sta_from_mac80211(sta);

		spin_lock_bh(&mvmsta->lock);
		ret = iwl_mvm_remove_inactive_tids(mvm, mvmsta, i,
						   inactive_tid_bitmap,
						   &unshare_queues,
						   &changetid_queues);
		if (ret && free_queue < 0) {
			queue_owner = sta;
			free_queue = i;
		}
		/* only unlock sta lock - we still need the queue info lock */
		spin_unlock_bh(&mvmsta->lock);
	}

	/* Reconfigure queues requiring reconfiguration */
	for_each_set_bit(i, &unshare_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_unshare_queue(mvm, i);
	for_each_set_bit(i, &changetid_queues, IWL_MAX_HW_QUEUES)
		iwl_mvm_change_queue_tid(mvm, i);

	rcu_read_unlock();

	if (free_queue >= 0 && alloc_for_sta != IWL_MVM_INVALID_STA) {
		ret = iwl_mvm_free_inactive_queue(mvm, free_queue, queue_owner,
						  alloc_for_sta);
		if (ret)
			return ret;
	}

	return free_queue;
}

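/*
 * Allocate a TX queue for a STA/TID on the old TX path. The search order is:
 * the MGMT queue range for the management TID, then the STA's reserved
 * queue, then a free DATA queue, then reclaiming an inactive queue, and
 * finally sharing an already-allocated DATA queue. On the new TX path this
 * simply delegates to iwl_mvm_sta_alloc_queue_tvqm().
 */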
static int iwl_mvm_sta_alloc_queue(struct iwl_mvm *mvm,
				   struct ieee80211_sta *sta, u8 ac, int tid)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	struct iwl_trans_txq_scd_cfg cfg = {
		.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac),
		.sta_id = mvmsta->sta_id,
		.tid = tid,
		.frame_limit = IWL_FRAME_LIMIT,
	};
	unsigned int wdg_timeout =
		iwl_mvm_get_wd_timeout(mvm, mvmsta->vif, false, false);
	int queue = -1;
	u16 queue_tmp;
	unsigned long disable_agg_tids = 0;
	enum iwl_mvm_agg_state queue_state;
	bool shared_queue = false, inc_ssn;
	int ssn;
	unsigned long tfd_queue_mask;
	int ret;

	lockdep_assert_held(&mvm->mutex);

	if (iwl_mvm_has_new_tx_api(mvm))
		return iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);

	spin_lock_bh(&mvmsta->lock);
	tfd_queue_mask = mvmsta->tfd_queue_msk;
	ssn = IEEE80211_SEQ_TO_SN(mvmsta->tid_data[tid].seq_number);
	spin_unlock_bh(&mvmsta->lock);

	if (tid == IWL_MAX_TID_COUNT) {
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_MGMT_QUEUE,
						IWL_MVM_DQA_MAX_MGMT_QUEUE);
		if (queue >= IWL_MVM_DQA_MIN_MGMT_QUEUE)
			IWL_DEBUG_TX_QUEUES(mvm, "Found free MGMT queue #%d\n",
					    queue);

		/* If no such queue is found, we'll use a DATA queue instead */
	}

	if ((queue < 0 && mvmsta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) &&
	    (mvm->queue_info[mvmsta->reserved_queue].status ==
			IWL_MVM_QUEUE_RESERVED)) {
		queue = mvmsta->reserved_queue;
		mvm->queue_info[queue].reserved = true;
		IWL_DEBUG_TX_QUEUES(mvm, "Using reserved queue #%d\n", queue);
	}

	if (queue < 0)
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try harder - perhaps kill an inactive queue */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
	}

	/* No free queue - we'll have to share */
	if (queue <= 0) {
		queue = iwl_mvm_get_shared_queue(mvm, tfd_queue_mask, ac);
		if (queue > 0) {
			shared_queue = true;
			mvm->queue_info[queue].status = IWL_MVM_QUEUE_SHARED;
		}
	}

	/*
	 * Mark TXQ as ready, even though it hasn't been fully configured yet,
	 * to make sure no one else takes it.
	 * This will allow avoiding re-acquiring the lock at the end of the
	 * configuration. On error we'll mark it back as free.
	 */
	if (queue > 0 && !shared_queue)
		mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;

	/* This shouldn't happen - out of queues */
	if (WARN_ON(queue <= 0)) {
		IWL_ERR(mvm, "No available queues for tid %d on sta_id %d\n",
			tid, cfg.sta_id);
		return queue;
	}

	/*
	 * Actual en/disablement of aggregations is through the ADD_STA HCMD,
	 * but for configuring the SCD to send A-MPDUs we need to mark the queue
	 * as aggregatable.
	 * Mark all DATA queues as allowing to be aggregated at some point
	 */
	cfg.aggregate = (queue >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
			 queue == IWL_MVM_DQA_BSS_CLIENT_QUEUE);

	IWL_DEBUG_TX_QUEUES(mvm,
			    "Allocating %squeue #%d to sta %d on tid %d\n",
			    shared_queue ? "shared " : "", queue,
			    mvmsta->sta_id, tid);

	if (shared_queue) {
		/* Disable any open aggs on this queue */
		disable_agg_tids = iwl_mvm_get_queue_agg_tids(mvm, queue);

		if (disable_agg_tids) {
			IWL_DEBUG_TX_QUEUES(mvm, "Disabling aggs on queue %d\n",
					    queue);
			iwl_mvm_invalidate_sta_queue(mvm, queue,
						     disable_agg_tids, false);
		}
	}

	inc_ssn = iwl_mvm_enable_txq(mvm, sta, queue, ssn, &cfg, wdg_timeout);

	/*
	 * Mark the queue as shared in the transport if shared.
	 * Note this has to be done after queue enablement because enablement
	 * can also set this value, and there is no indication there for
	 * shared queues.
	 */
	if (shared_queue)
		iwl_trans_txq_set_shared_mode(mvm->trans, queue, true);

	spin_lock_bh(&mvmsta->lock);
	/*
	 * This looks racy, but it is not. We have only one packet for
	 * this ra/tid in our Tx path since we stop the Qdisc when we
	 * need to allocate a new TFD queue.
	 */
	if (inc_ssn) {
		mvmsta->tid_data[tid].seq_number += 0x10;
		ssn = (ssn + 1) & IEEE80211_SCTL_SEQ;
	}
	mvmsta->tid_data[tid].txq_id = queue;
	mvmsta->tfd_queue_msk |= BIT(queue);
	queue_state = mvmsta->tid_data[tid].state;

	if (mvmsta->reserved_queue == queue)
		mvmsta->reserved_queue = IEEE80211_INVAL_HW_QUEUE;
	spin_unlock_bh(&mvmsta->lock);

	if (!shared_queue) {
		ret = iwl_mvm_sta_send_to_fw(mvm, sta, true, STA_MODIFY_QUEUES);
		if (ret)
			goto out_err;

		/* If we need to re-enable aggregations... */
		if (queue_state == IWL_AGG_ON) {
			ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
			if (ret)
				goto out_err;
		}
	} else {
		/* Redirect queue, if needed */
		ret = iwl_mvm_redirect_queue(mvm, queue, tid, ac, ssn,
					     wdg_timeout, false,
					     iwl_mvm_txq_from_tid(sta, tid));
		if (ret)
			goto out_err;
	}

	return 0;

out_err:
	queue_tmp = queue;
	iwl_mvm_disable_txq(mvm, sta, &queue_tmp, tid);

	return ret;
}

void iwl_mvm_add_new_dqa_stream_wk(struct work_struct *wk)
{
	struct iwl_mvm *mvm = container_of(wk, struct iwl_mvm,
					   add_stream_wk);

	mutex_lock(&mvm->mutex);

	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	while (!list_empty(&mvm->add_stream_txqs)) {
		struct iwl_mvm_txq *mvmtxq;
		struct ieee80211_txq *txq;
		u8 tid;

		mvmtxq = list_first_entry(&mvm->add_stream_txqs,
					  struct iwl_mvm_txq, list);

		txq = container_of((void *)mvmtxq, struct ieee80211_txq,
				   drv_priv);
		tid = txq->tid;
		if (tid == IEEE80211_NUM_TIDS)
			tid = IWL_MAX_TID_COUNT;

		/*
		 * We can't really do much here, but if this fails we can't
		 * transmit anyway - so just don't transmit the frame etc.
		 * and let them back up ... we've tried our best to allocate
		 * a queue in the function itself.
		 */
		if (iwl_mvm_sta_alloc_queue(mvm, txq->sta, txq->ac, tid)) {
			list_del_init(&mvmtxq->list);
			continue;
		}

		list_del_init(&mvmtxq->list);
		local_bh_disable();
		iwl_mvm_mac_itxq_xmit(mvm->hw, txq);
		local_bh_enable();
	}

	mutex_unlock(&mvm->mutex);
}

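/*
 * Reserve a DATA queue for a new STA on the old TX path: prefer the
 * dedicated BSS client queue for a non-TDLS client STA, otherwise pick any
 * free DATA queue (kicking out an inactive one if needed) and mark it
 * IWL_MVM_QUEUE_RESERVED until a TID actually starts using it.
 */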
static int iwl_mvm_reserve_sta_stream(struct iwl_mvm *mvm,
				      struct ieee80211_sta *sta,
				      enum nl80211_iftype vif_type)
{
	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
	int queue;

	/* queue reserving is disabled on new TX path */
	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
		return 0;

	/* run the general cleanup/unsharing of queues */
	iwl_mvm_inactivity_check(mvm, IWL_MVM_INVALID_STA);

	/* Make sure we have free resources for this STA */
	if (vif_type == NL80211_IFTYPE_STATION && !sta->tdls &&
	    !mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].tid_bitmap &&
	    (mvm->queue_info[IWL_MVM_DQA_BSS_CLIENT_QUEUE].status ==
	     IWL_MVM_QUEUE_FREE))
		queue = IWL_MVM_DQA_BSS_CLIENT_QUEUE;
	else
		queue = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
						IWL_MVM_DQA_MIN_DATA_QUEUE,
						IWL_MVM_DQA_MAX_DATA_QUEUE);
	if (queue < 0) {
		/* try again - this time kick out a queue if needed */
		queue = iwl_mvm_inactivity_check(mvm, mvmsta->sta_id);
		if (queue < 0) {
			IWL_ERR(mvm, "No available queues for new station\n");
			return -ENOSPC;
		}
	}
	mvm->queue_info[queue].status = IWL_MVM_QUEUE_RESERVED;

	mvmsta->reserved_queue = queue;

	IWL_DEBUG_TX_QUEUES(mvm, "Reserving data queue #%d for sta_id %d\n",
			    queue, mvmsta->sta_id);

	return 0;
}

/*
 * In DQA mode, after a HW restart the queues should be allocated as before, in
 * order to avoid race conditions when there are shared queues. This function
 * does the re-mapping and queue allocation.
 *
 * Note that re-enabling aggregations isn't done in this function.
 */
static void iwl_mvm_realloc_queues_after_restart(struct iwl_mvm *mvm,
						 struct ieee80211_sta *sta)
{
	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
	unsigned int wdg =
		iwl_mvm_get_wd_timeout(mvm, mvm_sta->vif, false, false);
	int i;
	struct iwl_trans_txq_scd_cfg cfg = {
		.sta_id = mvm_sta->sta_id,
		.frame_limit = IWL_FRAME_LIMIT,
	};

	/* Make sure reserved queue is still marked as such (if allocated) */
	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE)
		mvm->queue_info[mvm_sta->reserved_queue].status =
			IWL_MVM_QUEUE_RESERVED;

	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
		struct iwl_mvm_tid_data *tid_data = &mvm_sta->tid_data[i];
		int txq_id = tid_data->txq_id;
		int ac;

		if (txq_id == IWL_MVM_INVALID_QUEUE)
			continue;

		ac = tid_to_mac80211_ac[i];

		if (iwl_mvm_has_new_tx_api(mvm)) {
			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d\n",
					    mvm_sta->sta_id, i);
			txq_id = iwl_mvm_tvqm_enable_txq(mvm, mvm_sta->sta_id,
							 i, wdg);
			/*
			 * on failures, just set it to IWL_MVM_INVALID_QUEUE
			 * to try again later, we have no other good way of
			 * failing here
			 */
			if (txq_id < 0)
				txq_id = IWL_MVM_INVALID_QUEUE;
			tid_data->txq_id = txq_id;

			/*
			 * Since we don't set the seq number after reset, and
			 * the HW sets it now, a FW reset causes the seq num to
			 * start at 0 again, so the driver needs to update its
			 * internal copy as well to keep it in sync with the
			 * real value.
			 */
			tid_data->seq_number = 0;
		} else {
			u16 seq = IEEE80211_SEQ_TO_SN(tid_data->seq_number);

			cfg.tid = i;
			cfg.fifo = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
			cfg.aggregate = (txq_id >= IWL_MVM_DQA_MIN_DATA_QUEUE ||
					 txq_id ==
					 IWL_MVM_DQA_BSS_CLIENT_QUEUE);

			IWL_DEBUG_TX_QUEUES(mvm,
					    "Re-mapping sta %d tid %d to queue %d\n",
					    mvm_sta->sta_id, i, txq_id);

			iwl_mvm_enable_txq(mvm, sta, txq_id, seq, &cfg, wdg);
			mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_READY;
		}
	}
}

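/*
 * Add an internal (driver-only) station to the firmware. With ADD_STA
 * command version >= 12, aux-activity stations pass the raw mac_id instead
 * of the usual FW_CMD_ID_AND_COLOR() encoding. TX is left disabled on all
 * TIDs (tid_disable_tx = 0xffff).
 */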
static int iwl_mvm_add_int_sta_common(struct iwl_mvm *mvm,
				      struct iwl_mvm_int_sta *sta,
				      const u8 *addr,
				      u16 mac_id, u16 color)
{
	struct iwl_mvm_add_sta_cmd cmd;
	int ret;
	u32 status = ADD_STA_SUCCESS;

	lockdep_assert_held(&mvm->mutex);

	memset(&cmd, 0, sizeof(cmd));
	cmd.sta_id = sta->sta_id;

	if (iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA, 0) >= 12 &&
	    sta->type == IWL_STA_AUX_ACTIVITY)
		cmd.mac_id_n_color = cpu_to_le32(mac_id);
	else
		cmd.mac_id_n_color = cpu_to_le32(FW_CMD_ID_AND_COLOR(mac_id,
								     color));

	if (fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
		cmd.station_type = sta->type;

	if (!iwl_mvm_has_new_tx_api(mvm))
		cmd.tfd_queue_msk = cpu_to_le32(sta->tfd_queue_msk);
	cmd.tid_disable_tx = cpu_to_le16(0xffff);

	if (addr)
		memcpy(cmd.addr, addr, ETH_ALEN);

	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
					  iwl_mvm_add_sta_cmd_size(mvm),
					  &cmd, &status);
	if (ret)
		return ret;

	switch (status & IWL_ADD_STA_STATUS_MASK) {
	case ADD_STA_SUCCESS:
		IWL_DEBUG_INFO(mvm, "Internal station added.\n");
		return 0;
	default:
		ret = -EIO;
		IWL_ERR(mvm, "Add internal station failed, status=0x%x\n",
			status);
		break;
	}
	return ret;
}

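/*
 * Add a mac80211 station to the firmware. Allocates a sta_id (or reuses the
 * existing one during a HW restart, re-adding the station and reallocating
 * its queues first), initializes per-TID state and RX duplicate-detection
 * data, reserves a TX queue on the old TX path, sends the station to the FW
 * and finally publishes it in fw_id_to_mac_id[].
 */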
1627 int iwl_mvm_add_sta(struct iwl_mvm *mvm,
1628 		    struct ieee80211_vif *vif,
1629 		    struct ieee80211_sta *sta)
1630 {
1631 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1632 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1633 	struct iwl_mvm_rxq_dup_data *dup_data;
1634 	int i, ret, sta_id;
1635 	bool sta_update = false;
1636 	unsigned int sta_flags = 0;
1637 
1638 	lockdep_assert_held(&mvm->mutex);
1639 
1640 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
1641 		sta_id = iwl_mvm_find_free_sta_id(mvm,
1642 						  ieee80211_vif_type_p2p(vif));
1643 	else
1644 		sta_id = mvm_sta->sta_id;
1645 
1646 	if (sta_id == IWL_MVM_INVALID_STA)
1647 		return -ENOSPC;
1648 
1649 	spin_lock_init(&mvm_sta->lock);
1650 
1651 	/* if this is a HW restart re-alloc existing queues */
1652 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1653 		struct iwl_mvm_int_sta tmp_sta = {
1654 			.sta_id = sta_id,
1655 			.type = mvm_sta->sta_type,
1656 		};
1657 
1658 		/*
1659 		 * First add an empty station since allocating
1660 		 * a queue requires a valid station
1661 		 */
1662 		ret = iwl_mvm_add_int_sta_common(mvm, &tmp_sta, sta->addr,
1663 						 mvmvif->id, mvmvif->color);
1664 		if (ret)
1665 			goto err;
1666 
1667 		iwl_mvm_realloc_queues_after_restart(mvm, sta);
1668 		sta_update = true;
1669 		sta_flags = iwl_mvm_has_new_tx_api(mvm) ? 0 : STA_MODIFY_QUEUES;
1670 		goto update_fw;
1671 	}
1672 
1673 	mvm_sta->sta_id = sta_id;
1674 	mvm_sta->mac_id_n_color = FW_CMD_ID_AND_COLOR(mvmvif->id,
1675 						      mvmvif->color);
1676 	mvm_sta->vif = vif;
1677 	if (!mvm->trans->trans_cfg->gen2)
1678 		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_DEF;
1679 	else
1680 		mvm_sta->max_agg_bufsize = LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF;
1681 	mvm_sta->tx_protection = 0;
1682 	mvm_sta->tt_tx_protection = false;
1683 	mvm_sta->sta_type = sta->tdls ? IWL_STA_TDLS_LINK : IWL_STA_LINK;
1684 
1685 	/* HW restart, don't assume the memory has been zeroed */
1686 	mvm_sta->tid_disable_agg = 0xffff; /* No aggs at first */
1687 	mvm_sta->tfd_queue_msk = 0;
1688 
1689 	/* for HW restart - reset everything but the sequence number */
1690 	for (i = 0; i <= IWL_MAX_TID_COUNT; i++) {
1691 		u16 seq = mvm_sta->tid_data[i].seq_number;
1692 		memset(&mvm_sta->tid_data[i], 0, sizeof(mvm_sta->tid_data[i]));
1693 		mvm_sta->tid_data[i].seq_number = seq;
1694 
1695 		/*
1696 		 * Mark all queues for this STA as unallocated and defer TX
1697 		 * frames until the queue is allocated
1698 		 */
1699 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1700 	}
1701 
1702 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1703 		struct iwl_mvm_txq *mvmtxq =
1704 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1705 
1706 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1707 		INIT_LIST_HEAD(&mvmtxq->list);
1708 		atomic_set(&mvmtxq->tx_request, 0);
1709 	}
1710 
1711 	mvm_sta->agg_tids = 0;
1712 
1713 	if (iwl_mvm_has_new_rx_api(mvm) &&
1714 	    !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
1715 		int q;
1716 
1717 		dup_data = kcalloc(mvm->trans->num_rx_queues,
1718 				   sizeof(*dup_data), GFP_KERNEL);
1719 		if (!dup_data)
1720 			return -ENOMEM;
1721 		/*
1722 		 * Initialize all the last_seq values to 0xffff which can never
1723 		 * compare equal to the frame's seq_ctrl in the check in
1724 		 * iwl_mvm_is_dup() since the lower 4 bits are the fragment
1725 		 * number and fragmented packets don't reach that function.
1726 		 *
1727 		 * This thus allows receiving a packet with seqno 0 and the
1728 		 * retry bit set as the very first packet on a new TID.
1729 		 */
1730 		for (q = 0; q < mvm->trans->num_rx_queues; q++)
1731 			memset(dup_data[q].last_seq, 0xff,
1732 			       sizeof(dup_data[q].last_seq));
1733 		mvm_sta->dup_data = dup_data;
1734 	}
1735 
1736 	if (!iwl_mvm_has_new_tx_api(mvm)) {
1737 		ret = iwl_mvm_reserve_sta_stream(mvm, sta,
1738 						 ieee80211_vif_type_p2p(vif));
1739 		if (ret)
1740 			goto err;
1741 	}
1742 
1743 	/*
1744 	 * If rs is registered with mac80211, then "add station" will be handled
1745 	 * via the corresponding ops; otherwise we need to notify rate scaling here.
1746 	 */
1747 	if (iwl_mvm_has_tlc_offload(mvm))
1748 		iwl_mvm_rs_add_sta(mvm, mvm_sta);
1749 	else
1750 		spin_lock_init(&mvm_sta->lq_sta.rs_drv.pers.lock);
1751 
1752 	iwl_mvm_toggle_tx_ant(mvm, &mvm_sta->tx_ant);
1753 
1754 update_fw:
1755 	ret = iwl_mvm_sta_send_to_fw(mvm, sta, sta_update, sta_flags);
1756 	if (ret)
1757 		goto err;
1758 
1759 	if (vif->type == NL80211_IFTYPE_STATION) {
1760 		if (!sta->tdls) {
1761 			WARN_ON(mvmvif->ap_sta_id != IWL_MVM_INVALID_STA);
1762 			mvmvif->ap_sta_id = sta_id;
1763 		} else {
1764 			WARN_ON(mvmvif->ap_sta_id == IWL_MVM_INVALID_STA);
1765 		}
1766 	}
1767 
1768 	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta_id], sta);
1769 
1770 	return 0;
1771 
1772 err:
1773 	return ret;
1774 }
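
/*
 * Illustrative call site (a sketch, not driver code): iwl_mvm_add_sta() is
 * typically reached from the mac80211 sta_state callback when a station
 * first appears. A hypothetical caller, with mvm->mutex already held:
 *
 *	ret = iwl_mvm_add_sta(mvm, vif, sta);
 *	if (ret)	// e.g. -ENOSPC when no free sta_id is left
 *		return ret;
 *
 * On HW restart the function reuses the existing sta_id and jumps straight
 * to re-sending the station to the firmware (the update_fw label above).
 */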
1775 
1776 int iwl_mvm_drain_sta(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvmsta,
1777 		      bool drain)
1778 {
1779 	struct iwl_mvm_add_sta_cmd cmd = {};
1780 	int ret;
1781 	u32 status;
1782 
1783 	lockdep_assert_held(&mvm->mutex);
1784 
1785 	cmd.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color);
1786 	cmd.sta_id = mvmsta->sta_id;
1787 	cmd.add_modify = STA_MODE_MODIFY;
1788 	cmd.station_flags = drain ? cpu_to_le32(STA_FLG_DRAIN_FLOW) : 0;
1789 	cmd.station_flags_msk = cpu_to_le32(STA_FLG_DRAIN_FLOW);
1790 
1791 	status = ADD_STA_SUCCESS;
1792 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
1793 					  iwl_mvm_add_sta_cmd_size(mvm),
1794 					  &cmd, &status);
1795 	if (ret)
1796 		return ret;
1797 
1798 	switch (status & IWL_ADD_STA_STATUS_MASK) {
1799 	case ADD_STA_SUCCESS:
1800 		IWL_DEBUG_INFO(mvm, "Frames for staid %d will be drained in fw\n",
1801 			       mvmsta->sta_id);
1802 		break;
1803 	default:
1804 		ret = -EIO;
1805 #if defined(__linux__)
1806 		IWL_ERR(mvm, "Couldn't drain frames for staid %d\n",
1807 			mvmsta->sta_id);
1808 #elif defined(__FreeBSD__)
1809 		IWL_ERR(mvm, "Couldn't drain frames for staid %d, status %#x\n",
1810 			mvmsta->sta_id, status);
1811 #endif
1812 		break;
1813 	}
1814 
1815 	return ret;
1816 }
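
/*
 * Illustrative pairing (mirrors iwl_mvm_rm_sta() below): draining is used
 * as a bracket around flushing and waiting for the TX queues:
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);	// FW starts draining this STA
 *	iwl_mvm_flush_sta(mvm, mvmsta, false);	// flush pending frames
 *	// ... wait for the station's TX queues to empty ...
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);	// release the drain
 */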
1817 
1818 /*
1819  * Remove a station from the FW table. Before sending the command to remove
1820  * the station validate that the station is indeed known to the driver (sanity
1821  * only).
1822  */
1823 static int iwl_mvm_rm_sta_common(struct iwl_mvm *mvm, u8 sta_id)
1824 {
1825 	struct ieee80211_sta *sta;
1826 	struct iwl_mvm_rm_sta_cmd rm_sta_cmd = {
1827 		.sta_id = sta_id,
1828 	};
1829 	int ret;
1830 
1831 	sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
1832 					lockdep_is_held(&mvm->mutex));
1833 
1834 	/* Note: internal stations are marked as error values */
1835 	if (!sta) {
1836 		IWL_ERR(mvm, "Invalid station id\n");
1837 		return -EINVAL;
1838 	}
1839 
1840 	ret = iwl_mvm_send_cmd_pdu(mvm, REMOVE_STA, 0,
1841 				   sizeof(rm_sta_cmd), &rm_sta_cmd);
1842 	if (ret) {
1843 		IWL_ERR(mvm, "Failed to remove station. Id=%d\n", sta_id);
1844 		return ret;
1845 	}
1846 
1847 	return 0;
1848 }
1849 
1850 static void iwl_mvm_disable_sta_queues(struct iwl_mvm *mvm,
1851 				       struct ieee80211_vif *vif,
1852 				       struct ieee80211_sta *sta)
1853 {
1854 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1855 	int i;
1856 
1857 	lockdep_assert_held(&mvm->mutex);
1858 
1859 	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1860 		if (mvm_sta->tid_data[i].txq_id == IWL_MVM_INVALID_QUEUE)
1861 			continue;
1862 
1863 		iwl_mvm_disable_txq(mvm, sta, &mvm_sta->tid_data[i].txq_id, i);
1864 		mvm_sta->tid_data[i].txq_id = IWL_MVM_INVALID_QUEUE;
1865 	}
1866 
1867 	for (i = 0; i < ARRAY_SIZE(sta->txq); i++) {
1868 		struct iwl_mvm_txq *mvmtxq =
1869 			iwl_mvm_txq_from_mac80211(sta->txq[i]);
1870 
1871 		mvmtxq->txq_id = IWL_MVM_INVALID_QUEUE;
1872 	}
1873 }
1874 
1875 int iwl_mvm_wait_sta_queues_empty(struct iwl_mvm *mvm,
1876 				  struct iwl_mvm_sta *mvm_sta)
1877 {
1878 	int i;
1879 
1880 	for (i = 0; i < ARRAY_SIZE(mvm_sta->tid_data); i++) {
1881 		u16 txq_id;
1882 		int ret;
1883 
1884 		spin_lock_bh(&mvm_sta->lock);
1885 		txq_id = mvm_sta->tid_data[i].txq_id;
1886 		spin_unlock_bh(&mvm_sta->lock);
1887 
1888 		if (txq_id == IWL_MVM_INVALID_QUEUE)
1889 			continue;
1890 
1891 		ret = iwl_trans_wait_txq_empty(mvm->trans, txq_id);
1892 		if (ret)
1893 			return ret;
1894 	}
1895 
1896 	return 0;
1897 }
1898 
1899 int iwl_mvm_rm_sta(struct iwl_mvm *mvm,
1900 		   struct ieee80211_vif *vif,
1901 		   struct ieee80211_sta *sta)
1902 {
1903 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
1904 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
1905 	u8 sta_id = mvm_sta->sta_id;
1906 	int ret;
1907 
1908 	lockdep_assert_held(&mvm->mutex);
1909 
1910 #if defined(__linux__)
1911 	if (iwl_mvm_has_new_rx_api(mvm))
1912 		kfree(mvm_sta->dup_data);
1913 #elif defined(__FreeBSD__)
1914 	if (iwl_mvm_has_new_rx_api(mvm)) {
1915 		kfree(mvm_sta->dup_data);
1916 		mvm_sta->dup_data = NULL;
1917 	}
1918 #endif
1919 
1920 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, true);
1921 	if (ret)
1922 		return ret;
1923 
1924 	/* flush its queues here since we are freeing mvm_sta */
1925 	ret = iwl_mvm_flush_sta(mvm, mvm_sta, false);
1926 	if (ret)
1927 		return ret;
1928 	if (iwl_mvm_has_new_tx_api(mvm)) {
1929 		ret = iwl_mvm_wait_sta_queues_empty(mvm, mvm_sta);
1930 	} else {
1931 		u32 q_mask = mvm_sta->tfd_queue_msk;
1932 
1933 		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
1934 						     q_mask);
1935 	}
1936 	if (ret)
1937 		return ret;
1938 
1939 	ret = iwl_mvm_drain_sta(mvm, mvm_sta, false);
1940 
1941 	iwl_mvm_disable_sta_queues(mvm, vif, sta);
1942 
1943 	/* If there is a TXQ still marked as reserved - free it */
1944 	if (mvm_sta->reserved_queue != IEEE80211_INVAL_HW_QUEUE) {
1945 		u8 reserved_txq = mvm_sta->reserved_queue;
1946 		enum iwl_mvm_queue_status *status;
1947 
1948 		/*
1949 		 * If no traffic has gone through the reserved TXQ - it
1950 		 * is still marked as IWL_MVM_QUEUE_RESERVED, and
1951 		 * should be manually marked as free again
1952 		 */
1953 		status = &mvm->queue_info[reserved_txq].status;
1954 		if (WARN((*status != IWL_MVM_QUEUE_RESERVED) &&
1955 			 (*status != IWL_MVM_QUEUE_FREE),
1956 			 "sta_id %d reserved txq %d status %d",
1957 			 sta_id, reserved_txq, *status))
1958 			return -EINVAL;
1959 
1960 		*status = IWL_MVM_QUEUE_FREE;
1961 	}
1962 
1963 	if (vif->type == NL80211_IFTYPE_STATION &&
1964 	    mvmvif->ap_sta_id == sta_id) {
1965 		/* if associated - we can't remove the AP STA now */
1966 		if (vif->bss_conf.assoc)
1967 			return ret;
1968 
1969 		/* unassoc - go ahead - remove the AP STA now */
1970 		mvmvif->ap_sta_id = IWL_MVM_INVALID_STA;
1971 	}
1972 
1973 	/*
1974 	 * This shouldn't happen - the TDLS channel switch should be canceled
1975 	 * before the STA is removed.
1976 	 */
1977 	if (WARN_ON_ONCE(mvm->tdls_cs.peer.sta_id == sta_id)) {
1978 		mvm->tdls_cs.peer.sta_id = IWL_MVM_INVALID_STA;
1979 		cancel_delayed_work(&mvm->tdls_cs.dwork);
1980 	}
1981 
1982 	/*
1983 	 * Make sure that the tx response code sees the station as -EBUSY and
1984 	 * calls the drain worker.
1985 	 */
1986 	spin_lock_bh(&mvm_sta->lock);
1987 	spin_unlock_bh(&mvm_sta->lock);
1988 
1989 	ret = iwl_mvm_rm_sta_common(mvm, mvm_sta->sta_id);
1990 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[mvm_sta->sta_id], NULL);
1991 
1992 	return ret;
1993 }
1994 
1995 int iwl_mvm_rm_sta_id(struct iwl_mvm *mvm,
1996 		      struct ieee80211_vif *vif,
1997 		      u8 sta_id)
1998 {
1999 	int ret = iwl_mvm_rm_sta_common(mvm, sta_id);
2000 
2001 	lockdep_assert_held(&mvm->mutex);
2002 
2003 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta_id], NULL);
2004 	return ret;
2005 }
2006 
2007 int iwl_mvm_allocate_int_sta(struct iwl_mvm *mvm,
2008 			     struct iwl_mvm_int_sta *sta,
2009 			     u32 qmask, enum nl80211_iftype iftype,
2010 			     enum iwl_sta_type type)
2011 {
2012 	if (!test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status) ||
2013 	    sta->sta_id == IWL_MVM_INVALID_STA) {
2014 		sta->sta_id = iwl_mvm_find_free_sta_id(mvm, iftype);
2015 		if (WARN_ON_ONCE(sta->sta_id == IWL_MVM_INVALID_STA))
2016 			return -ENOSPC;
2017 	}
2018 
2019 	sta->tfd_queue_msk = qmask;
2020 	sta->type = type;
2021 
2022 	/* put a non-NULL value so iterating over the stations won't stop */
2023 	rcu_assign_pointer(mvm->fw_id_to_mac_id[sta->sta_id], ERR_PTR(-EINVAL));
2024 	return 0;
2025 }
2026 
2027 void iwl_mvm_dealloc_int_sta(struct iwl_mvm *mvm, struct iwl_mvm_int_sta *sta)
2028 {
2029 	RCU_INIT_POINTER(mvm->fw_id_to_mac_id[sta->sta_id], NULL);
2030 	memset(sta, 0, sizeof(struct iwl_mvm_int_sta));
2031 	sta->sta_id = IWL_MVM_INVALID_STA;
2032 }
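
/*
 * Minimal life-cycle sketch for an internal station (placeholder names;
 * compare iwl_mvm_add_aux_sta() below). mvm->mutex must be held:
 *
 *	ret = iwl_mvm_allocate_int_sta(mvm, &int_sta, qmask, iftype, type);
 *	if (ret)
 *		return ret;
 *	ret = iwl_mvm_add_int_sta_common(mvm, &int_sta, addr, mac_id, color);
 *	if (ret)
 *		iwl_mvm_dealloc_int_sta(mvm, &int_sta);
 */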
2033 
2034 static void iwl_mvm_enable_aux_snif_queue(struct iwl_mvm *mvm, u16 queue,
2035 					  u8 sta_id, u8 fifo)
2036 {
2037 	unsigned int wdg_timeout =
2038 		mvm->trans->trans_cfg->base_params->wd_timeout;
2039 	struct iwl_trans_txq_scd_cfg cfg = {
2040 		.fifo = fifo,
2041 		.sta_id = sta_id,
2042 		.tid = IWL_MAX_TID_COUNT,
2043 		.aggregate = false,
2044 		.frame_limit = IWL_FRAME_LIMIT,
2045 	};
2046 
2047 	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2048 
2049 	iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2050 }
2051 
2052 static int iwl_mvm_enable_aux_snif_queue_tvqm(struct iwl_mvm *mvm, u8 sta_id)
2053 {
2054 	unsigned int wdg_timeout =
2055 		mvm->trans->trans_cfg->base_params->wd_timeout;
2056 
2057 	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2058 
2059 	return iwl_mvm_tvqm_enable_txq(mvm, sta_id, IWL_MAX_TID_COUNT,
2060 				       wdg_timeout);
2061 }
2062 
2063 static int iwl_mvm_add_int_sta_with_queue(struct iwl_mvm *mvm, int macidx,
2064 					  int maccolor, u8 *addr,
2065 					  struct iwl_mvm_int_sta *sta,
2066 					  u16 *queue, int fifo)
2067 {
2068 	int ret;
2069 
2070 	/* Map queue to fifo - needs to happen before adding station */
2071 	if (!iwl_mvm_has_new_tx_api(mvm))
2072 		iwl_mvm_enable_aux_snif_queue(mvm, *queue, sta->sta_id, fifo);
2073 
2074 	ret = iwl_mvm_add_int_sta_common(mvm, sta, addr, macidx, maccolor);
2075 	if (ret) {
2076 		if (!iwl_mvm_has_new_tx_api(mvm))
2077 			iwl_mvm_disable_txq(mvm, NULL, queue,
2078 					    IWL_MAX_TID_COUNT);
2079 		return ret;
2080 	}
2081 
2082 	/*
2083 	 * For 22000 firmware and on we cannot add a queue to a station unknown
2084 	 * to the firmware, so enable the queue here - after the station was added
2085 	 */
2086 	if (iwl_mvm_has_new_tx_api(mvm)) {
2087 		int txq;
2088 
2089 		txq = iwl_mvm_enable_aux_snif_queue_tvqm(mvm, sta->sta_id);
2090 		if (txq < 0) {
2091 			iwl_mvm_rm_sta_common(mvm, sta->sta_id);
2092 			return txq;
2093 		}
2094 
2095 		*queue = txq;
2096 	}
2097 
2098 	return 0;
2099 }
2100 
2101 int iwl_mvm_add_aux_sta(struct iwl_mvm *mvm, u32 lmac_id)
2102 {
2103 	int ret;
2104 
2105 	lockdep_assert_held(&mvm->mutex);
2106 
2107 	/* Allocate aux station and assign to it the aux queue */
2108 	ret = iwl_mvm_allocate_int_sta(mvm, &mvm->aux_sta, BIT(mvm->aux_queue),
2109 				       NL80211_IFTYPE_UNSPECIFIED,
2110 				       IWL_STA_AUX_ACTIVITY);
2111 	if (ret)
2112 		return ret;
2113 
2114 	/*
2115 	 * In CDB NICs we need to specify which lmac to use for aux activity;
2116 	 * reuse the mac_id argument slot to pass lmac_id to the function
2117 	 */
2118 	ret = iwl_mvm_add_int_sta_with_queue(mvm, lmac_id, 0, NULL,
2119 					     &mvm->aux_sta, &mvm->aux_queue,
2120 					     IWL_MVM_TX_FIFO_MCAST);
2121 	if (ret) {
2122 		iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2123 		return ret;
2124 	}
2125 
2126 	return 0;
2127 }
2128 
2129 int iwl_mvm_add_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2130 {
2131 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2132 
2133 	lockdep_assert_held(&mvm->mutex);
2134 
2135 	return iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
2136 					      NULL, &mvm->snif_sta,
2137 					      &mvm->snif_queue,
2138 					      IWL_MVM_TX_FIFO_BE);
2139 }
2140 
2141 int iwl_mvm_rm_snif_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2142 {
2143 	int ret;
2144 
2145 	lockdep_assert_held(&mvm->mutex);
2146 
2147 	if (WARN_ON_ONCE(mvm->snif_sta.sta_id == IWL_MVM_INVALID_STA))
2148 		return -EINVAL;
2149 
2150 	iwl_mvm_disable_txq(mvm, NULL, &mvm->snif_queue, IWL_MAX_TID_COUNT);
2151 	ret = iwl_mvm_rm_sta_common(mvm, mvm->snif_sta.sta_id);
2152 	if (ret)
2153 		IWL_WARN(mvm, "Failed sending remove station\n");
2154 
2155 	return ret;
2156 }
2157 
2158 int iwl_mvm_rm_aux_sta(struct iwl_mvm *mvm)
2159 {
2160 	int ret;
2161 
2162 	lockdep_assert_held(&mvm->mutex);
2163 
2164 	if (WARN_ON_ONCE(mvm->aux_sta.sta_id == IWL_MVM_INVALID_STA))
2165 		return -EINVAL;
2166 
2167 	iwl_mvm_disable_txq(mvm, NULL, &mvm->aux_queue, IWL_MAX_TID_COUNT);
2168 	ret = iwl_mvm_rm_sta_common(mvm, mvm->aux_sta.sta_id);
2169 	if (ret)
2170 		IWL_WARN(mvm, "Failed sending remove station\n");
2171 	iwl_mvm_dealloc_int_sta(mvm, &mvm->aux_sta);
2172 
2173 	return ret;
2174 }
2175 
2176 void iwl_mvm_dealloc_snif_sta(struct iwl_mvm *mvm)
2177 {
2178 	iwl_mvm_dealloc_int_sta(mvm, &mvm->snif_sta);
2179 }
2180 
2181 /*
2182  * Send the add station command for the vif's broadcast station.
2183  * Assumes that the station was already allocated.
2184  *
2185  * @mvm: the mvm component
2186  * @vif: the interface to which the broadcast station is added
2188  */
2189 int iwl_mvm_send_add_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2190 {
2191 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2192 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2193 	static const u8 _baddr[] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
2194 	const u8 *baddr = _baddr;
2195 	int queue;
2196 	int ret;
2197 	unsigned int wdg_timeout =
2198 		iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2199 	struct iwl_trans_txq_scd_cfg cfg = {
2200 		.fifo = IWL_MVM_TX_FIFO_VO,
2201 		.sta_id = mvmvif->bcast_sta.sta_id,
2202 		.tid = IWL_MAX_TID_COUNT,
2203 		.aggregate = false,
2204 		.frame_limit = IWL_FRAME_LIMIT,
2205 	};
2206 
2207 	lockdep_assert_held(&mvm->mutex);
2208 
2209 	if (!iwl_mvm_has_new_tx_api(mvm)) {
2210 		if (vif->type == NL80211_IFTYPE_AP ||
2211 		    vif->type == NL80211_IFTYPE_ADHOC) {
2212 			queue = mvm->probe_queue;
2213 		} else if (vif->type == NL80211_IFTYPE_P2P_DEVICE) {
2214 			queue = mvm->p2p_dev_queue;
2215 		} else {
2216 			WARN(1, "Missing required TXQ for adding bcast STA\n");
2217 			return -EINVAL;
2218 		}
2219 
2220 		bsta->tfd_queue_msk |= BIT(queue);
2221 
2222 		iwl_mvm_enable_txq(mvm, NULL, queue, 0, &cfg, wdg_timeout);
2223 	}
2224 
2225 	if (vif->type == NL80211_IFTYPE_ADHOC)
2226 		baddr = vif->bss_conf.bssid;
2227 
2228 	if (WARN_ON_ONCE(bsta->sta_id == IWL_MVM_INVALID_STA))
2229 		return -ENOSPC;
2230 
2231 	ret = iwl_mvm_add_int_sta_common(mvm, bsta, baddr,
2232 					 mvmvif->id, mvmvif->color);
2233 	if (ret)
2234 		return ret;
2235 
2236 	/*
2237 	 * For 22000 firmware and on we cannot add a queue to a station unknown
2238 	 * to the firmware, so enable the queue here - after the station was added
2239 	 */
2240 	if (iwl_mvm_has_new_tx_api(mvm)) {
2241 		queue = iwl_mvm_tvqm_enable_txq(mvm, bsta->sta_id,
2242 						IWL_MAX_TID_COUNT,
2243 						wdg_timeout);
2244 		if (queue < 0) {
2245 			iwl_mvm_rm_sta_common(mvm, bsta->sta_id);
2246 			return queue;
2247 		}
2248 
2249 		if (vif->type == NL80211_IFTYPE_AP ||
2250 		    vif->type == NL80211_IFTYPE_ADHOC)
2251 			mvm->probe_queue = queue;
2252 		else if (vif->type == NL80211_IFTYPE_P2P_DEVICE)
2253 			mvm->p2p_dev_queue = queue;
2254 	}
2255 
2256 	return 0;
2257 }
2258 
2259 static void iwl_mvm_free_bcast_sta_queues(struct iwl_mvm *mvm,
2260 					  struct ieee80211_vif *vif)
2261 {
2262 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2263 	u16 *queueptr, queue;
2264 
2265 	lockdep_assert_held(&mvm->mutex);
2266 
2267 	iwl_mvm_flush_sta(mvm, &mvmvif->bcast_sta, true);
2268 
2269 	switch (vif->type) {
2270 	case NL80211_IFTYPE_AP:
2271 	case NL80211_IFTYPE_ADHOC:
2272 		queueptr = &mvm->probe_queue;
2273 		break;
2274 	case NL80211_IFTYPE_P2P_DEVICE:
2275 		queueptr = &mvm->p2p_dev_queue;
2276 		break;
2277 	default:
2278 		WARN(1, "Can't free bcast queue on vif type %d\n",
2279 		     vif->type);
2280 		return;
2281 	}
2282 
2283 	queue = *queueptr;
2284 	iwl_mvm_disable_txq(mvm, NULL, queueptr, IWL_MAX_TID_COUNT);
2285 	if (iwl_mvm_has_new_tx_api(mvm))
2286 		return;
2287 
2288 	WARN_ON(!(mvmvif->bcast_sta.tfd_queue_msk & BIT(queue)));
2289 	mvmvif->bcast_sta.tfd_queue_msk &= ~BIT(queue);
2290 }
2291 
2292 /* Send the FW a request to remove the station from its internal data
2293  * structures, but DO NOT remove the entry from the local data structures. */
2294 int iwl_mvm_send_rm_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2295 {
2296 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2297 	int ret;
2298 
2299 	lockdep_assert_held(&mvm->mutex);
2300 
2301 	iwl_mvm_free_bcast_sta_queues(mvm, vif);
2302 
2303 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->bcast_sta.sta_id);
2304 	if (ret)
2305 		IWL_WARN(mvm, "Failed sending remove station\n");
2306 	return ret;
2307 }
2308 
2309 int iwl_mvm_alloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2310 {
2311 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2312 
2313 	lockdep_assert_held(&mvm->mutex);
2314 
2315 	return iwl_mvm_allocate_int_sta(mvm, &mvmvif->bcast_sta, 0,
2316 					ieee80211_vif_type_p2p(vif),
2317 					IWL_STA_GENERAL_PURPOSE);
2318 }
2319 
2320 /* Allocate a new station entry for the broadcast station to the given vif,
2321  * and send it to the FW.
2322  * Note that each P2P mac should have its own broadcast station.
2323  *
2324  * @mvm: the mvm component
2325  * @vif: the interface to which the broadcast station is added
2326  */
2327 int iwl_mvm_add_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2328 {
2329 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2330 	struct iwl_mvm_int_sta *bsta = &mvmvif->bcast_sta;
2331 	int ret;
2332 
2333 	lockdep_assert_held(&mvm->mutex);
2334 
2335 	ret = iwl_mvm_alloc_bcast_sta(mvm, vif);
2336 	if (ret)
2337 		return ret;
2338 
2339 	ret = iwl_mvm_send_add_bcast_sta(mvm, vif);
2340 
2341 	if (ret)
2342 		iwl_mvm_dealloc_int_sta(mvm, bsta);
2343 
2344 	return ret;
2345 }
2346 
2347 void iwl_mvm_dealloc_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2348 {
2349 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2350 
2351 	iwl_mvm_dealloc_int_sta(mvm, &mvmvif->bcast_sta);
2352 }
2353 
2354 /*
2355  * Send the FW a request to remove the station from its internal data
2356  * structures, and in addition remove it from the local data structure.
2357  */
2358 int iwl_mvm_rm_p2p_bcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2359 {
2360 	int ret;
2361 
2362 	lockdep_assert_held(&mvm->mutex);
2363 
2364 	ret = iwl_mvm_send_rm_bcast_sta(mvm, vif);
2365 
2366 	iwl_mvm_dealloc_bcast_sta(mvm, vif);
2367 
2368 	return ret;
2369 }
2370 
2371 /*
2372  * Allocate a new station entry for the multicast station to the given vif,
2373  * and send it to the FW.
2374  * Note that each AP/GO mac should have its own multicast station.
2375  *
2376  * @mvm: the mvm component
2377  * @vif: the interface to which the multicast station is added
2378  */
2379 int iwl_mvm_add_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2380 {
2381 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2382 	struct iwl_mvm_int_sta *msta = &mvmvif->mcast_sta;
2383 	static const u8 _maddr[] = {0x03, 0x00, 0x00, 0x00, 0x00, 0x00};
2384 	const u8 *maddr = _maddr;
2385 	struct iwl_trans_txq_scd_cfg cfg = {
2386 		.fifo = vif->type == NL80211_IFTYPE_AP ?
2387 			IWL_MVM_TX_FIFO_MCAST : IWL_MVM_TX_FIFO_BE,
2388 		.sta_id = msta->sta_id,
2389 		.tid = 0,
2390 		.aggregate = false,
2391 		.frame_limit = IWL_FRAME_LIMIT,
2392 	};
2393 	unsigned int timeout = iwl_mvm_get_wd_timeout(mvm, vif, false, false);
2394 	int ret;
2395 
2396 	lockdep_assert_held(&mvm->mutex);
2397 
2398 	if (WARN_ON(vif->type != NL80211_IFTYPE_AP &&
2399 		    vif->type != NL80211_IFTYPE_ADHOC))
2400 		return -ENOTSUPP;
2401 
2402 	/*
2403 	 * In IBSS, ieee80211_check_queues() sets the cab_queue to be
2404 	 * invalid, so make sure we use the queue we want.
2405 	 * Note that this is done here as we want to avoid making DQA
2406 	 * changes in mac80211 layer.
2407 	 * changes in the mac80211 layer.
2408 	if (vif->type == NL80211_IFTYPE_ADHOC)
2409 		mvmvif->cab_queue = IWL_MVM_DQA_GCAST_QUEUE;
2410 
2411 	/*
2412 	 * While in previous FWs we had to exclude cab queue from TFD queue
2413 	 * mask, now it is needed like any other queue.
2414 	 */
2415 	if (!iwl_mvm_has_new_tx_api(mvm) &&
2416 	    fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE)) {
2417 		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2418 				   timeout);
2419 		msta->tfd_queue_msk |= BIT(mvmvif->cab_queue);
2420 	}
2421 	ret = iwl_mvm_add_int_sta_common(mvm, msta, maddr,
2422 					 mvmvif->id, mvmvif->color);
2423 	if (ret)
2424 		goto err;
2425 
2426 	/*
2427 	 * Enable cab queue after the ADD_STA command is sent.
2428 	 * This is needed for 22000 firmware which won't accept an SCD_QUEUE_CFG
2429 	 * command with an unknown station id, and for FW that doesn't support
2430 	 * station API since the cab queue is not included in the
2431 	 * tfd_queue_mask.
2432 	 */
2433 	if (iwl_mvm_has_new_tx_api(mvm)) {
2434 		int queue = iwl_mvm_tvqm_enable_txq(mvm, msta->sta_id,
2435 						    0,
2436 						    timeout);
2437 		if (queue < 0) {
2438 			ret = queue;
2439 			goto err;
2440 		}
2441 		mvmvif->cab_queue = queue;
2442 	} else if (!fw_has_api(&mvm->fw->ucode_capa,
2443 			       IWL_UCODE_TLV_API_STA_TYPE))
2444 		iwl_mvm_enable_txq(mvm, NULL, mvmvif->cab_queue, 0, &cfg,
2445 				   timeout);
2446 
2447 	return 0;
2448 err:
2449 	iwl_mvm_dealloc_int_sta(mvm, msta);
2450 	return ret;
2451 }
2452 
2453 static int __iwl_mvm_remove_sta_key(struct iwl_mvm *mvm, u8 sta_id,
2454 				    struct ieee80211_key_conf *keyconf,
2455 				    bool mcast)
2456 {
2457 	union {
2458 		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
2459 		struct iwl_mvm_add_sta_key_cmd cmd;
2460 	} u = {};
2461 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
2462 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
2463 	__le16 key_flags;
2464 	int ret, size;
2465 	u32 status;
2466 
2467 	/* This is a valid situation for GTK removal */
2468 	if (sta_id == IWL_MVM_INVALID_STA)
2469 		return 0;
2470 
2471 	key_flags = cpu_to_le16((keyconf->keyidx << STA_KEY_FLG_KEYID_POS) &
2472 				 STA_KEY_FLG_KEYID_MSK);
2473 	key_flags |= cpu_to_le16(STA_KEY_FLG_NO_ENC | STA_KEY_FLG_WEP_KEY_MAP);
2474 	key_flags |= cpu_to_le16(STA_KEY_NOT_VALID);
2475 
2476 	if (mcast)
2477 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
2478 
2479 	/*
2480 	 * The fields assigned here are in the same location at the start
2481 	 * of the command, so we can do this union trick.
2482 	 */
2483 	u.cmd.common.key_flags = key_flags;
2484 	u.cmd.common.key_offset = keyconf->hw_key_idx;
2485 	u.cmd.common.sta_id = sta_id;
2486 
2487 	size = new_api ? sizeof(u.cmd) : sizeof(u.cmd_v1);
2488 
2489 	status = ADD_STA_SUCCESS;
2490 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size, &u.cmd,
2491 					  &status);
2492 
2493 	switch (status) {
2494 	case ADD_STA_SUCCESS:
2495 		IWL_DEBUG_WEP(mvm, "MODIFY_STA: remove sta key passed\n");
2496 		break;
2497 	default:
2498 		ret = -EIO;
2499 		IWL_ERR(mvm, "MODIFY_STA: remove sta key failed\n");
2500 		break;
2501 	}
2502 
2503 	return ret;
2504 }
2505 
2506 /*
2507  * Send the FW a request to remove the station from its internal data
2508  * structures, and in addition remove it from the local data structure.
2509  */
2510 int iwl_mvm_rm_mcast_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
2511 {
2512 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
2513 	int ret;
2514 
2515 	lockdep_assert_held(&mvm->mutex);
2516 
2517 	iwl_mvm_flush_sta(mvm, &mvmvif->mcast_sta, true);
2518 
2519 	iwl_mvm_disable_txq(mvm, NULL, &mvmvif->cab_queue, 0);
2520 
2521 	ret = iwl_mvm_rm_sta_common(mvm, mvmvif->mcast_sta.sta_id);
2522 	if (ret)
2523 		IWL_WARN(mvm, "Failed sending remove station\n");
2524 
2525 	return ret;
2526 }
2527 
2528 static void iwl_mvm_sync_rxq_del_ba(struct iwl_mvm *mvm, u8 baid)
2529 {
2530 	struct iwl_mvm_delba_data notif = {
2531 		.baid = baid,
2532 	};
2533 
2534 	iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_NOTIF_DEL_BA, true,
2535 					&notif, sizeof(notif));
2536 }
2537 
2538 static void iwl_mvm_free_reorder(struct iwl_mvm *mvm,
2539 				 struct iwl_mvm_baid_data *data)
2540 {
2541 	int i;
2542 
2543 	iwl_mvm_sync_rxq_del_ba(mvm, data->baid);
2544 
2545 	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2546 		int j;
2547 		struct iwl_mvm_reorder_buffer *reorder_buf =
2548 			&data->reorder_buf[i];
2549 		struct iwl_mvm_reorder_buf_entry *entries =
2550 			&data->entries[i * data->entries_per_queue];
2551 
2552 		spin_lock_bh(&reorder_buf->lock);
2553 		if (likely(!reorder_buf->num_stored)) {
2554 			spin_unlock_bh(&reorder_buf->lock);
2555 			continue;
2556 		}
2557 
2558 		/*
2559 		 * This shouldn't happen in regular DELBA since the internal
2560 		 * delBA notification should trigger a release of all frames in
2561 		 * the reorder buffer.
2562 		 */
2563 		WARN_ON(1);
2564 
2565 		for (j = 0; j < reorder_buf->buf_size; j++)
2566 			__skb_queue_purge(&entries[j].e.frames);
2567 		/*
2568 		 * Prevent timer re-arm. This prevents a very far-fetched case
2569 		 * where we timed out on the notification. There may be prior
2570 		 * RX frames pending in the RX queue before the notification
2571 		 * that might get processed between now and the actual deletion
2572 		 * and we would re-arm the timer although we are deleting the
2573 		 * reorder buffer.
2574 		 */
2575 		reorder_buf->removed = true;
2576 		spin_unlock_bh(&reorder_buf->lock);
2577 		del_timer_sync(&reorder_buf->reorder_timer);
2578 	}
2579 }
2580 
2581 static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm,
2582 					struct iwl_mvm_baid_data *data,
2583 					u16 ssn, u16 buf_size)
2584 {
2585 	int i;
2586 
2587 	for (i = 0; i < mvm->trans->num_rx_queues; i++) {
2588 		struct iwl_mvm_reorder_buffer *reorder_buf =
2589 			&data->reorder_buf[i];
2590 		struct iwl_mvm_reorder_buf_entry *entries =
2591 			&data->entries[i * data->entries_per_queue];
2592 		int j;
2593 
2594 		reorder_buf->num_stored = 0;
2595 		reorder_buf->head_sn = ssn;
2596 		reorder_buf->buf_size = buf_size;
2597 		/* rx reorder timer */
2598 		timer_setup(&reorder_buf->reorder_timer,
2599 			    iwl_mvm_reorder_timer_expired, 0);
2600 		spin_lock_init(&reorder_buf->lock);
2601 		reorder_buf->mvm = mvm;
2602 		reorder_buf->queue = i;
2603 		reorder_buf->valid = false;
2604 		for (j = 0; j < reorder_buf->buf_size; j++)
2605 			__skb_queue_head_init(&entries[j].e.frames);
2606 	}
2607 }
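
/*
 * Layout note (matches the allocation in iwl_mvm_sta_rx_agg() below): one
 * kzalloc() holds the baid_data struct immediately followed by all per-queue
 * entry arrays, so the slice belonging to RX queue i starts at
 *
 *	&data->entries[i * data->entries_per_queue];
 *
 * entries_per_queue is rounded up so that each queue's slice fills whole
 * cache lines and no two queues share one.
 */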
2608 
2609 static int iwl_mvm_fw_baid_op_sta(struct iwl_mvm *mvm,
2610 				  struct iwl_mvm_sta *mvm_sta,
2611 				  bool start, int tid, u16 ssn,
2612 				  u16 buf_size)
2613 {
2614 	struct iwl_mvm_add_sta_cmd cmd = {
2615 		.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color),
2616 		.sta_id = mvm_sta->sta_id,
2617 		.add_modify = STA_MODE_MODIFY,
2618 	};
2619 	u32 status;
2620 	int ret;
2621 
2622 	if (start) {
2623 		cmd.add_immediate_ba_tid = tid;
2624 		cmd.add_immediate_ba_ssn = cpu_to_le16(ssn);
2625 		cmd.rx_ba_window = cpu_to_le16(buf_size);
2626 		cmd.modify_mask = STA_MODIFY_ADD_BA_TID;
2627 	} else {
2628 		cmd.remove_immediate_ba_tid = tid;
2629 		cmd.modify_mask = STA_MODIFY_REMOVE_BA_TID;
2630 	}
2631 
2632 	status = ADD_STA_SUCCESS;
2633 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2634 					  iwl_mvm_add_sta_cmd_size(mvm),
2635 					  &cmd, &status);
2636 	if (ret)
2637 		return ret;
2638 
2639 	switch (status & IWL_ADD_STA_STATUS_MASK) {
2640 	case ADD_STA_SUCCESS:
2641 		IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2642 			     start ? "start" : "stopp");
2643 		if (WARN_ON(start && iwl_mvm_has_new_rx_api(mvm) &&
2644 			    !(status & IWL_ADD_STA_BAID_VALID_MASK)))
2645 			return -EINVAL;
2646 		return u32_get_bits(status, IWL_ADD_STA_BAID_MASK);
2647 	case ADD_STA_IMMEDIATE_BA_FAILURE:
2648 		IWL_WARN(mvm, "RX BA Session refused by fw\n");
2649 		return -ENOSPC;
2650 	default:
2651 		IWL_ERR(mvm, "RX BA Session failed %sing, status 0x%x\n",
2652 			start ? "start" : "stopp", status);
2653 		return -EIO;
2654 	}
2655 }
2656 
2657 static int iwl_mvm_fw_baid_op_cmd(struct iwl_mvm *mvm,
2658 				  struct iwl_mvm_sta *mvm_sta,
2659 				  bool start, int tid, u16 ssn,
2660 				  u16 buf_size, int baid)
2661 {
2662 	struct iwl_rx_baid_cfg_cmd cmd = {
2663 		.action = start ? cpu_to_le32(IWL_RX_BAID_ACTION_ADD) :
2664 				  cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
2665 	};
2666 	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
2667 	int ret;
2668 
2669 	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));
2670 
2671 	if (start) {
2672 		cmd.alloc.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2673 		cmd.alloc.tid = tid;
2674 		cmd.alloc.ssn = cpu_to_le16(ssn);
2675 		cmd.alloc.win_size = cpu_to_le16(buf_size);
2676 		baid = -EIO;
2677 	} else if (iwl_fw_lookup_cmd_ver(mvm->fw, cmd_id, 1) == 1) {
2678 		cmd.remove_v1.baid = cpu_to_le32(baid);
2679 		BUILD_BUG_ON(sizeof(cmd.remove_v1) > sizeof(cmd.remove));
2680 	} else {
2681 		cmd.remove.sta_id_mask = cpu_to_le32(BIT(mvm_sta->sta_id));
2682 		cmd.remove.tid = cpu_to_le32(tid);
2683 	}
2684 
2685 	ret = iwl_mvm_send_cmd_pdu_status(mvm, cmd_id, sizeof(cmd),
2686 					  &cmd, &baid);
2687 	if (ret)
2688 		return ret;
2689 
2690 	if (!start) {
2691 		/* ignore firmware baid on remove */
2692 		baid = 0;
2693 	}
2694 
2695 	IWL_DEBUG_HT(mvm, "RX BA Session %sed in fw\n",
2696 		     start ? "start" : "stopp");
2697 
2698 	if (baid < 0 || baid >= ARRAY_SIZE(mvm->baid_map))
2699 		return -EINVAL;
2700 
2701 	return baid;
2702 }
2703 
2704 static int iwl_mvm_fw_baid_op(struct iwl_mvm *mvm, struct iwl_mvm_sta *mvm_sta,
2705 			      bool start, int tid, u16 ssn, u16 buf_size,
2706 			      int baid)
2707 {
2708 	if (fw_has_capa(&mvm->fw->ucode_capa,
2709 			IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT))
2710 		return iwl_mvm_fw_baid_op_cmd(mvm, mvm_sta, start,
2711 					      tid, ssn, buf_size, baid);
2712 
2713 	return iwl_mvm_fw_baid_op_sta(mvm, mvm_sta, start,
2714 				      tid, ssn, buf_size);
2715 }
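
/*
 * In short: firmware advertising IWL_UCODE_TLV_CAPA_BAID_ML_SUPPORT takes
 * the dedicated RX_BAID_ALLOCATION_CONFIG_CMD path; older firmware
 * multiplexes BA session setup/teardown over ADD_STA. Either way the
 * caller gets back the new BAID on allocation, or a negative error.
 */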
2716 
2717 int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2718 		       int tid, u16 ssn, bool start, u16 buf_size, u16 timeout)
2719 {
2720 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2721 	struct iwl_mvm_baid_data *baid_data = NULL;
2722 	int ret, baid;
2723 	u32 max_ba_id_sessions = iwl_mvm_has_new_tx_api(mvm) ? IWL_MAX_BAID :
2724 							       IWL_MAX_BAID_OLD;
2725 
2726 	lockdep_assert_held(&mvm->mutex);
2727 
2728 	if (start && mvm->rx_ba_sessions >= max_ba_id_sessions) {
2729 		IWL_WARN(mvm, "Not enough RX BA SESSIONS\n");
2730 		return -ENOSPC;
2731 	}
2732 
2733 	if (iwl_mvm_has_new_rx_api(mvm) && start) {
2734 		u16 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
2735 
2736 		/* sparse doesn't like the __align() so don't check */
2737 #ifndef __CHECKER__
2738 		/*
2739 		 * The division below will be OK if either the cache line size
2740 		 * can be divided by the entry size (ALIGN will round up) or
2741 		 * if the entry size can be divided by the cache line size, in
2742 		 * which case the ALIGN() will do nothing.
2743 		 */
2744 		BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
2745 			     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
2746 #endif
2747 
2748 		/*
2749 		 * Upward align the reorder buffer size to fill an entire cache
2750 		 * line for each queue, to avoid sharing cache lines between
2751 		 * different queues.
2752 		 */
2753 		reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);
2754 
2755 		/*
2756 		 * Allocate here so if allocation fails we can bail out early
2757 		 * before starting the BA session in the firmware
2758 		 */
2759 		baid_data = kzalloc(sizeof(*baid_data) +
2760 				    mvm->trans->num_rx_queues *
2761 				    reorder_buf_size,
2762 				    GFP_KERNEL);
2763 		if (!baid_data)
2764 			return -ENOMEM;
2765 
2766 		/*
2767 		 * This division is why we need the above BUILD_BUG_ON(),
2768 		 * if that doesn't hold then this will not be right.
2769 		 */
2770 		baid_data->entries_per_queue =
2771 			reorder_buf_size / sizeof(baid_data->entries[0]);
2772 	}
2773 
2774 	if (iwl_mvm_has_new_rx_api(mvm) && !start) {
2775 		baid = mvm_sta->tid_to_baid[tid];
2776 	} else {
2777 		/* we don't really need it in this case */
2778 		baid = -1;
2779 	}
2780 
2781 	/* Don't send command to remove (start=0) BAID during restart */
2782 	if (start || !test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))
2783 		baid = iwl_mvm_fw_baid_op(mvm, mvm_sta, start, tid, ssn, buf_size,
2784 					  baid);
2785 
2786 	if (baid < 0) {
2787 		ret = baid;
2788 		goto out_free;
2789 	}
2790 
2791 	if (start) {
2792 		mvm->rx_ba_sessions++;
2793 
2794 		if (!iwl_mvm_has_new_rx_api(mvm))
2795 			return 0;
2796 
2797 		baid_data->baid = baid;
2798 		baid_data->timeout = timeout;
2799 		baid_data->last_rx = jiffies;
2800 		baid_data->rcu_ptr = &mvm->baid_map[baid];
2801 		timer_setup(&baid_data->session_timer,
2802 			    iwl_mvm_rx_agg_session_expired, 0);
2803 		baid_data->mvm = mvm;
2804 		baid_data->tid = tid;
2805 		baid_data->sta_id = mvm_sta->sta_id;
2806 
2807 		mvm_sta->tid_to_baid[tid] = baid;
2808 		if (timeout)
2809 			mod_timer(&baid_data->session_timer,
2810 				  TU_TO_EXP_TIME(timeout * 2));
2811 
2812 		iwl_mvm_init_reorder_buffer(mvm, baid_data, ssn, buf_size);
2813 		/*
2814 		 * protect the BA data with RCU to cover a case where our
2815 		 * internal RX sync mechanism times out (not that it's
2816 		 * supposed to happen) and we will free the session data while
2817 		 * RX is being processed in parallel
2818 		 */
2819 		IWL_DEBUG_HT(mvm, "Sta %d(%d) is assigned to BAID %d\n",
2820 			     mvm_sta->sta_id, tid, baid);
2821 		WARN_ON(rcu_access_pointer(mvm->baid_map[baid]));
2822 		rcu_assign_pointer(mvm->baid_map[baid], baid_data);
2823 	} else  {
2824 		baid = mvm_sta->tid_to_baid[tid];
2825 
2826 		if (mvm->rx_ba_sessions > 0)
2827 			/* check that restart flow didn't zero the counter */
2828 			mvm->rx_ba_sessions--;
2829 		if (!iwl_mvm_has_new_rx_api(mvm))
2830 			return 0;
2831 
2832 		if (WARN_ON(baid == IWL_RX_REORDER_DATA_INVALID_BAID))
2833 			return -EINVAL;
2834 
2835 		baid_data = rcu_access_pointer(mvm->baid_map[baid]);
2836 		if (WARN_ON(!baid_data))
2837 			return -EINVAL;
2838 
2839 		/* synchronize all rx queues so we can safely delete */
2840 		iwl_mvm_free_reorder(mvm, baid_data);
2841 		del_timer_sync(&baid_data->session_timer);
2842 		RCU_INIT_POINTER(mvm->baid_map[baid], NULL);
2843 		kfree_rcu(baid_data, rcu_head);
2844 		IWL_DEBUG_HT(mvm, "BAID %d is free\n", baid);
2845 
2846 		/*
2847 		 * After we've deleted it, do another queue sync
2848 		 * so if an IWL_MVM_RXQ_NSSN_SYNC was concurrently
2849 		 * running it won't find a new session in the old
2850 		 * BAID. It can find the NULL pointer for the BAID,
2851 		 * but we must not have it find a different session.
2852 		 */
2853 		iwl_mvm_sync_rx_queues_internal(mvm, IWL_MVM_RXQ_EMPTY,
2854 						true, NULL, 0);
2855 	}
2856 	return 0;
2857 
2858 out_free:
2859 	kfree(baid_data);
2860 	return ret;
2861 }
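
/*
 * Usage sketch (hypothetical caller following the usual mac80211
 * ampdu_action flow): an ADDBA request maps to start=true, a session
 * teardown to start=false, e.g.:
 *
 *	// IEEE80211_AMPDU_RX_START
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, ssn, true, buf_size, timeout);
 *	// IEEE80211_AMPDU_RX_STOP
 *	ret = iwl_mvm_sta_rx_agg(mvm, sta, tid, 0, false, buf_size, timeout);
 */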
2862 
2863 int iwl_mvm_sta_tx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta,
2864 		       int tid, u8 queue, bool start)
2865 {
2866 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
2867 	struct iwl_mvm_add_sta_cmd cmd = {};
2868 	int ret;
2869 	u32 status;
2870 
2871 	lockdep_assert_held(&mvm->mutex);
2872 
2873 	if (start) {
2874 		mvm_sta->tfd_queue_msk |= BIT(queue);
2875 		mvm_sta->tid_disable_agg &= ~BIT(tid);
2876 	} else {
2877 		/* In DQA-mode the queue isn't removed on agg termination */
2878 		mvm_sta->tid_disable_agg |= BIT(tid);
2879 	}
2880 
2881 	cmd.mac_id_n_color = cpu_to_le32(mvm_sta->mac_id_n_color);
2882 	cmd.sta_id = mvm_sta->sta_id;
2883 	cmd.add_modify = STA_MODE_MODIFY;
2884 	if (!iwl_mvm_has_new_tx_api(mvm))
2885 		cmd.modify_mask = STA_MODIFY_QUEUES;
2886 	cmd.modify_mask |= STA_MODIFY_TID_DISABLE_TX;
2887 	cmd.tfd_queue_msk = cpu_to_le32(mvm_sta->tfd_queue_msk);
2888 	cmd.tid_disable_tx = cpu_to_le16(mvm_sta->tid_disable_agg);
2889 
2890 	status = ADD_STA_SUCCESS;
2891 	ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA,
2892 					  iwl_mvm_add_sta_cmd_size(mvm),
2893 					  &cmd, &status);
2894 	if (ret)
2895 		return ret;
2896 
2897 	switch (status & IWL_ADD_STA_STATUS_MASK) {
2898 	case ADD_STA_SUCCESS:
2899 		break;
2900 	default:
2901 		ret = -EIO;
2902 		IWL_ERR(mvm, "TX BA Session failed %sing, status 0x%x\n",
2903 			start ? "start" : "stopp", status);
2904 		break;
2905 	}
2906 
2907 	return ret;
2908 }
2909 
2910 const u8 tid_to_mac80211_ac[] = {
2911 	IEEE80211_AC_BE,
2912 	IEEE80211_AC_BK,
2913 	IEEE80211_AC_BK,
2914 	IEEE80211_AC_BE,
2915 	IEEE80211_AC_VI,
2916 	IEEE80211_AC_VI,
2917 	IEEE80211_AC_VO,
2918 	IEEE80211_AC_VO,
2919 	IEEE80211_AC_VO, /* We treat MGMT as TID 8, which is set as AC_VO */
2920 };
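
/*
 * Example: TIDs 4-5 (video) map to IEEE80211_AC_VI, while TIDs 6-7 and the
 * pseudo-TID 8 used for MGMT frames all map to IEEE80211_AC_VO.
 */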
2921 
2922 static const u8 tid_to_ucode_ac[] = {
2923 	AC_BE,
2924 	AC_BK,
2925 	AC_BK,
2926 	AC_BE,
2927 	AC_VI,
2928 	AC_VI,
2929 	AC_VO,
2930 	AC_VO,
2931 };
2932 
2933 int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
2934 			     struct ieee80211_sta *sta, u16 tid, u16 *ssn)
2935 {
2936 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
2937 	struct iwl_mvm_tid_data *tid_data;
2938 	u16 normalized_ssn;
2939 	u16 txq_id;
2940 	int ret;
2941 
2942 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
2943 		return -EINVAL;
2944 
2945 	if (mvmsta->tid_data[tid].state != IWL_AGG_QUEUED &&
2946 	    mvmsta->tid_data[tid].state != IWL_AGG_OFF) {
2947 		IWL_ERR(mvm,
2948 			"Start AGG when state is not IWL_AGG_QUEUED or IWL_AGG_OFF %d!\n",
2949 			mvmsta->tid_data[tid].state);
2950 		return -ENXIO;
2951 	}
2952 
2953 	lockdep_assert_held(&mvm->mutex);
2954 
2955 	if (mvmsta->tid_data[tid].txq_id == IWL_MVM_INVALID_QUEUE &&
2956 	    iwl_mvm_has_new_tx_api(mvm)) {
2957 		u8 ac = tid_to_mac80211_ac[tid];
2958 
2959 		ret = iwl_mvm_sta_alloc_queue_tvqm(mvm, sta, ac, tid);
2960 		if (ret)
2961 			return ret;
2962 	}
2963 
2964 	spin_lock_bh(&mvmsta->lock);
2965 
2966 	/*
2967 	 * Note the possible cases:
2968 	 *  1. An enabled TXQ - TXQ needs to become agg'ed
2969 	 *  2. The TXQ hasn't yet been enabled, so find a free one and mark
2970 	 *	it as reserved
2971 	 */
2972 	txq_id = mvmsta->tid_data[tid].txq_id;
2973 	if (txq_id == IWL_MVM_INVALID_QUEUE) {
2974 		ret = iwl_mvm_find_free_queue(mvm, mvmsta->sta_id,
2975 					      IWL_MVM_DQA_MIN_DATA_QUEUE,
2976 					      IWL_MVM_DQA_MAX_DATA_QUEUE);
2977 		if (ret < 0) {
2978 			IWL_ERR(mvm, "Failed to allocate agg queue\n");
2979 			goto out;
2980 		}
2981 
2982 		txq_id = ret;
2983 
2984 		/* TXQ hasn't yet been enabled, so mark it only as reserved */
2985 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_RESERVED;
2986 	} else if (WARN_ON(txq_id >= IWL_MAX_HW_QUEUES)) {
2987 		ret = -ENXIO;
2988 		IWL_ERR(mvm, "txq_id %d out of range (0, %d)!\n",
2989 			txq_id, IWL_MAX_HW_QUEUES - 1);
2990 		goto out;
2991 
2992 	} else if (unlikely(mvm->queue_info[txq_id].status ==
2993 			    IWL_MVM_QUEUE_SHARED)) {
2994 		ret = -ENXIO;
2995 		IWL_DEBUG_TX_QUEUES(mvm,
2996 				    "Can't start tid %d agg on shared queue!\n",
2997 				    tid);
2998 		goto out;
2999 	}
3000 
3001 	IWL_DEBUG_TX_QUEUES(mvm,
3002 			    "AGG for tid %d will be on queue #%d\n",
3003 			    tid, txq_id);
3004 
3005 	tid_data = &mvmsta->tid_data[tid];
3006 	tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3007 	tid_data->txq_id = txq_id;
3008 	*ssn = tid_data->ssn;
3009 
3010 	IWL_DEBUG_TX_QUEUES(mvm,
3011 			    "Start AGG: sta %d tid %d queue %d - ssn = %d, next_recl = %d\n",
3012 			    mvmsta->sta_id, tid, txq_id, tid_data->ssn,
3013 			    tid_data->next_reclaimed);
3014 
3015 	/*
3016 	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we need
3017 	 * to align the wrap-around of ssn so we compare relevant values.
3018 	 */
3019 	normalized_ssn = tid_data->ssn;
3020 	if (mvm->trans->trans_cfg->gen2)
3021 		normalized_ssn &= 0xff;
3022 
3023 	if (normalized_ssn == tid_data->next_reclaimed) {
3024 		tid_data->state = IWL_AGG_STARTING;
3025 		ret = IEEE80211_AMPDU_TX_START_IMMEDIATE;
3026 	} else {
3027 		tid_data->state = IWL_EMPTYING_HW_QUEUE_ADDBA;
3028 		ret = IEEE80211_AMPDU_TX_START_DELAY_ADDBA;
3029 	}
3030 
3031 out:
3032 	spin_unlock_bh(&mvmsta->lock);
3033 
3034 	return ret;
3035 }
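
/*
 * Usage sketch (hypothetical ampdu_action caller): the return value is
 * handed straight back to mac80211:
 *
 *	ret = iwl_mvm_sta_tx_agg_start(mvm, vif, sta, tid, &ssn);
 *	// IEEE80211_AMPDU_TX_START_IMMEDIATE: queue already drained up to
 *	//   ssn, mac80211 may proceed with the ADDBA handshake right away.
 *	// IEEE80211_AMPDU_TX_START_DELAY_ADDBA: frames are still in flight,
 *	//   the ADDBA is deferred until the queue catches up to ssn.
 */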
3036 
3037 int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3038 			    struct ieee80211_sta *sta, u16 tid, u16 buf_size,
3039 			    bool amsdu)
3040 {
3041 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3042 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3043 	unsigned int wdg_timeout =
3044 		iwl_mvm_get_wd_timeout(mvm, vif, sta->tdls, false);
3045 	int queue, ret;
3046 	bool alloc_queue = true;
3047 	enum iwl_mvm_queue_status queue_status;
3048 	u16 ssn;
3049 
3050 	struct iwl_trans_txq_scd_cfg cfg = {
3051 		.sta_id = mvmsta->sta_id,
3052 		.tid = tid,
3053 		.frame_limit = buf_size,
3054 		.aggregate = true,
3055 	};
3056 
3057 	/*
3058 	 * When FW supports TLC_OFFLOAD, it also implements the Tx aggregation
3059 	 * manager, so this function should never be called in this case.
3060 	 */
3061 	if (WARN_ON_ONCE(iwl_mvm_has_tlc_offload(mvm)))
3062 		return -EINVAL;
3063 
3064 	BUILD_BUG_ON((sizeof(mvmsta->agg_tids) * BITS_PER_BYTE)
3065 		     != IWL_MAX_TID_COUNT);
3066 
3067 	spin_lock_bh(&mvmsta->lock);
3068 	ssn = tid_data->ssn;
3069 	queue = tid_data->txq_id;
3070 	tid_data->state = IWL_AGG_ON;
3071 	mvmsta->agg_tids |= BIT(tid);
3072 	tid_data->ssn = 0xffff;
3073 	tid_data->amsdu_in_ampdu_allowed = amsdu;
3074 	spin_unlock_bh(&mvmsta->lock);
3075 
3076 	if (iwl_mvm_has_new_tx_api(mvm)) {
3077 		/*
3078 		 * If there is no queue for this tid, iwl_mvm_sta_tx_agg_start()
3079 		 * would have failed, so if we are here there is no need to
3080 		 * allocate a queue.
3081 		 * However, if the aggregation size is different from the
3082 		 * default size, the scheduler should be reconfigured.
3083 		 * We cannot do this with the new TX API, so return unsupported
3084 		 * for now, until it is offloaded to firmware.
3085 		 * Note that if SCD default value changes - this condition
3086 		 * should be updated as well.
3087 		 */
3088 		if (buf_size < IWL_FRAME_LIMIT)
3089 			return -ENOTSUPP;
3090 
3091 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3092 		if (ret)
3093 			return -EIO;
3094 		goto out;
3095 	}
3096 
3097 	cfg.fifo = iwl_mvm_ac_to_tx_fifo[tid_to_mac80211_ac[tid]];
3098 
3099 	queue_status = mvm->queue_info[queue].status;
3100 
3101 	/* Maybe there is no need to even alloc a queue... */
3102 	if (mvm->queue_info[queue].status == IWL_MVM_QUEUE_READY)
3103 		alloc_queue = false;
3104 
3105 	/*
3106 	 * Only reconfig the SCD for the queue if the window size has
3107 	 * changed from current (become smaller)
3108 	 */
3109 	if (!alloc_queue && buf_size < IWL_FRAME_LIMIT) {
3110 		/*
3111 		 * If reconfiguring an existing queue, it first must be
3112 		 * drained
3113 		 */
3114 		ret = iwl_trans_wait_tx_queues_empty(mvm->trans,
3115 						     BIT(queue));
3116 		if (ret) {
3117 			IWL_ERR(mvm,
3118 				"Error draining queue before reconfig\n");
3119 			return ret;
3120 		}
3121 
3122 		ret = iwl_mvm_reconfig_scd(mvm, queue, cfg.fifo,
3123 					   mvmsta->sta_id, tid,
3124 					   buf_size, ssn);
3125 		if (ret) {
3126 			IWL_ERR(mvm,
3127 				"Error reconfiguring TXQ #%d\n", queue);
3128 			return ret;
3129 		}
3130 	}
3131 
3132 	if (alloc_queue)
3133 		iwl_mvm_enable_txq(mvm, sta, queue, ssn,
3134 				   &cfg, wdg_timeout);
3135 
3136 	/* Send ADD_STA command to enable aggs only if the queue isn't shared */
3137 	if (queue_status != IWL_MVM_QUEUE_SHARED) {
3138 		ret = iwl_mvm_sta_tx_agg(mvm, sta, tid, queue, true);
3139 		if (ret)
3140 			return -EIO;
3141 	}
3142 
3143 	/* No need to mark as reserved */
3144 	mvm->queue_info[queue].status = IWL_MVM_QUEUE_READY;
3145 
3146 out:
3147 	/*
3148 	 * Even though in theory the peer could have different
3149 	 * aggregation reorder buffer sizes for different sessions,
3150 	 * our ucode doesn't allow for that and has a global limit
3151 	 * for each station. Therefore, use the minimum of all the
3152 	 * aggregation sessions and our default value.
3153 	 */
3154 	mvmsta->max_agg_bufsize =
3155 		min(mvmsta->max_agg_bufsize, buf_size);
3156 	mvmsta->lq_sta.rs_drv.lq.agg_frame_cnt_limit = mvmsta->max_agg_bufsize;
3157 
3158 	IWL_DEBUG_HT(mvm, "Tx aggregation enabled on ra = %pM tid = %d\n",
3159 		     sta->addr, tid);
3160 
3161 	return iwl_mvm_send_lq_cmd(mvm, &mvmsta->lq_sta.rs_drv.lq);
3162 }
3163 
3164 static void iwl_mvm_unreserve_agg_queue(struct iwl_mvm *mvm,
3165 					struct iwl_mvm_sta *mvmsta,
3166 					struct iwl_mvm_tid_data *tid_data)
3167 {
3168 	u16 txq_id = tid_data->txq_id;
3169 
3170 	lockdep_assert_held(&mvm->mutex);
3171 
3172 	if (iwl_mvm_has_new_tx_api(mvm))
3173 		return;
3174 
3175 	/*
3176 	 * The TXQ is marked as reserved only if no traffic came through yet.
3177 	 * This means no traffic has been sent on this TID (agg'd or not), so
3178 	 * we no longer have use for the queue. Since it hasn't even been
3179 	 * allocated through iwl_mvm_enable_txq, we can just mark it back as
3180 	 * free.
3181 	 */
3182 	if (mvm->queue_info[txq_id].status == IWL_MVM_QUEUE_RESERVED) {
3183 		mvm->queue_info[txq_id].status = IWL_MVM_QUEUE_FREE;
3184 		tid_data->txq_id = IWL_MVM_INVALID_QUEUE;
3185 	}
3186 }
3187 
3188 int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3189 			    struct ieee80211_sta *sta, u16 tid)
3190 {
3191 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3192 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3193 	u16 txq_id;
3194 	int err;
3195 
3196 	/*
3197 	 * If mac80211 is cleaning its state, then say that we finished since
3198 	 * our state has been cleared anyway.
3199 	 */
3200 	if (test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status)) {
3201 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3202 		return 0;
3203 	}
3204 
3205 	spin_lock_bh(&mvmsta->lock);
3206 
3207 	txq_id = tid_data->txq_id;
3208 
3209 	IWL_DEBUG_TX_QUEUES(mvm, "Stop AGG: sta %d tid %d q %d state %d\n",
3210 			    mvmsta->sta_id, tid, txq_id, tid_data->state);
3211 
3212 	mvmsta->agg_tids &= ~BIT(tid);
3213 
3214 	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3215 
3216 	switch (tid_data->state) {
3217 	case IWL_AGG_ON:
3218 		tid_data->ssn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
3219 
3220 		IWL_DEBUG_TX_QUEUES(mvm,
3221 				    "ssn = %d, next_recl = %d\n",
3222 				    tid_data->ssn, tid_data->next_reclaimed);
3223 
3224 		tid_data->ssn = 0xffff;
3225 		tid_data->state = IWL_AGG_OFF;
3226 		spin_unlock_bh(&mvmsta->lock);
3227 
3228 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3229 
3230 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3231 		return 0;
3232 	case IWL_AGG_STARTING:
3233 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
3234 		/*
3235 		 * The agg session has been stopped before it was set up. This
3236 		 * can happen when the AddBA timer times out for example.
3237 		 */
3238 
3239 		/* No barriers since we are under mutex */
3240 		lockdep_assert_held(&mvm->mutex);
3241 
3242 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
3243 		tid_data->state = IWL_AGG_OFF;
3244 		err = 0;
3245 		break;
3246 	default:
3247 		IWL_ERR(mvm,
3248 			"Stopping AGG while state not ON or starting for %d on %d (%d)\n",
3249 			mvmsta->sta_id, tid, tid_data->state);
3250 		IWL_ERR(mvm,
3251 			"\ttid_data->txq_id = %d\n", tid_data->txq_id);
3252 		err = -EINVAL;
3253 	}
3254 
3255 	spin_unlock_bh(&mvmsta->lock);
3256 
3257 	return err;
3258 }
3259 
3260 int iwl_mvm_sta_tx_agg_flush(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
3261 			    struct ieee80211_sta *sta, u16 tid)
3262 {
3263 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3264 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
3265 	u16 txq_id;
3266 	enum iwl_mvm_agg_state old_state;
3267 
3268 	/*
3269 	 * First set the agg state to OFF to avoid calling
3270 	 * ieee80211_stop_tx_ba_cb in iwl_mvm_check_ratid_empty.
3271 	 */
3272 	spin_lock_bh(&mvmsta->lock);
3273 	txq_id = tid_data->txq_id;
3274 	IWL_DEBUG_TX_QUEUES(mvm, "Flush AGG: sta %d tid %d q %d state %d\n",
3275 			    mvmsta->sta_id, tid, txq_id, tid_data->state);
3276 	old_state = tid_data->state;
3277 	tid_data->state = IWL_AGG_OFF;
3278 	mvmsta->agg_tids &= ~BIT(tid);
3279 	spin_unlock_bh(&mvmsta->lock);
3280 
3281 	iwl_mvm_unreserve_agg_queue(mvm, mvmsta, tid_data);
3282 
3283 	if (old_state >= IWL_AGG_ON) {
3284 		iwl_mvm_drain_sta(mvm, mvmsta, true);
3285 
3286 		if (iwl_mvm_has_new_tx_api(mvm)) {
3287 			if (iwl_mvm_flush_sta_tids(mvm, mvmsta->sta_id,
3288 						   BIT(tid)))
3289 				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3290 			iwl_trans_wait_txq_empty(mvm->trans, txq_id);
3291 		} else {
3292 			if (iwl_mvm_flush_tx_path(mvm, BIT(txq_id)))
3293 				IWL_ERR(mvm, "Couldn't flush the AGG queue\n");
3294 			iwl_trans_wait_tx_queues_empty(mvm->trans, BIT(txq_id));
3295 		}
3296 
3297 		iwl_mvm_drain_sta(mvm, mvmsta, false);
3298 
3299 		iwl_mvm_sta_tx_agg(mvm, sta, tid, txq_id, false);
3300 	}
3301 
3302 	return 0;
3303 }
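
/*
 * Note the contrast with iwl_mvm_sta_tx_agg_stop() above: flush is the
 * abrupt teardown. The session state is forced to IWL_AGG_OFF first and
 * frames still sitting on the AGG queue are flushed rather than allowed
 * to drain out on their own.
 */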
3304 
3305 static int iwl_mvm_set_fw_key_idx(struct iwl_mvm *mvm)
3306 {
3307 	int i, max = -1, max_offs = -1;
3308 
3309 	lockdep_assert_held(&mvm->mutex);
3310 
3311 	/* Pick the unused key offset with the highest 'deleted'
3312 	 * counter. Every time a key is deleted, all the counters
3313 	 * are incremented and the one that was just deleted is
3314 	 * reset to zero. Thus, the highest counter is the one
3315 	 * that was deleted longest ago. Pick that one.
3316 	 */
3317 	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3318 		if (test_bit(i, mvm->fw_key_table))
3319 			continue;
3320 		if (mvm->fw_key_deleted[i] > max) {
3321 			max = mvm->fw_key_deleted[i];
3322 			max_offs = i;
3323 		}
3324 	}
3325 
3326 	if (max_offs < 0)
3327 		return STA_KEY_IDX_INVALID;
3328 
3329 	return max_offs;
3330 }
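
/*
 * Example of the selection policy: with key offsets 0-2, offset 1 still in
 * use and fw_key_deleted[] = {3, -, 7}, offset 2 is picked - of the free
 * offsets it was deleted the longest ago.
 */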
3331 
3332 static struct iwl_mvm_sta *iwl_mvm_get_key_sta(struct iwl_mvm *mvm,
3333 					       struct ieee80211_vif *vif,
3334 					       struct ieee80211_sta *sta)
3335 {
3336 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3337 
3338 	if (sta)
3339 		return iwl_mvm_sta_from_mac80211(sta);
3340 
3341 	/*
3342 	 * The device expects GTKs for station interfaces to be
3343 	 * installed as GTKs for the AP station. If we have no
3344 	 * station ID, then use AP's station ID.
3345 	 */
3346 	if (vif->type == NL80211_IFTYPE_STATION &&
3347 	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3348 		u8 sta_id = mvmvif->ap_sta_id;
3349 
3350 		sta = rcu_dereference_check(mvm->fw_id_to_mac_id[sta_id],
3351 					    lockdep_is_held(&mvm->mutex));
3352 
3353 		/*
3354 		 * It is possible that the 'sta' parameter is NULL,
3355 		 * for example when a GTK is removed - the sta_id will then
3356 		 * be the AP ID, and no station was passed by mac80211.
3357 		 */
3358 		if (IS_ERR_OR_NULL(sta))
3359 			return NULL;
3360 
3361 		return iwl_mvm_sta_from_mac80211(sta);
3362 	}
3363 
3364 	return NULL;
3365 }
3366 
3367 static int iwl_mvm_pn_cmp(const u8 *pn1, const u8 *pn2, int len)
3368 {
3369 	int i;
3370 
3371 	for (i = len - 1; i >= 0; i--) {
3372 		if (pn1[i] > pn2[i])
3373 			return 1;
3374 		if (pn1[i] < pn2[i])
3375 			return -1;
3376 	}
3377 
3378 	return 0;
3379 }
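
/*
 * Example: the byte at the highest index is treated as most significant,
 * so for len = 2, pn1 = {0x01, 0x00} and pn2 = {0x00, 0x01} the function
 * returns -1 - index 1 differs first and pn1[1] < pn2[1].
 */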
3380 
3381 static int iwl_mvm_send_sta_key(struct iwl_mvm *mvm,
3382 				u32 sta_id,
3383 				struct ieee80211_key_conf *key, bool mcast,
3384 				u32 tkip_iv32, u16 *tkip_p1k, u32 cmd_flags,
3385 				u8 key_offset, bool mfp)
3386 {
3387 	union {
3388 		struct iwl_mvm_add_sta_key_cmd_v1 cmd_v1;
3389 		struct iwl_mvm_add_sta_key_cmd cmd;
3390 	} u = {};
3391 	__le16 key_flags;
3392 	int ret;
3393 	u32 status;
3394 	u16 keyidx;
3395 	u64 pn = 0;
3396 	int i, size;
3397 	bool new_api = fw_has_api(&mvm->fw->ucode_capa,
3398 				  IWL_UCODE_TLV_API_TKIP_MIC_KEYS);
3399 	int api_ver = iwl_fw_lookup_cmd_ver(mvm->fw, ADD_STA_KEY,
3400 					    new_api ? 2 : 1);
3401 
3402 	if (sta_id == IWL_MVM_INVALID_STA)
3403 		return -EINVAL;
3404 
3405 	keyidx = (key->keyidx << STA_KEY_FLG_KEYID_POS) &
3406 		 STA_KEY_FLG_KEYID_MSK;
3407 	key_flags = cpu_to_le16(keyidx);
3408 	key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_KEY_MAP);
3409 
3410 	switch (key->cipher) {
3411 	case WLAN_CIPHER_SUITE_TKIP:
3412 		key_flags |= cpu_to_le16(STA_KEY_FLG_TKIP);
3413 		if (api_ver >= 2) {
3414 			memcpy((void *)&u.cmd.tx_mic_key,
3415 			       &key->key[NL80211_TKIP_DATA_OFFSET_TX_MIC_KEY],
3416 			       IWL_MIC_KEY_SIZE);
3417 
3418 			memcpy((void *)&u.cmd.rx_mic_key,
3419 			       &key->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY],
3420 			       IWL_MIC_KEY_SIZE);
3421 			pn = atomic64_read(&key->tx_pn);
3422 
3423 		} else {
3424 			u.cmd_v1.tkip_rx_tsc_byte2 = tkip_iv32;
3425 			for (i = 0; i < 5; i++)
3426 				u.cmd_v1.tkip_rx_ttak[i] =
3427 					cpu_to_le16(tkip_p1k[i]);
3428 		}
3429 		memcpy(u.cmd.common.key, key->key, key->keylen);
3430 		break;
3431 	case WLAN_CIPHER_SUITE_CCMP:
3432 		key_flags |= cpu_to_le16(STA_KEY_FLG_CCM);
3433 		memcpy(u.cmd.common.key, key->key, key->keylen);
3434 		if (api_ver >= 2)
3435 			pn = atomic64_read(&key->tx_pn);
3436 		break;
3437 	case WLAN_CIPHER_SUITE_WEP104:
3438 		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP_13BYTES);
3439 		fallthrough;
3440 	case WLAN_CIPHER_SUITE_WEP40:
3441 		key_flags |= cpu_to_le16(STA_KEY_FLG_WEP);
3442 		memcpy(u.cmd.common.key + 3, key->key, key->keylen);
3443 		break;
3444 	case WLAN_CIPHER_SUITE_GCMP_256:
3445 		key_flags |= cpu_to_le16(STA_KEY_FLG_KEY_32BYTES);
3446 		fallthrough;
3447 	case WLAN_CIPHER_SUITE_GCMP:
3448 		key_flags |= cpu_to_le16(STA_KEY_FLG_GCMP);
3449 		memcpy(u.cmd.common.key, key->key, key->keylen);
3450 		if (api_ver >= 2)
3451 			pn = atomic64_read(&key->tx_pn);
3452 		break;
3453 	default:
3454 		key_flags |= cpu_to_le16(STA_KEY_FLG_EXT);
3455 		memcpy(u.cmd.common.key, key->key, key->keylen);
3456 	}
3457 
3458 	if (mcast)
3459 		key_flags |= cpu_to_le16(STA_KEY_MULTICAST);
3460 	if (mfp)
3461 		key_flags |= cpu_to_le16(STA_KEY_MFP);
3462 
3463 	u.cmd.common.key_offset = key_offset;
3464 	u.cmd.common.key_flags = key_flags;
3465 	u.cmd.common.sta_id = sta_id;
3466 
3467 	if (key->cipher == WLAN_CIPHER_SUITE_TKIP)
3468 		i = 0;
3469 	else
3470 		i = -1;
3471 
3472 	for (; i < IEEE80211_NUM_TIDS; i++) {
3473 		struct ieee80211_key_seq seq = {};
3474 		u8 _rx_pn[IEEE80211_MAX_PN_LEN] = {}, *rx_pn = _rx_pn;
3475 		int rx_pn_len = 8;
3476 		/* there's a hole at 2/3 in FW format depending on version */
3477 		int hole = api_ver >= 3 ? 0 : 2;
3478 
3479 		ieee80211_get_key_rx_seq(key, i, &seq);
3480 
3481 		if (key->cipher == WLAN_CIPHER_SUITE_TKIP) {
3482 			rx_pn[0] = seq.tkip.iv16;
3483 			rx_pn[1] = seq.tkip.iv16 >> 8;
3484 			rx_pn[2 + hole] = seq.tkip.iv32;
3485 			rx_pn[3 + hole] = seq.tkip.iv32 >> 8;
3486 			rx_pn[4 + hole] = seq.tkip.iv32 >> 16;
3487 			rx_pn[5 + hole] = seq.tkip.iv32 >> 24;
3488 		} else if (key_flags & cpu_to_le16(STA_KEY_FLG_EXT)) {
3489 			rx_pn = seq.hw.seq;
3490 			rx_pn_len = seq.hw.seq_len;
3491 		} else {
3492 			rx_pn[0] = seq.ccmp.pn[0];
3493 			rx_pn[1] = seq.ccmp.pn[1];
3494 			rx_pn[2 + hole] = seq.ccmp.pn[2];
3495 			rx_pn[3 + hole] = seq.ccmp.pn[3];
3496 			rx_pn[4 + hole] = seq.ccmp.pn[4];
3497 			rx_pn[5 + hole] = seq.ccmp.pn[5];
3498 		}
3499 
3500 		if (iwl_mvm_pn_cmp(rx_pn, (u8 *)&u.cmd.common.rx_secur_seq_cnt,
3501 				   rx_pn_len) > 0)
3502 			memcpy(&u.cmd.common.rx_secur_seq_cnt, rx_pn,
3503 			       rx_pn_len);
3504 	}
3505 
3506 	if (api_ver >= 2) {
3507 		u.cmd.transmit_seq_cnt = cpu_to_le64(pn);
3508 		size = sizeof(u.cmd);
3509 	} else {
3510 		size = sizeof(u.cmd_v1);
3511 	}
3512 
3513 	status = ADD_STA_SUCCESS;
3514 	if (cmd_flags & CMD_ASYNC)
3515 		ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA_KEY, CMD_ASYNC, size,
3516 					   &u.cmd);
3517 	else
3518 		ret = iwl_mvm_send_cmd_pdu_status(mvm, ADD_STA_KEY, size,
3519 						  &u.cmd, &status);
3520 
3521 	switch (status) {
3522 	case ADD_STA_SUCCESS:
3523 		IWL_DEBUG_WEP(mvm, "MODIFY_STA: set dynamic key passed\n");
3524 		break;
3525 	default:
3526 		ret = -EIO;
3527 		IWL_ERR(mvm, "MODIFY_STA: set dynamic key failed\n");
3528 		break;
3529 	}
3530 
3531 	return ret;
3532 }
3533 
3534 static int iwl_mvm_send_sta_igtk(struct iwl_mvm *mvm,
3535 				 struct ieee80211_key_conf *keyconf,
3536 				 u8 sta_id, bool remove_key)
3537 {
3538 	struct iwl_mvm_mgmt_mcast_key_cmd igtk_cmd = {};
3539 
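	/* mac80211 uses key IDs 4/5 for the IGTK and 6/7 for the BIGTK */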
3540 	/* verify the key details match the required command's expectations */
3541 	if (WARN_ON((keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) ||
3542 		    (keyconf->keyidx != 4 && keyconf->keyidx != 5 &&
3543 		     keyconf->keyidx != 6 && keyconf->keyidx != 7) ||
3544 		    (keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC &&
3545 		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_128 &&
3546 		     keyconf->cipher != WLAN_CIPHER_SUITE_BIP_GMAC_256)))
3547 		return -EINVAL;
3548 
3549 	if (WARN_ON(!iwl_mvm_has_new_rx_api(mvm) &&
3550 		    keyconf->cipher != WLAN_CIPHER_SUITE_AES_CMAC))
3551 		return -EINVAL;
3552 
3553 	igtk_cmd.key_id = cpu_to_le32(keyconf->keyidx);
3554 	igtk_cmd.sta_id = cpu_to_le32(sta_id);
3555 
3556 	if (remove_key) {
3557 		/* This is a valid situation for IGTK */
3558 		if (sta_id == IWL_MVM_INVALID_STA)
3559 			return 0;
3560 
3561 		igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_NOT_VALID);
3562 	} else {
3563 		struct ieee80211_key_seq seq;
3564 		const u8 *pn;
3565 
3566 		switch (keyconf->cipher) {
3567 		case WLAN_CIPHER_SUITE_AES_CMAC:
3568 			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_CCM);
3569 			break;
3570 		case WLAN_CIPHER_SUITE_BIP_GMAC_128:
3571 		case WLAN_CIPHER_SUITE_BIP_GMAC_256:
3572 			igtk_cmd.ctrl_flags |= cpu_to_le32(STA_KEY_FLG_GCMP);
3573 			break;
3574 		default:
3575 			return -EINVAL;
3576 		}
3577 
3578 		memcpy(igtk_cmd.igtk, keyconf->key, keyconf->keylen);
3579 		if (keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3580 			igtk_cmd.ctrl_flags |=
3581 				cpu_to_le32(STA_KEY_FLG_KEY_32BYTES);
3582 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3583 		pn = seq.aes_cmac.pn;
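		/* pn[0] is the most significant byte - fold into a u64 */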
3584 		igtk_cmd.receive_seq_cnt = cpu_to_le64(((u64) pn[5] << 0) |
3585 						       ((u64) pn[4] << 8) |
3586 						       ((u64) pn[3] << 16) |
3587 						       ((u64) pn[2] << 24) |
3588 						       ((u64) pn[1] << 32) |
3589 						       ((u64) pn[0] << 40));
3590 	}
3591 
3592 	IWL_DEBUG_INFO(mvm, "%s %sIGTK (%d) for sta %u\n",
3593 		       remove_key ? "removing" : "installing",
3594 		       keyconf->keyidx >= 6 ? "B" : "",
3595 		       keyconf->keyidx, igtk_cmd.sta_id);
3596 
3597 	if (!iwl_mvm_has_new_rx_api(mvm)) {
3598 		struct iwl_mvm_mgmt_mcast_key_cmd_v1 igtk_cmd_v1 = {
3599 			.ctrl_flags = igtk_cmd.ctrl_flags,
3600 			.key_id = igtk_cmd.key_id,
3601 			.sta_id = igtk_cmd.sta_id,
3602 			.receive_seq_cnt = igtk_cmd.receive_seq_cnt
3603 		};
3604 
3605 		memcpy(igtk_cmd_v1.igtk, igtk_cmd.igtk,
3606 		       sizeof(igtk_cmd_v1.igtk));
3607 		return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3608 					    sizeof(igtk_cmd_v1), &igtk_cmd_v1);
3609 	}
3610 	return iwl_mvm_send_cmd_pdu(mvm, MGMT_MCAST_KEY, 0,
3611 				    sizeof(igtk_cmd), &igtk_cmd);
3612 }
3613 
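/*
 * Return the transmitter address used for TKIP phase-1 key derivation:
 * the peer station's own address, or the AP's address on a client
 * interface when no station was given.
 */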
3615 static inline u8 *iwl_mvm_get_mac_addr(struct iwl_mvm *mvm,
3616 				       struct ieee80211_vif *vif,
3617 				       struct ieee80211_sta *sta)
3618 {
3619 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3620 
3621 	if (sta)
3622 		return sta->addr;
3623 
3624 	if (vif->type == NL80211_IFTYPE_STATION &&
3625 	    mvmvif->ap_sta_id != IWL_MVM_INVALID_STA) {
3626 		u8 sta_id = mvmvif->ap_sta_id;
3627 		sta = rcu_dereference_protected(mvm->fw_id_to_mac_id[sta_id],
3628 						lockdep_is_held(&mvm->mutex));
3629 		return sta->addr;
3630 	}
3631 
3633 	return NULL;
3634 }
3635 
3636 static int __iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3637 				 struct ieee80211_vif *vif,
3638 				 struct ieee80211_sta *sta,
3639 				 struct ieee80211_key_conf *keyconf,
3640 				 u8 key_offset,
3641 				 bool mcast)
3642 {
3643 	const u8 *addr;
3644 	struct ieee80211_key_seq seq;
3645 	u16 p1k[5];
3646 	u32 sta_id;
3647 	bool mfp = false;
3648 
3649 	if (sta) {
3650 		struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
3651 
3652 		sta_id = mvm_sta->sta_id;
3653 		mfp = sta->mfp;
3654 	} else if (vif->type == NL80211_IFTYPE_AP &&
3655 		   !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE)) {
3656 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3657 
3658 		sta_id = mvmvif->mcast_sta.sta_id;
3659 	} else {
3660 		IWL_ERR(mvm, "Failed to find station id\n");
3661 		return -EINVAL;
3662 	}
3663 
3664 	if (keyconf->cipher == WLAN_CIPHER_SUITE_TKIP) {
3665 		addr = iwl_mvm_get_mac_addr(mvm, vif, sta);
3666 		/* get phase 1 key from mac80211 */
3667 		ieee80211_get_key_rx_seq(keyconf, 0, &seq);
3668 		ieee80211_get_tkip_rx_p1k(keyconf, addr, seq.tkip.iv32, p1k);
3669 
3670 		return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3671 					    seq.tkip.iv32, p1k, 0, key_offset,
3672 					    mfp);
3673 	}
3674 
3675 	return iwl_mvm_send_sta_key(mvm, sta_id, keyconf, mcast,
3676 				    0, NULL, 0, key_offset, mfp);
3677 }
3678 
3679 int iwl_mvm_set_sta_key(struct iwl_mvm *mvm,
3680 			struct ieee80211_vif *vif,
3681 			struct ieee80211_sta *sta,
3682 			struct ieee80211_key_conf *keyconf,
3683 			u8 key_offset)
3684 {
3685 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3686 	struct iwl_mvm_sta *mvm_sta;
3687 	u8 sta_id = IWL_MVM_INVALID_STA;
3688 	int ret;
3689 	static const u8 __maybe_unused zero_addr[ETH_ALEN] = {0};
3690 
3691 	lockdep_assert_held(&mvm->mutex);
3692 
3693 	if (vif->type != NL80211_IFTYPE_AP ||
3694 	    keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE) {
3695 		/* Get the station id from the mvm local station table */
3696 		mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3697 		if (!mvm_sta) {
3698 			IWL_ERR(mvm, "Failed to find station\n");
3699 			return -EINVAL;
3700 		}
3701 		sta_id = mvm_sta->sta_id;
3702 
3703 		/*
3704 		 * It is possible that the 'sta' parameter is NULL, and thus
3705 		 * there is a need to retrieve the sta from the local station
3706 		 * table.
3707 		 */
3708 		if (!sta) {
3709 			sta = rcu_dereference_protected(
3710 				mvm->fw_id_to_mac_id[sta_id],
3711 				lockdep_is_held(&mvm->mutex));
3712 			if (IS_ERR_OR_NULL(sta)) {
3713 				IWL_ERR(mvm, "Invalid station id\n");
3714 				return -EINVAL;
3715 			}
3716 		}
3717 
3718 		if (WARN_ON_ONCE(iwl_mvm_sta_from_mac80211(sta)->vif != vif))
3719 			return -EINVAL;
3720 	} else {
3721 		struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
3722 
3723 		sta_id = mvmvif->mcast_sta.sta_id;
3724 	}
3725 
3726 	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3727 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3728 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256) {
3729 		ret = iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, false);
3730 		goto end;
3731 	}
3732 
3733 	/* If the key_offset is not pre-assigned, we need to find a
3734 	 * new offset to use.  In normal cases, the offset is not
3735 	 * pre-assigned, but during HW_RESTART we want to reuse the
3736 	 * same indices, so we pass them when this function is called.
3737 	 *
3738 	 * In D3 entry, we need to hardcode the indices (because the
3739 	 * firmware hardcodes the PTK offset to 0).  In this case, we
3740 	 * need to make sure we don't overwrite the hw_key_idx in the
3741 	 * keyconf structure, because otherwise we cannot configure
3742 	 * the original ones back when resuming.
3743 	 */
3744 	if (key_offset == STA_KEY_IDX_INVALID) {
3745 		key_offset = iwl_mvm_set_fw_key_idx(mvm);
3746 		if (key_offset == STA_KEY_IDX_INVALID)
3747 			return -ENOSPC;
3748 		keyconf->hw_key_idx = key_offset;
3749 	}
3750 
3751 	ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf, key_offset, mcast);
3752 	if (ret)
3753 		goto end;
3754 
3755 	/*
3756 	 * For WEP, the same key is used for multicast and unicast. Upload it
3757 	 * again, using the same key offset, and now pointing the other one
3758 	 * to the same key slot (offset).
3759 	 * If this fails, remove the original as well.
3760 	 */
3761 	if ((keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3762 	     keyconf->cipher == WLAN_CIPHER_SUITE_WEP104) &&
3763 	    sta) {
3764 		ret = __iwl_mvm_set_sta_key(mvm, vif, sta, keyconf,
3765 					    key_offset, !mcast);
3766 		if (ret) {
3767 			__iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3768 			goto end;
3769 		}
3770 	}
3771 
3772 	__set_bit(key_offset, mvm->fw_key_table);
3773 
3774 end:
3775 	IWL_DEBUG_WEP(mvm, "key: cipher=%x len=%d idx=%d sta=%pM ret=%d\n",
3776 		      keyconf->cipher, keyconf->keylen, keyconf->keyidx,
3777 		      sta ? sta->addr : zero_addr, ret);
3778 	return ret;
3779 }
3780 
3781 int iwl_mvm_remove_sta_key(struct iwl_mvm *mvm,
3782 			   struct ieee80211_vif *vif,
3783 			   struct ieee80211_sta *sta,
3784 			   struct ieee80211_key_conf *keyconf)
3785 {
3786 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3787 	struct iwl_mvm_sta *mvm_sta;
3788 	u8 sta_id = IWL_MVM_INVALID_STA;
3789 	int ret, i;
3790 
3791 	lockdep_assert_held(&mvm->mutex);
3792 
3793 	/* Get the station from the mvm local station table */
3794 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3795 	if (mvm_sta)
3796 		sta_id = mvm_sta->sta_id;
3797 	else if (!sta && vif->type == NL80211_IFTYPE_AP && mcast)
3798 		sta_id = iwl_mvm_vif_from_mac80211(vif)->mcast_sta.sta_id;
3799 
3801 	IWL_DEBUG_WEP(mvm, "mvm remove dynamic key: idx=%d sta=%d\n",
3802 		      keyconf->keyidx, sta_id);
3803 
3804 	if (keyconf->cipher == WLAN_CIPHER_SUITE_AES_CMAC ||
3805 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_128 ||
3806 	    keyconf->cipher == WLAN_CIPHER_SUITE_BIP_GMAC_256)
3807 		return iwl_mvm_send_sta_igtk(mvm, keyconf, sta_id, true);
3808 
3809 	if (!__test_and_clear_bit(keyconf->hw_key_idx, mvm->fw_key_table)) {
3810 		IWL_ERR(mvm, "offset %d not used in fw key table.\n",
3811 			keyconf->hw_key_idx);
3812 		return -ENOENT;
3813 	}
3814 
3815 	/*
	 * Track which key offset was freed most recently: age all counters
	 * and zero the one just deleted, so iwl_mvm_set_fw_key_idx() can
	 * prefer the offset that has been unused the longest.
	 */
3816 	for (i = 0; i < STA_KEY_MAX_NUM; i++) {
3817 		if (mvm->fw_key_deleted[i] < U8_MAX)
3818 			mvm->fw_key_deleted[i]++;
3819 	}
3820 	mvm->fw_key_deleted[keyconf->hw_key_idx] = 0;
3821 
3822 	if (sta && !mvm_sta) {
3823 		IWL_DEBUG_WEP(mvm, "station non-existent, early return.\n");
3824 		return 0;
3825 	}
3826 
3827 	ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, mcast);
3828 	if (ret)
3829 		return ret;
3830 
3831 	/* delete WEP key twice to get rid of (now useless) offset */
3832 	if (keyconf->cipher == WLAN_CIPHER_SUITE_WEP40 ||
3833 	    keyconf->cipher == WLAN_CIPHER_SUITE_WEP104)
3834 		ret = __iwl_mvm_remove_sta_key(mvm, sta_id, keyconf, !mcast);
3835 
3836 	return ret;
3837 }
3838 
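/*
 * Called via mac80211's update_tkip_key operation: push the new
 * phase-1 RX key (TTAK) for the given iv32 to the firmware.
 */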
3839 void iwl_mvm_update_tkip_key(struct iwl_mvm *mvm,
3840 			     struct ieee80211_vif *vif,
3841 			     struct ieee80211_key_conf *keyconf,
3842 			     struct ieee80211_sta *sta, u32 iv32,
3843 			     u16 *phase1key)
3844 {
3845 	struct iwl_mvm_sta *mvm_sta;
3846 	bool mcast = !(keyconf->flags & IEEE80211_KEY_FLAG_PAIRWISE);
3847 	bool mfp = sta ? sta->mfp : false;
3848 
3849 	rcu_read_lock();
3850 
3851 	mvm_sta = iwl_mvm_get_key_sta(mvm, vif, sta);
3852 	if (WARN_ON_ONCE(!mvm_sta))
3853 		goto unlock;
3854 	iwl_mvm_send_sta_key(mvm, mvm_sta->sta_id, keyconf, mcast,
3855 			     iv32, phase1key, CMD_ASYNC, keyconf->hw_key_idx,
3856 			     mfp);
3857 
3858  unlock:
3859 	rcu_read_unlock();
3860 }
3861 
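/* clear the PS flag for the station so the FW resumes Tx to it */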
3862 void iwl_mvm_sta_modify_ps_wake(struct iwl_mvm *mvm,
3863 				struct ieee80211_sta *sta)
3864 {
3865 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3866 	struct iwl_mvm_add_sta_cmd cmd = {
3867 		.add_modify = STA_MODE_MODIFY,
3868 		.sta_id = mvmsta->sta_id,
3869 		.station_flags_msk = cpu_to_le32(STA_FLG_PS),
3870 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3871 	};
3872 	int ret;
3873 
3874 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3875 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3876 	if (ret)
3877 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3878 }
3879 
3880 void iwl_mvm_sta_modify_sleep_tx_count(struct iwl_mvm *mvm,
3881 				       struct ieee80211_sta *sta,
3882 				       enum ieee80211_frame_release_type reason,
3883 				       u16 cnt, u16 tids, bool more_data,
3884 				       bool single_sta_queue)
3885 {
3886 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
3887 	struct iwl_mvm_add_sta_cmd cmd = {
3888 		.add_modify = STA_MODE_MODIFY,
3889 		.sta_id = mvmsta->sta_id,
3890 		.modify_mask = STA_MODIFY_SLEEPING_STA_TX_COUNT,
3891 		.sleep_tx_count = cpu_to_le16(cnt),
3892 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3893 	};
3894 	int tid, ret;
3895 	unsigned long _tids = tids;
3896 
3897 	/* convert TIDs to ACs - we don't support TSPEC so that's OK.
3898 	 * Note that this field is reserved and unused by firmware not
3899 	 * supporting GO uAPSD, so it's safe to always do this.
3900 	 */
3901 	for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT)
3902 		cmd.awake_acs |= BIT(tid_to_ucode_ac[tid]);
3903 
3904 	/* If we're releasing frames from aggregation or dqa queues then check
3905 	 * if all the queues that we're releasing frames from, combined, have:
3906 	 *  - more frames than the service period, in which case more_data
3907 	 *    needs to be set
3908 	 *  - fewer than 'cnt' frames, in which case we need to adjust the
3909 	 *    firmware command (but do that unconditionally)
3910 	 */
3911 	if (single_sta_queue) {
3912 		int remaining = cnt;
3913 		int sleep_tx_count;
3914 
3915 		spin_lock_bh(&mvmsta->lock);
3916 		for_each_set_bit(tid, &_tids, IWL_MAX_TID_COUNT) {
3917 			struct iwl_mvm_tid_data *tid_data;
3918 			u16 n_queued;
3919 
3920 			tid_data = &mvmsta->tid_data[tid];
3921 
3922 			n_queued = iwl_mvm_tid_queued(mvm, tid_data);
3923 			if (n_queued > remaining) {
3924 				more_data = true;
3925 				remaining = 0;
3926 				break;
3927 			}
3928 			remaining -= n_queued;
3929 		}
3930 		sleep_tx_count = cnt - remaining;
3931 		if (reason == IEEE80211_FRAME_RELEASE_UAPSD)
3932 			mvmsta->sleep_tx_count = sleep_tx_count;
3933 		spin_unlock_bh(&mvmsta->lock);
3934 
3935 		cmd.sleep_tx_count = cpu_to_le16(sleep_tx_count);
3936 		if (WARN_ON(sleep_tx_count == 0)) {
3937 			ieee80211_sta_eosp(sta);
3938 			return;
3939 		}
3940 	}
3941 
3942 	/* Note: this is ignored by firmware not supporting GO uAPSD */
3943 	if (more_data)
3944 		cmd.sleep_state_flags |= STA_SLEEP_STATE_MOREDATA;
3945 
3946 	if (reason == IEEE80211_FRAME_RELEASE_PSPOLL) {
3947 		mvmsta->next_status_eosp = true;
3948 		cmd.sleep_state_flags |= STA_SLEEP_STATE_PS_POLL;
3949 	} else {
3950 		cmd.sleep_state_flags |= STA_SLEEP_STATE_UAPSD;
3951 	}
3952 
3953 	/* block the Tx queues until the FW updated the sleep Tx count */
3954 	iwl_trans_block_txq_ptrs(mvm->trans, true);
3955 
3956 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA,
3957 				   CMD_ASYNC | CMD_WANT_ASYNC_CALLBACK,
3958 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3959 	if (ret)
3960 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3961 }
3962 
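/* FW notification that a service period ended - relay EOSP to mac80211 */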
3963 void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm,
3964 			   struct iwl_rx_cmd_buffer *rxb)
3965 {
3966 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
3967 	struct iwl_mvm_eosp_notification *notif = (void *)pkt->data;
3968 	struct ieee80211_sta *sta;
3969 	u32 sta_id = le32_to_cpu(notif->sta_id);
3970 
3971 	if (WARN_ON_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations))
3972 		return;
3973 
3974 	rcu_read_lock();
3975 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
3976 	if (!IS_ERR_OR_NULL(sta))
3977 		ieee80211_sta_eosp(sta);
3978 	rcu_read_unlock();
3979 }
3980 
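/* set or clear STA_FLG_DISABLE_TX to stop/resume FW Tx to the station */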
3981 void iwl_mvm_sta_modify_disable_tx(struct iwl_mvm *mvm,
3982 				   struct iwl_mvm_sta *mvmsta, bool disable)
3983 {
3984 	struct iwl_mvm_add_sta_cmd cmd = {
3985 		.add_modify = STA_MODE_MODIFY,
3986 		.sta_id = mvmsta->sta_id,
3987 		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
3988 		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
3989 		.mac_id_n_color = cpu_to_le32(mvmsta->mac_id_n_color),
3990 	};
3991 	int ret;
3992 
3993 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
3994 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
3995 	if (ret)
3996 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
3997 }
3998 
3999 void iwl_mvm_sta_modify_disable_tx_ap(struct iwl_mvm *mvm,
4000 				      struct ieee80211_sta *sta,
4001 				      bool disable)
4002 {
4003 	struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4004 
4005 	spin_lock_bh(&mvm_sta->lock);
4006 
4007 	if (mvm_sta->disable_tx == disable) {
4008 		spin_unlock_bh(&mvm_sta->lock);
4009 		return;
4010 	}
4011 
4012 	mvm_sta->disable_tx = disable;
4013 
4014 	/*
4015 	 * If sta PS state is handled by mac80211, tell it to start/stop
4016 	 * queuing tx for this station.
4017 	 */
4018 	if (!ieee80211_hw_check(mvm->hw, AP_LINK_PS))
4019 		ieee80211_sta_block_awake(mvm->hw, sta, disable);
4020 
4021 	iwl_mvm_sta_modify_disable_tx(mvm, mvm_sta, disable);
4022 
4023 	spin_unlock_bh(&mvm_sta->lock);
4024 }
4025 
4026 static void iwl_mvm_int_sta_modify_disable_tx(struct iwl_mvm *mvm,
4027 					      struct iwl_mvm_vif *mvmvif,
4028 					      struct iwl_mvm_int_sta *sta,
4029 					      bool disable)
4030 {
4031 	u32 id = FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color);
4032 	struct iwl_mvm_add_sta_cmd cmd = {
4033 		.add_modify = STA_MODE_MODIFY,
4034 		.sta_id = sta->sta_id,
4035 		.station_flags = disable ? cpu_to_le32(STA_FLG_DISABLE_TX) : 0,
4036 		.station_flags_msk = cpu_to_le32(STA_FLG_DISABLE_TX),
4037 		.mac_id_n_color = cpu_to_le32(id),
4038 	};
4039 	int ret;
4040 
4041 	ret = iwl_mvm_send_cmd_pdu(mvm, ADD_STA, CMD_ASYNC,
4042 				   iwl_mvm_add_sta_cmd_size(mvm), &cmd);
4043 	if (ret)
4044 		IWL_ERR(mvm, "Failed to send ADD_STA command (%d)\n", ret);
4045 }
4046 
4047 void iwl_mvm_modify_all_sta_disable_tx(struct iwl_mvm *mvm,
4048 				       struct iwl_mvm_vif *mvmvif,
4049 				       bool disable)
4050 {
4051 	struct ieee80211_sta *sta;
4052 	struct iwl_mvm_sta *mvm_sta;
4053 	int i;
4054 
4055 	rcu_read_lock();
4056 
4057 	/* Block/unblock all the stations of the given mvmvif */
4058 	for (i = 0; i < mvm->fw->ucode_capa.num_stations; i++) {
4059 		sta = rcu_dereference(mvm->fw_id_to_mac_id[i]);
4060 		if (IS_ERR_OR_NULL(sta))
4061 			continue;
4062 
4063 		mvm_sta = iwl_mvm_sta_from_mac80211(sta);
4064 		if (mvm_sta->mac_id_n_color !=
4065 		    FW_CMD_ID_AND_COLOR(mvmvif->id, mvmvif->color))
4066 			continue;
4067 
4068 		iwl_mvm_sta_modify_disable_tx_ap(mvm, sta, disable);
4069 	}
4070 
4071 	rcu_read_unlock();
4072 
4073 	if (!fw_has_api(&mvm->fw->ucode_capa, IWL_UCODE_TLV_API_STA_TYPE))
4074 		return;
4075 
4076 	/* Need to block/unblock also multicast station */
4077 	if (mvmvif->mcast_sta.sta_id != IWL_MVM_INVALID_STA)
4078 		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4079 						  &mvmvif->mcast_sta, disable);
4080 
4081 	/*
4082 	 * Only unblock the broadcast station (FW blocks it for immediate
4083 	 * quiet, not the driver)
4084 	 */
4085 	if (!disable && mvmvif->bcast_sta.sta_id != IWL_MVM_INVALID_STA)
4086 		iwl_mvm_int_sta_modify_disable_tx(mvm, mvmvif,
4087 						  &mvmvif->bcast_sta, disable);
4088 }
4089 
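/* block Tx to the AP station while a channel switch is in progress */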
4090 void iwl_mvm_csa_client_absent(struct iwl_mvm *mvm, struct ieee80211_vif *vif)
4091 {
4092 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4093 	struct iwl_mvm_sta *mvmsta;
4094 
4095 	rcu_read_lock();
4096 
4097 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, mvmvif->ap_sta_id);
4098 
4099 	if (mvmsta)
4100 		iwl_mvm_sta_modify_disable_tx(mvm, mvmsta, true);
4101 
4102 	rcu_read_unlock();
4103 }
4104 
4105 u16 iwl_mvm_tid_queued(struct iwl_mvm *mvm, struct iwl_mvm_tid_data *tid_data)
4106 {
4107 	u16 sn = IEEE80211_SEQ_TO_SN(tid_data->seq_number);
4108 
4109 	/*
4110 	 * In 22000 HW, the next_reclaimed index is only 8 bit, so we'll need
4111 	 * to align the wrap around of ssn so we compare relevant values.
4112 	 */
4113 	if (mvm->trans->trans_cfg->gen2)
4114 		sn &= 0xff;
4115 
4116 	return ieee80211_sn_sub(sn, tid_data->next_reclaimed);
4117 }
4118 
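/*
 * Add an internal station for PASN (pre-association security
 * negotiation) ranging and install its key material.
 */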
4119 int iwl_mvm_add_pasn_sta(struct iwl_mvm *mvm, struct ieee80211_vif *vif,
4120 			 struct iwl_mvm_int_sta *sta, u8 *addr, u32 cipher,
4121 			 u8 *key, u32 key_len)
4122 {
4123 	int ret;
4124 	u16 queue;
4125 	struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif);
4126 	struct ieee80211_key_conf *keyconf;
4127 
4128 	ret = iwl_mvm_allocate_int_sta(mvm, sta, 0,
4129 				       NL80211_IFTYPE_UNSPECIFIED,
4130 				       IWL_STA_LINK);
4131 	if (ret)
4132 		return ret;
4133 
4134 	ret = iwl_mvm_add_int_sta_with_queue(mvm, mvmvif->id, mvmvif->color,
4135 					     addr, sta, &queue,
4136 					     IWL_MVM_TX_FIFO_BE);
4137 	if (ret)
4138 		goto out;
4139 
4140 	keyconf = kzalloc(sizeof(*keyconf) + key_len, GFP_KERNEL);
4141 	if (!keyconf) {
4142 		ret = -ENOBUFS;
4143 		goto out;
4144 	}
4145 
4146 	keyconf->cipher = cipher;
4147 	memcpy(keyconf->key, key, key_len);
4148 	keyconf->keylen = key_len;
4149 
4150 	ret = iwl_mvm_send_sta_key(mvm, sta->sta_id, keyconf, false,
4151 				   0, NULL, 0, 0, true);
4152 	kfree(keyconf);
4153 	return ret;
4154 out:
4155 	iwl_mvm_dealloc_int_sta(mvm, sta);
4156 	return ret;
4157 }
4158 
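/* ask the FW to abort a previously scheduled channel switch */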
4159 void iwl_mvm_cancel_channel_switch(struct iwl_mvm *mvm,
4160 				   struct ieee80211_vif *vif,
4161 				   u32 mac_id)
4162 {
4163 	struct iwl_cancel_channel_switch_cmd cancel_channel_switch_cmd = {
4164 		.mac_id = cpu_to_le32(mac_id),
4165 	};
4166 	int ret;
4167 
4168 	ret = iwl_mvm_send_cmd_pdu(mvm,
4169 				   WIDE_ID(MAC_CONF_GROUP, CANCEL_CHANNEL_SWITCH_CMD),
4170 				   CMD_ASYNC,
4171 				   sizeof(cancel_channel_switch_cmd),
4172 				   &cancel_channel_switch_cmd);
4173 	if (ret)
4174 		IWL_ERR(mvm, "Failed to cancel the channel switch\n");
4175 }
4176