xref: /linux/drivers/net/wireless/intel/iwlwifi/mvm/tx.c (revision ef9226cd56b718c79184a3466d32984a51cb449c)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5  * Copyright (C) 2016-2017 Intel Deutschland GmbH
6  */
7 #include <linux/ieee80211.h>
8 #include <linux/etherdevice.h>
9 #include <linux/tcp.h>
10 #include <net/gso.h>
11 #include <net/ip.h>
12 #include <net/ipv6.h>
13 
14 #include "iwl-trans.h"
15 #include "iwl-eeprom-parse.h"
16 #include "mvm.h"
17 #include "sta.h"
18 #include "time-sync.h"
19 
20 static void
21 iwl_mvm_bar_check_trigger(struct iwl_mvm *mvm, const u8 *addr,
22 			  u16 tid, u16 ssn)
23 {
24 	struct iwl_fw_dbg_trigger_tlv *trig;
25 	struct iwl_fw_dbg_trigger_ba *ba_trig;
26 
27 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
28 	if (!trig)
29 		return;
30 
31 	ba_trig = (void *)trig->data;
32 
33 	if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
34 		return;
35 
36 	iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
37 				"BAR sent to %pM, tid %d, ssn %d",
38 				addr, tid, ssn);
39 }
40 
41 #define OPT_HDR(type, skb, off) \
42 	(type *)(skb_network_header(skb) + (off))
43 
44 static u32 iwl_mvm_tx_csum(struct iwl_mvm *mvm, struct sk_buff *skb,
45 			   struct ieee80211_tx_info *info,
46 			   bool amsdu)
47 {
48 	struct ieee80211_hdr *hdr = (void *)skb->data;
49 	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
50 	u16 offload_assist = 0;
51 #if IS_ENABLED(CONFIG_INET)
52 	u8 protocol = 0;
53 
54 	/* Do not compute checksum if already computed */
55 	if (skb->ip_summed != CHECKSUM_PARTIAL)
56 		goto out;
57 
58 	/* We do not expect to be requested to csum stuff we do not support */
59 	if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
60 		      (skb->protocol != htons(ETH_P_IP) &&
61 		       skb->protocol != htons(ETH_P_IPV6)),
62 		      "No support for requested checksum\n")) {
63 		skb_checksum_help(skb);
64 		goto out;
65 	}
66 
67 	if (skb->protocol == htons(ETH_P_IP)) {
68 		protocol = ip_hdr(skb)->protocol;
69 	} else {
70 #if IS_ENABLED(CONFIG_IPV6)
71 		struct ipv6hdr *ipv6h =
72 			(struct ipv6hdr *)skb_network_header(skb);
73 		unsigned int off = sizeof(*ipv6h);
74 
75 		protocol = ipv6h->nexthdr;
76 		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
77 			struct ipv6_opt_hdr *hp;
78 
79 			/* only supported extension headers */
80 			if (protocol != NEXTHDR_ROUTING &&
81 			    protocol != NEXTHDR_HOP &&
82 			    protocol != NEXTHDR_DEST) {
83 				skb_checksum_help(skb);
84 				goto out;
85 			}
86 
87 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
88 			protocol = hp->nexthdr;
89 			off += ipv6_optlen(hp);
90 		}
91 		/* if we get here, the protocol should now be TCP/UDP */
92 #endif
93 	}
94 
95 	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
96 		WARN_ON_ONCE(1);
97 		skb_checksum_help(skb);
98 		goto out;
99 	}
100 
101 	/* enable L4 csum */
102 	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
103 
104 	/*
105 	 * Set offset to IP header (snap).
106 	 * We don't support tunneling so no need to take care of inner header.
107 	 * Size is in words.
108 	 */
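	/* 4 halfwords == the 8-byte SNAP header that precedes the IP header */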
109 	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
110 
111 	/* Do IPv4 csum for A-MSDU only (no IP csum for IPv6) */
112 	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
113 		ip_hdr(skb)->check = 0;
114 		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
115 	}
116 
117 	/* reset UDP/TCP header csum */
118 	if (protocol == IPPROTO_TCP)
119 		tcp_hdr(skb)->check = 0;
120 	else
121 		udp_hdr(skb)->check = 0;
122 
123 out:
124 #endif
125 	/*
126 	 * mac header len should include the IV, unless the IV is added by
127 	 * the firmware like in WEP; the size is in (16-bit) words.
128 	 * In the new Tx API, the IV is always added by the firmware.
129 	 */
130 	if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
131 	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
132 	    info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
133 		mh_len += info->control.hw_key->iv_len;
134 	mh_len /= 2;
135 	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
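	/*
	 * e.g. a 26-byte QoS data header plus an 8-byte CCMP IV gives
	 * mh_len = 17 (halfwords).
	 */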
136 
137 	if (amsdu)
138 		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
139 	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
140 		/* padding is inserted later in transport */
141 		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
142 
143 	return offload_assist;
144 }
145 
146 /*
147  * Sets most of the Tx cmd's fields
148  */
149 void iwl_mvm_set_tx_cmd(struct iwl_mvm *mvm, struct sk_buff *skb,
150 			struct iwl_tx_cmd *tx_cmd,
151 			struct ieee80211_tx_info *info, u8 sta_id)
152 {
153 	struct ieee80211_hdr *hdr = (void *)skb->data;
154 	__le16 fc = hdr->frame_control;
155 	u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
156 	u32 len = skb->len + FCS_LEN;
157 	bool amsdu = false;
158 	u8 ac;
159 
160 	if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
161 	    (ieee80211_is_probe_resp(fc) &&
162 	     !is_multicast_ether_addr(hdr->addr1)))
163 		tx_flags |= TX_CMD_FLG_ACK;
164 	else
165 		tx_flags &= ~TX_CMD_FLG_ACK;
166 
167 	if (ieee80211_is_probe_resp(fc))
168 		tx_flags |= TX_CMD_FLG_TSF;
169 
170 	if (ieee80211_has_morefrags(fc))
171 		tx_flags |= TX_CMD_FLG_MORE_FRAG;
172 
173 	if (ieee80211_is_data_qos(fc)) {
174 		u8 *qc = ieee80211_get_qos_ctl(hdr);
175 		tx_cmd->tid_tspec = qc[0] & 0xf;
176 		tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
177 		amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
178 	} else if (ieee80211_is_back_req(fc)) {
179 		struct ieee80211_bar *bar = (void *)skb->data;
180 		u16 control = le16_to_cpu(bar->control);
181 		u16 ssn = le16_to_cpu(bar->start_seq_num);
182 
183 		tx_flags |= TX_CMD_FLG_ACK | TX_CMD_FLG_BAR;
184 		tx_cmd->tid_tspec = (control &
185 				     IEEE80211_BAR_CTRL_TID_INFO_MASK) >>
186 			IEEE80211_BAR_CTRL_TID_INFO_SHIFT;
187 		WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
188 		iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
189 					  ssn);
190 	} else {
191 		if (ieee80211_is_data(fc))
192 			tx_cmd->tid_tspec = IWL_TID_NON_QOS;
193 		else
194 			tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
195 
196 		if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
197 			tx_flags |= TX_CMD_FLG_SEQ_CTL;
198 		else
199 			tx_flags &= ~TX_CMD_FLG_SEQ_CTL;
200 	}
201 
202 	/* Default to 0 (BE) when tid_tspec is set to IWL_MAX_TID_COUNT */
203 	if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
204 		ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
205 	else
206 		ac = tid_to_mac80211_ac[0];
207 
208 	tx_flags |= iwl_mvm_bt_coex_tx_prio(mvm, hdr, info, ac) <<
209 			TX_CMD_FLG_BT_PRIO_POS;
210 
211 	if (ieee80211_is_mgmt(fc)) {
212 		if (ieee80211_is_assoc_req(fc) || ieee80211_is_reassoc_req(fc))
213 			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
214 		else if (ieee80211_is_action(fc))
215 			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
216 		else
217 			tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
218 
219 		/* The spec allows Action frames in A-MPDU, but we don't
220 		 * support it
221 		 */
222 		WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
223 	} else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
224 		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
225 	} else {
226 		tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
227 	}
228 
229 	if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
230 	    !is_multicast_ether_addr(hdr->addr1))
231 		tx_flags |= TX_CMD_FLG_PROT_REQUIRE;
232 
233 	if (fw_has_capa(&mvm->fw->ucode_capa,
234 			IWL_UCODE_TLV_CAPA_TXPOWER_INSERTION_SUPPORT) &&
235 	    ieee80211_action_contains_tpc(skb))
236 		tx_flags |= TX_CMD_FLG_WRITE_TX_POWER;
237 
238 	tx_cmd->tx_flags = cpu_to_le32(tx_flags);
239 	/* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
240 	tx_cmd->len = cpu_to_le16((u16)skb->len);
241 	tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
242 	tx_cmd->sta_id = sta_id;
243 
244 	tx_cmd->offload_assist =
245 		cpu_to_le16(iwl_mvm_tx_csum(mvm, skb, info, amsdu));
246 }
247 
248 static u32 iwl_mvm_get_tx_ant(struct iwl_mvm *mvm,
249 			      struct ieee80211_tx_info *info,
250 			      struct ieee80211_sta *sta, __le16 fc)
251 {
252 	if (info->band == NL80211_BAND_2GHZ &&
253 	    !iwl_mvm_bt_coex_is_shared_ant_avail(mvm))
254 		return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
255 
256 	if (sta && ieee80211_is_data(fc)) {
257 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
258 
259 		return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
260 	}
261 
262 	return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
263 }
264 
265 static u32 iwl_mvm_convert_rate_idx(struct iwl_mvm *mvm,
266 				    struct ieee80211_tx_info *info,
267 				    int rate_idx)
268 {
269 	u32 rate_flags = 0;
270 	u8 rate_plcp;
271 	bool is_cck;
272 
273 	/* if the rate isn't a well known legacy rate, take the lowest one */
274 	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
275 		rate_idx = iwl_mvm_mac_ctxt_get_lowest_rate(mvm,
276 							    info,
277 							    info->control.vif);
278 
279 	/* Get PLCP rate for tx_cmd->rate_n_flags */
280 	rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
281 	is_cck = (rate_idx >= IWL_FIRST_CCK_RATE) &&
282 		 (rate_idx <= IWL_LAST_CCK_RATE);
283 
284 	/* Set CCK or OFDM flag */
285 	if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
286 		if (!is_cck)
287 			rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
288 		else
289 			rate_flags |= RATE_MCS_CCK_MSK;
290 	} else if (is_cck) {
291 		rate_flags |= RATE_MCS_CCK_MSK_V1;
292 	}
293 
294 	return (u32)rate_plcp | rate_flags;
295 }
296 
297 static u32 iwl_mvm_get_inject_tx_rate(struct iwl_mvm *mvm,
298 				      struct ieee80211_tx_info *info,
299 				      struct ieee80211_sta *sta,
300 				      __le16 fc)
301 {
302 	struct ieee80211_tx_rate *rate = &info->control.rates[0];
303 	u32 result;
304 
305 	/*
306 	 * we only care about legacy/HT/VHT so far, so we can
307 	 * build in v1 and use iwl_new_rate_from_v1()
308 	 */
309 
310 	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
311 		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
312 		u8 nss = ieee80211_rate_get_vht_nss(rate);
313 
314 		result = RATE_MCS_VHT_MSK_V1;
315 		result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
316 		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
317 		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
318 			result |= RATE_MCS_SGI_MSK_V1;
319 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
320 			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
321 		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
322 			result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
323 		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
324 			result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
325 
326 		if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
327 			result = iwl_new_rate_from_v1(result);
328 	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
329 		result = RATE_MCS_HT_MSK_V1;
330 		result |= u32_encode_bits(rate->idx,
331 					  RATE_HT_MCS_RATE_CODE_MSK_V1 |
332 					  RATE_HT_MCS_NSS_MSK_V1);
333 		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
334 			result |= RATE_MCS_SGI_MSK_V1;
335 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
336 			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
337 		if (info->flags & IEEE80211_TX_CTL_LDPC)
338 			result |= RATE_MCS_LDPC_MSK_V1;
339 		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
340 			result |= RATE_MCS_STBC_MSK;
341 
342 		if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
343 			result = iwl_new_rate_from_v1(result);
344 	} else {
345 		int rate_idx = info->control.rates[0].idx;
346 
347 		result = iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
348 	}
349 
350 	if (info->control.antennas)
351 		result |= u32_encode_bits(info->control.antennas,
352 					  RATE_MCS_ANT_AB_MSK);
353 	else
354 		result |= iwl_mvm_get_tx_ant(mvm, info, sta, fc);
355 
356 	return result;
357 }
358 
359 static u32 iwl_mvm_get_tx_rate(struct iwl_mvm *mvm,
360 			       struct ieee80211_tx_info *info,
361 			       struct ieee80211_sta *sta, __le16 fc)
362 {
363 	int rate_idx = -1;
364 
365 	if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
366 		/* info->control is only relevant for non HW rate control */
367 
368 		/* HT rate doesn't make sense for a non data frame */
369 		WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
370 			  !ieee80211_is_data(fc),
371 			  "Got a HT rate (flags:0x%x/mcs:%d/fc:0x%x/state:%d) for a non data frame\n",
372 			  info->control.rates[0].flags,
373 			  info->control.rates[0].idx,
374 			  le16_to_cpu(fc),
375 			  sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
376 
377 		rate_idx = info->control.rates[0].idx;
378 
379 		/* For non-2 GHz bands, remap mac80211 rate indices into driver
380 		 * indices.
381 		 */
382 		if (info->band != NL80211_BAND_2GHZ ||
383 		    (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
384 			rate_idx += IWL_FIRST_OFDM_RATE;
385 
386 		/* For the 2.4 GHz band, check that there is no need to remap */
387 		BUILD_BUG_ON(IWL_FIRST_CCK_RATE != 0);
388 	}
389 
390 	return iwl_mvm_convert_rate_idx(mvm, info, rate_idx);
391 }
392 
393 static u32 iwl_mvm_get_tx_rate_n_flags(struct iwl_mvm *mvm,
394 				       struct ieee80211_tx_info *info,
395 				       struct ieee80211_sta *sta, __le16 fc)
396 {
397 	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
398 		return iwl_mvm_get_inject_tx_rate(mvm, info, sta, fc);
399 
400 	return iwl_mvm_get_tx_rate(mvm, info, sta, fc) |
401 		iwl_mvm_get_tx_ant(mvm, info, sta, fc);
402 }
403 
404 /*
405  * Sets the fields in the Tx cmd that are rate related
406  */
407 void iwl_mvm_set_tx_cmd_rate(struct iwl_mvm *mvm, struct iwl_tx_cmd *tx_cmd,
408 			    struct ieee80211_tx_info *info,
409 			    struct ieee80211_sta *sta, __le16 fc)
410 {
411 	/* Set retry limit on RTS packets */
412 	tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
413 
414 	/* Set retry limit on DATA packets and Probe Responses */
415 	if (ieee80211_is_probe_resp(fc)) {
416 		tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
417 		tx_cmd->rts_retry_limit =
418 			min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
419 	} else if (ieee80211_is_back_req(fc)) {
420 		tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
421 	} else {
422 		tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
423 	}
424 
425 	/*
426 	 * for data packets, rate info comes from the table inside the fw. This
427 	 * table is controlled by LINK_QUALITY commands
428 	 */
429 
430 	if (likely(ieee80211_is_data(fc) && sta &&
431 		   !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) {
432 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
433 
434 		if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
435 			tx_cmd->initial_rate_index = 0;
436 			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
437 			return;
438 		}
439 	} else if (ieee80211_is_back_req(fc)) {
440 		tx_cmd->tx_flags |=
441 			cpu_to_le32(TX_CMD_FLG_ACK | TX_CMD_FLG_BAR);
442 	}
443 
444 	/* Set the rate in the TX cmd */
445 	tx_cmd->rate_n_flags =
446 		cpu_to_le32(iwl_mvm_get_tx_rate_n_flags(mvm, info, sta, fc));
447 }
448 
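/*
 * Writes the 48-bit packet number into the 8-byte CCMP/GCMP header that
 * follows the MAC header: bytes 0-1 carry PN0-PN1, byte 2 is reserved,
 * byte 3 holds the "Ext IV" bit (0x20) together with the key index, and
 * bytes 4-7 carry PN2-PN5.
 */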
449 static inline void iwl_mvm_set_tx_cmd_pn(struct ieee80211_tx_info *info,
450 					 u8 *crypto_hdr)
451 {
452 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
453 	u64 pn;
454 
455 	pn = atomic64_inc_return(&keyconf->tx_pn);
456 	crypto_hdr[0] = pn;
457 	crypto_hdr[2] = 0;
458 	crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
459 	crypto_hdr[1] = pn >> 8;
460 	crypto_hdr[4] = pn >> 16;
461 	crypto_hdr[5] = pn >> 24;
462 	crypto_hdr[6] = pn >> 32;
463 	crypto_hdr[7] = pn >> 40;
464 }
465 
466 /*
467  * Sets the fields in the Tx cmd that are crypto related
468  */
469 static void iwl_mvm_set_tx_cmd_crypto(struct iwl_mvm *mvm,
470 				      struct ieee80211_tx_info *info,
471 				      struct iwl_tx_cmd *tx_cmd,
472 				      struct sk_buff *skb_frag,
473 				      int hdrlen)
474 {
475 	struct ieee80211_key_conf *keyconf = info->control.hw_key;
476 	u8 *crypto_hdr = skb_frag->data + hdrlen;
477 	enum iwl_tx_cmd_sec_ctrl type = TX_CMD_SEC_CCM;
478 	u64 pn;
479 
480 	switch (keyconf->cipher) {
481 	case WLAN_CIPHER_SUITE_CCMP:
482 		iwl_mvm_set_tx_cmd_ccmp(info, tx_cmd);
483 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
484 		break;
485 
486 	case WLAN_CIPHER_SUITE_TKIP:
487 		tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
488 		pn = atomic64_inc_return(&keyconf->tx_pn);
489 		ieee80211_tkip_add_iv(crypto_hdr, keyconf, pn);
490 		ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
491 		break;
492 
493 	case WLAN_CIPHER_SUITE_WEP104:
494 		tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
495 		fallthrough;
496 	case WLAN_CIPHER_SUITE_WEP40:
497 		tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
498 			((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
499 			  TX_CMD_SEC_WEP_KEY_IDX_MSK);
500 
501 		memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
502 		break;
503 	case WLAN_CIPHER_SUITE_GCMP:
504 	case WLAN_CIPHER_SUITE_GCMP_256:
505 		type = TX_CMD_SEC_GCMP;
506 		fallthrough;
507 	case WLAN_CIPHER_SUITE_CCMP_256:
508 		/* TODO: Taking the key from the table might introduce a race
509 		 * when PTK rekeying is done, having old packets with a PN
510 		 * based on the old key but the message encrypted with a new
511 		 * one.
512 		 * Need to handle this.
513 		 */
514 		tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
515 		tx_cmd->key[0] = keyconf->hw_key_idx;
516 		iwl_mvm_set_tx_cmd_pn(info, crypto_hdr);
517 		break;
518 	default:
519 		tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
520 	}
521 }
522 
523 static bool iwl_mvm_use_host_rate(struct iwl_mvm *mvm,
524 				  struct iwl_mvm_sta *mvmsta,
525 				  struct ieee80211_hdr *hdr,
526 				  struct ieee80211_tx_info *info)
527 {
528 	if (unlikely(!mvmsta))
529 		return true;
530 
531 	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
532 		return true;
533 
534 	if (likely(ieee80211_is_data(hdr->frame_control) &&
535 		   mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
536 		return false;
537 
538 	/*
539 	 * Not a data frame: use the host rate if we're on an old device
540 	 * that can't possibly be doing MLO (its firmware may be selecting
541 	 * a bad rate). If we might be doing MLO we need to let the FW pick
542 	 * (since we don't necessarily know the link), but there FW rate
543 	 * selection was fixed anyway.
544 	 */
545 	return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
546 }
547 
548 static void iwl_mvm_copy_hdr(void *cmd, const void *hdr, int hdrlen,
549 			     const u8 *addr3_override)
550 {
551 	struct ieee80211_hdr *out_hdr = cmd;
552 
553 	memcpy(cmd, hdr, hdrlen);
554 	if (addr3_override)
555 		memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
556 }
557 
558 /*
559  * Allocates and sets up the Tx cmd for the given skb
560  */
561 static struct iwl_device_tx_cmd *
562 iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb,
563 		      struct ieee80211_tx_info *info, int hdrlen,
564 		      struct ieee80211_sta *sta, u8 sta_id,
565 		      const u8 *addr3_override)
566 {
567 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
568 	struct iwl_device_tx_cmd *dev_cmd;
569 	struct iwl_tx_cmd *tx_cmd;
570 
571 	dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
572 
573 	if (unlikely(!dev_cmd))
574 		return NULL;
575 
576 	dev_cmd->hdr.cmd = TX_CMD;
577 
578 	if (iwl_mvm_has_new_tx_api(mvm)) {
579 		u32 rate_n_flags = 0;
580 		u16 flags = 0;
581 		struct iwl_mvm_sta *mvmsta = sta ?
582 			iwl_mvm_sta_from_mac80211(sta) : NULL;
583 		bool amsdu = false;
584 
585 		if (ieee80211_is_data_qos(hdr->frame_control)) {
586 			u8 *qc = ieee80211_get_qos_ctl(hdr);
587 
588 			amsdu = *qc & IEEE80211_QOS_CTL_A_MSDU_PRESENT;
589 		}
590 
591 		if (!info->control.hw_key)
592 			flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
593 
594 		/*
595 		 * For data and mgmt packets rate info comes from the fw (on
596 		 * new devices; older FW is somewhat broken for this). Only
597 		 * set rate/antenna for injected frames with fixed rate, or
598 		 * when no sta is given, or with older firmware.
599 		 */
600 		if (unlikely(iwl_mvm_use_host_rate(mvm, mvmsta, hdr, info))) {
601 			flags |= IWL_TX_FLAGS_CMD_RATE;
602 			rate_n_flags =
603 				iwl_mvm_get_tx_rate_n_flags(mvm, info, sta,
604 							    hdr->frame_control);
605 		} else if (!ieee80211_is_data(hdr->frame_control) ||
606 			   mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
607 			/* These are important frames */
608 			flags |= IWL_TX_FLAGS_HIGH_PRI;
609 		}
610 
611 		if (mvm->trans->trans_cfg->device_family >=
612 		    IWL_DEVICE_FAMILY_AX210) {
613 			struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
614 			u32 offload_assist = iwl_mvm_tx_csum(mvm, skb,
615 							     info, amsdu);
616 
617 			cmd->offload_assist = cpu_to_le32(offload_assist);
618 
619 			/* Total # bytes to be transmitted */
620 			cmd->len = cpu_to_le16((u16)skb->len);
621 
622 			/* Copy MAC header from skb into command buffer */
623 			iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
624 
625 			cmd->flags = cpu_to_le16(flags);
626 			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
627 		} else {
628 			struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
629 			u16 offload_assist = iwl_mvm_tx_csum(mvm, skb,
630 							     info, amsdu);
631 
632 			cmd->offload_assist = cpu_to_le16(offload_assist);
633 
634 			/* Total # bytes to be transmitted */
635 			cmd->len = cpu_to_le16((u16)skb->len);
636 
637 			/* Copy MAC header from skb into command buffer */
638 			iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
639 
640 			cmd->flags = cpu_to_le32(flags);
641 			cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
642 		}
643 		goto out;
644 	}
645 
646 	tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
647 
648 	if (info->control.hw_key)
649 		iwl_mvm_set_tx_cmd_crypto(mvm, info, tx_cmd, skb, hdrlen);
650 
651 	iwl_mvm_set_tx_cmd(mvm, skb, tx_cmd, info, sta_id);
652 
653 	iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
654 
655 	/* Copy MAC header from skb into command buffer */
656 	iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
657 
658 out:
659 	return dev_cmd;
660 }
661 
662 static void iwl_mvm_skb_prepare_status(struct sk_buff *skb,
663 				       struct iwl_device_tx_cmd *cmd)
664 {
665 	struct ieee80211_tx_info *skb_info = IEEE80211_SKB_CB(skb);
666 
667 	memset(&skb_info->status, 0, sizeof(skb_info->status));
668 	memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
669 
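	/*
	 * Stash the device Tx command in the skb's driver data so it can be
	 * retrieved and freed when the frame is reclaimed (see the use of
	 * info->driver_data[1] in iwl_mvm_rx_tx_cmd_single()).
	 */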
670 	skb_info->driver_data[1] = cmd;
671 }
672 
673 static int iwl_mvm_get_ctrl_vif_queue(struct iwl_mvm *mvm,
674 				      struct iwl_mvm_vif_link_info *link,
675 				      struct ieee80211_tx_info *info,
676 				      struct sk_buff *skb)
677 {
678 	struct ieee80211_hdr *hdr = (void *)skb->data;
679 	__le16 fc = hdr->frame_control;
680 
681 	switch (info->control.vif->type) {
682 	case NL80211_IFTYPE_AP:
683 	case NL80211_IFTYPE_ADHOC:
684 		/*
685 		 * Non-bufferable frames use the broadcast station, thus they
686 		 * use the probe queue.
687 		 * Also take care of the case where we send a deauth to a
688 		 * station that we don't have, or similarly an association
689 		 * response (with non-success status) for a station we can't
690 		 * accept.
691 		 * Also, disassociation frames might happen, particularly with
692 		 * reason 7 ("Class 3 frame received from nonassociated STA").
693 		 */
694 		if (ieee80211_is_mgmt(fc) &&
695 		    (!ieee80211_is_bufferable_mmpdu(skb) ||
696 		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
697 			return link->mgmt_queue;
698 
699 		if (!ieee80211_has_order(fc) && !ieee80211_is_probe_req(fc) &&
700 		    is_multicast_ether_addr(hdr->addr1))
701 			return link->cab_queue;
702 
703 		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
704 			  "fc=0x%02x", le16_to_cpu(fc));
705 		return link->mgmt_queue;
706 	case NL80211_IFTYPE_P2P_DEVICE:
707 		if (ieee80211_is_mgmt(fc))
708 			return mvm->p2p_dev_queue;
709 
710 		WARN_ON_ONCE(1);
711 		return mvm->p2p_dev_queue;
712 	default:
713 		WARN_ONCE(1, "Not a ctrl vif, no available queue\n");
714 		return -1;
715 	}
716 }
717 
718 static void iwl_mvm_probe_resp_set_noa(struct iwl_mvm *mvm,
719 				       struct sk_buff *skb)
720 {
721 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
722 	struct iwl_mvm_vif *mvmvif =
723 		iwl_mvm_vif_from_mac80211(info->control.vif);
724 	struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
725 	int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
726 	struct iwl_probe_resp_data *resp_data;
727 	const u8 *ie;
728 	u8 *pos;
729 	u8 match[] = {
730 		(WLAN_OUI_WFA >> 16) & 0xff,
731 		(WLAN_OUI_WFA >> 8) & 0xff,
732 		WLAN_OUI_WFA & 0xff,
733 		WLAN_OUI_TYPE_WFA_P2P,
734 	};
735 
736 	rcu_read_lock();
737 
738 	resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data);
739 	if (!resp_data)
740 		goto out;
741 
742 	if (!resp_data->notif.noa_active)
743 		goto out;
744 
745 	ie = cfg80211_find_ie_match(WLAN_EID_VENDOR_SPECIFIC,
746 				    mgmt->u.probe_resp.variable,
747 				    skb->len - base_len,
748 				    match, 4, 2);
749 	if (!ie) {
750 		IWL_DEBUG_TX(mvm, "probe resp doesn't have P2P IE\n");
751 		goto out;
752 	}
753 
754 	if (skb_tailroom(skb) < resp_data->noa_len) {
755 		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
756 			IWL_ERR(mvm,
757 				"Failed to reallocate probe resp\n");
758 			goto out;
759 		}
760 	}
761 
762 	pos = skb_put(skb, resp_data->noa_len);
763 
764 	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
765 	/* Set length of IE body (not including ID and length itself) */
766 	*pos++ = resp_data->noa_len - 2;
767 	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
768 	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
769 	*pos++ = WLAN_OUI_WFA & 0xff;
770 	*pos++ = WLAN_OUI_TYPE_WFA_P2P;
771 
772 	memcpy(pos, &resp_data->notif.noa_attr,
773 	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
774 
775 out:
776 	rcu_read_unlock();
777 }
778 
779 int iwl_mvm_tx_skb_non_sta(struct iwl_mvm *mvm, struct sk_buff *skb)
780 {
781 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
782 	struct ieee80211_tx_info info;
783 	struct iwl_device_tx_cmd *dev_cmd;
784 	u8 sta_id;
785 	int hdrlen = ieee80211_hdrlen(hdr->frame_control);
786 	__le16 fc = hdr->frame_control;
787 	bool offchannel = IEEE80211_SKB_CB(skb)->flags &
788 		IEEE80211_TX_CTL_TX_OFFCHAN;
789 	int queue = -1;
790 
791 	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
792 		return -1;
793 
794 	memcpy(&info, skb->cb, sizeof(info));
795 
796 	if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
797 		return -1;
798 
799 	if (WARN_ON_ONCE(info.flags & IEEE80211_TX_CTL_AMPDU))
800 		return -1;
801 
802 	if (info.control.vif) {
803 		struct iwl_mvm_vif *mvmvif =
804 			iwl_mvm_vif_from_mac80211(info.control.vif);
805 
806 		if (info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE ||
807 		    info.control.vif->type == NL80211_IFTYPE_AP ||
808 		    info.control.vif->type == NL80211_IFTYPE_ADHOC) {
809 			u32 link_id = u32_get_bits(info.control.flags,
810 						   IEEE80211_TX_CTRL_MLO_LINK);
811 			struct iwl_mvm_vif_link_info *link;
812 
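			/*
			 * mac80211 did not specify a link: fall back to the
			 * first active link, or to link 0 if none are active
			 * yet (e.g. a non-MLO interface).
			 */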
813 			if (link_id == IEEE80211_LINK_UNSPECIFIED) {
814 				if (info.control.vif->active_links)
815 					link_id = ffs(info.control.vif->active_links) - 1;
816 				else
817 					link_id = 0;
818 			}
819 
820 			link = mvmvif->link[link_id];
821 			if (WARN_ON(!link))
822 				return -1;
823 
824 			if (!ieee80211_is_data(hdr->frame_control))
825 				sta_id = link->bcast_sta.sta_id;
826 			else
827 				sta_id = link->mcast_sta.sta_id;
828 
829 			queue = iwl_mvm_get_ctrl_vif_queue(mvm, link, &info,
830 							   skb);
831 		} else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
832 			queue = mvm->snif_queue;
833 			sta_id = mvm->snif_sta.sta_id;
834 		} else if (info.control.vif->type == NL80211_IFTYPE_STATION &&
835 			   offchannel) {
836 			/*
837 			 * IWL_MVM_OFFCHANNEL_QUEUE is used for ROC packets,
838 			 * which can be sent from 2 different types of vifs,
839 			 * P2P & STATION.
840 			 * P2P uses the offchannel queue.
841 			 * STATION (HS2.0) uses the auxiliary context of the FW,
842 			 * and hence needs to be sent on the aux queue.
843 			 */
844 			sta_id = mvm->aux_sta.sta_id;
845 			queue = mvm->aux_queue;
846 		}
847 	}
848 
849 	if (queue < 0) {
850 		IWL_ERR(mvm, "No queue was found. Dropping TX\n");
851 		return -1;
852 	}
853 
854 	if (unlikely(ieee80211_is_probe_resp(fc)))
855 		iwl_mvm_probe_resp_set_noa(mvm, skb);
856 
857 	IWL_DEBUG_TX(mvm, "station Id %d, queue=%d\n", sta_id, queue);
858 
859 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, &info, hdrlen, NULL, sta_id,
860 					NULL);
861 	if (!dev_cmd)
862 		return -1;
863 
864 	/* From now on, we cannot access info->control */
865 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
866 
867 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
868 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
869 		return -1;
870 	}
871 
872 	return 0;
873 }
874 
875 unsigned int iwl_mvm_max_amsdu_size(struct iwl_mvm *mvm,
876 				    struct ieee80211_sta *sta, unsigned int tid)
877 {
878 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
879 	u8 ac = tid_to_mac80211_ac[tid];
880 	enum nl80211_band band;
881 	unsigned int txf;
882 	unsigned int val;
883 	int lmac;
884 
885 	/* For HE redirect to trigger based fifos */
886 	if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
887 		ac += 4;
888 
889 	txf = iwl_mvm_mac_ac_to_tx_fifo(mvm, ac);
890 
891 	/*
892 	 * Don't send an AMSDU that will be longer than the TXF.
893 	 * Add a security margin of 256 for the TX command + headers.
894 	 * We also want to have the start of the next packet inside the
895 	 * fifo to be able to send bursts.
896 	 */
897 	val = mvmsta->max_amsdu_len;
898 
899 	if (hweight16(sta->valid_links) <= 1) {
900 		if (sta->valid_links) {
901 			struct ieee80211_bss_conf *link_conf;
902 			unsigned int link = ffs(sta->valid_links) - 1;
903 
904 			rcu_read_lock();
905 			link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
906 			if (WARN_ON(!link_conf))
907 				band = NL80211_BAND_2GHZ;
908 			else
909 				band = link_conf->chanreq.oper.chan->band;
910 			rcu_read_unlock();
911 		} else {
912 			band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
913 		}
914 
915 		lmac = iwl_mvm_get_lmac_id(mvm, band);
916 	} else if (fw_has_capa(&mvm->fw->ucode_capa,
917 			       IWL_UCODE_TLV_CAPA_CDB_SUPPORT)) {
918 		/* for real MLO restrict to both LMACs if they exist */
919 		lmac = IWL_LMAC_5G_INDEX;
920 		val = min_t(unsigned int, val,
921 			    mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
922 		lmac = IWL_LMAC_24G_INDEX;
923 	} else {
924 		lmac = IWL_LMAC_24G_INDEX;
925 	}
926 
927 	return min_t(unsigned int, val,
928 		     mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
929 }
930 
931 #ifdef CONFIG_INET
932 
933 static int
934 iwl_mvm_tx_tso_segment(struct sk_buff *skb, unsigned int num_subframes,
935 		       netdev_features_t netdev_flags,
936 		       struct sk_buff_head *mpdus_skb)
937 {
938 	struct sk_buff *tmp, *next;
939 	struct ieee80211_hdr *hdr = (void *)skb->data;
940 	char cb[sizeof(skb->cb)];
941 	u16 i = 0;
942 	unsigned int tcp_payload_len;
943 	unsigned int mss = skb_shinfo(skb)->gso_size;
944 	bool ipv4 = (skb->protocol == htons(ETH_P_IP));
945 	bool qos = ieee80211_is_data_qos(hdr->frame_control);
946 	u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
947 
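	/*
	 * Temporarily inflate gso_size so that skb_gso_segment() below emits
	 * one skb per A-MSDU, each carrying up to num_subframes MSS-sized
	 * subframes; the original gso_size is restored right afterwards.
	 */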
948 	skb_shinfo(skb)->gso_size = num_subframes * mss;
949 	memcpy(cb, skb->cb, sizeof(cb));
950 
951 	next = skb_gso_segment(skb, netdev_flags);
952 	skb_shinfo(skb)->gso_size = mss;
953 	skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
954 
955 	if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
956 		return -ENOMEM;
957 
958 	if (WARN_ONCE(IS_ERR(next),
959 		      "skb_gso_segment error: %d\n", (int)PTR_ERR(next)))
960 		return PTR_ERR(next);
961 
962 	if (next)
963 		consume_skb(skb);
964 
965 	skb_list_walk_safe(next, tmp, next) {
966 		memcpy(tmp->cb, cb, sizeof(tmp->cb));
967 		/*
968 		 * Compute the length of all the data added for the A-MSDU.
969 		 * This will be used to compute the length to write in the TX
970 		 * command. We have: SNAP + IP + TCP for n - 1 subframes and
971 		 * ETH header for n subframes.
972 		 */
973 		tcp_payload_len = skb_tail_pointer(tmp) -
974 			skb_transport_header(tmp) -
975 			tcp_hdrlen(tmp) + tmp->data_len;
976 
977 		if (ipv4)
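		/*
		 * Each segment produced above stands in for num_subframes
		 * original MSS-sized packets, so advance the IPv4 ID by
		 * num_subframes per emitted A-MSDU.
		 */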
978 			ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
979 
980 		if (tcp_payload_len > mss) {
981 			skb_shinfo(tmp)->gso_size = mss;
982 			skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
983 							   SKB_GSO_TCPV6;
984 		} else {
985 			if (qos) {
986 				u8 *qc;
987 
988 				if (ipv4)
989 					ip_send_check(ip_hdr(tmp));
990 
991 				qc = ieee80211_get_qos_ctl((void *)tmp->data);
992 				*qc &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
993 			}
994 			skb_shinfo(tmp)->gso_size = 0;
995 		}
996 
997 		skb_mark_not_on_list(tmp);
998 		__skb_queue_tail(mpdus_skb, tmp);
999 		i++;
1000 	}
1001 
1002 	return 0;
1003 }
1004 
1005 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
1006 			  struct ieee80211_tx_info *info,
1007 			  struct ieee80211_sta *sta,
1008 			  struct sk_buff_head *mpdus_skb)
1009 {
1010 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1011 	struct ieee80211_hdr *hdr = (void *)skb->data;
1012 	unsigned int mss = skb_shinfo(skb)->gso_size;
1013 	unsigned int num_subframes, tcp_payload_len, subf_len, max_amsdu_len;
1014 	u16 snap_ip_tcp, pad;
1015 	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
1016 	u8 tid;
1017 
1018 	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
1019 
1020 	if (!mvmsta->max_amsdu_len ||
1021 	    !ieee80211_is_data_qos(hdr->frame_control) ||
1022 	    !mvmsta->amsdu_enabled)
1023 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
1024 
1025 	/*
1026 	 * Do not build an A-MSDU for IPv6 with extension headers;
1027 	 * ask the stack to segment and checksum the generated MPDUs for us.
1028 	 */
1029 	if (skb->protocol == htons(ETH_P_IPV6) &&
1030 	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
1031 	    IPPROTO_TCP) {
1032 		netdev_flags &= ~NETIF_F_CSUM_MASK;
1033 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
1034 	}
1035 
1036 	tid = ieee80211_get_tid(hdr);
1037 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
1038 		return -EINVAL;
1039 
1040 	/*
1041 	 * No need to lock amsdu_in_ampdu_allowed since it can't be modified
1042 	 * during a BA session.
1043 	 */
1044 	if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
1045 	     !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
1046 	    !(mvmsta->amsdu_enabled & BIT(tid)))
1047 		return iwl_mvm_tx_tso_segment(skb, 1, netdev_flags, mpdus_skb);
1048 
1049 	/*
1050 	 * Take the min of the ieee80211 station and mvm station A-MSDU limits
1051 	 */
1052 	max_amsdu_len =
1053 		min_t(unsigned int, sta->cur->max_amsdu_len,
1054 		      iwl_mvm_max_amsdu_size(mvm, sta, tid));
1055 
1056 	/*
1057 	 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
1058 	 * supported. This is a spec requirement (IEEE 802.11-2015
1059 	 * section 8.7.3 NOTE 3).
1060 	 */
1061 	if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1062 	    !sta->deflink.vht_cap.vht_supported)
1063 		max_amsdu_len = min_t(unsigned int, max_amsdu_len, 4095);
1064 
1065 	/* Sub frame header + SNAP + IP header + TCP header + MSS */
1066 	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
1067 	pad = (4 - subf_len) & 0x3;
1068 
1069 	/*
1070 	 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
1071 	 * N * subf_len + (N - 1) * pad.
1072 	 */
1073 	num_subframes = (max_amsdu_len + pad) / (subf_len + pad);
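	/*
	 * For example, for plain IPv4/TCP without options (snap_ip_tcp =
	 * 8 + 20 + 20 = 48) and an MSS of 1460, subf_len = 14 + 48 + 1460 =
	 * 1522 and pad = 2, so a 7935-byte A-MSDU limit yields 5 subframes.
	 */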
1074 
1075 	if (sta->max_amsdu_subframes &&
1076 	    num_subframes > sta->max_amsdu_subframes)
1077 		num_subframes = sta->max_amsdu_subframes;
1078 
1079 	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1080 		tcp_hdrlen(skb) + skb->data_len;
1081 
1082 	/*
1083 	 * Make sure we have enough TBs for the A-MSDU:
1084 	 *	2 for each subframe
1085 	 *	1 more for each fragment
1086 	 *	1 more for the potential data in the header
1087 	 */
1088 	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
1089 	    mvm->trans->max_skb_frags)
1090 		num_subframes = 1;
1091 
1092 	if (num_subframes > 1)
1093 		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1094 
1095 	/* This skb fits in one single A-MSDU */
1096 	if (num_subframes * mss >= tcp_payload_len) {
1097 		__skb_queue_tail(mpdus_skb, skb);
1098 		return 0;
1099 	}
1100 
1101 	/*
1102 	 * Trick the segmentation function to make it
1103 	 * create SKBs that can fit into one A-MSDU.
1104 	 */
1105 	return iwl_mvm_tx_tso_segment(skb, num_subframes, netdev_flags,
1106 				      mpdus_skb);
1107 }
1108 #else /* CONFIG_INET */
1109 static int iwl_mvm_tx_tso(struct iwl_mvm *mvm, struct sk_buff *skb,
1110 			  struct ieee80211_tx_info *info,
1111 			  struct ieee80211_sta *sta,
1112 			  struct sk_buff_head *mpdus_skb)
1113 {
1114 	/* Impossible to get TSO without CONFIG_INET */
1115 	WARN_ON(1);
1116 
1117 	return -1;
1118 }
1119 #endif
1120 
1121 /* Check if there are any timed-out TIDs on a given shared TXQ */
1122 static bool iwl_mvm_txq_should_update(struct iwl_mvm *mvm, int txq_id)
1123 {
1124 	unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
1125 	unsigned long now = jiffies;
1126 	int tid;
1127 
1128 	if (WARN_ON(iwl_mvm_has_new_tx_api(mvm)))
1129 		return false;
1130 
1131 	for_each_set_bit(tid, &queue_tid_bitmap, IWL_MAX_TID_COUNT + 1) {
1132 		if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
1133 				IWL_MVM_DQA_QUEUE_TIMEOUT, now))
1134 			return true;
1135 	}
1136 
1137 	return false;
1138 }
1139 
1140 static void iwl_mvm_tx_airtime(struct iwl_mvm *mvm,
1141 			       struct iwl_mvm_sta *mvmsta,
1142 			       int airtime)
1143 {
1144 	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1145 	struct iwl_mvm_tcm_mac *mdata;
1146 
1147 	if (mac >= NUM_MAC_INDEX_DRIVER)
1148 		return;
1149 
1150 	mdata = &mvm->tcm.data[mac];
1151 
1152 	if (mvm->tcm.paused)
1153 		return;
1154 
1155 	if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1156 		schedule_delayed_work(&mvm->tcm.work, 0);
1157 
1158 	mdata->tx.airtime += airtime;
1159 }
1160 
1161 static int iwl_mvm_tx_pkt_queued(struct iwl_mvm *mvm,
1162 				 struct iwl_mvm_sta *mvmsta, int tid)
1163 {
1164 	u32 ac = tid_to_mac80211_ac[tid];
1165 	int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1166 	struct iwl_mvm_tcm_mac *mdata;
1167 
1168 	if (mac >= NUM_MAC_INDEX_DRIVER)
1169 		return -EINVAL;
1170 
1171 	mdata = &mvm->tcm.data[mac];
1172 
1173 	mdata->tx.pkts[ac]++;
1174 
1175 	return 0;
1176 }
1177 
1178 /*
1179  * Builds the Tx command for a single MPDU and hands it to the transport.
1180  *
1181  * This function must be called with BHs disabled.
1182  */
1183 static int iwl_mvm_tx_mpdu(struct iwl_mvm *mvm, struct sk_buff *skb,
1184 			   struct ieee80211_tx_info *info,
1185 			   struct ieee80211_sta *sta,
1186 			   const u8 *addr3_override)
1187 {
1188 	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1189 	struct iwl_mvm_sta *mvmsta;
1190 	struct iwl_device_tx_cmd *dev_cmd;
1191 	__le16 fc;
1192 	u16 seq_number = 0;
1193 	u8 tid = IWL_MAX_TID_COUNT;
1194 	u16 txq_id;
1195 	bool is_ampdu = false;
1196 	int hdrlen;
1197 
1198 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
1199 	fc = hdr->frame_control;
1200 	hdrlen = ieee80211_hdrlen(fc);
1201 
1202 	if (IWL_MVM_NON_TRANSMITTING_AP && ieee80211_is_probe_resp(fc))
1203 		return -1;
1204 
1205 	if (WARN_ON_ONCE(!mvmsta))
1206 		return -1;
1207 
1208 	if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1209 		return -1;
1210 
1211 	if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
1212 		return -1;
1213 
1214 	if (unlikely(ieee80211_is_probe_resp(fc)))
1215 		iwl_mvm_probe_resp_set_noa(mvm, skb);
1216 
1217 	dev_cmd = iwl_mvm_set_tx_params(mvm, skb, info, hdrlen,
1218 					sta, mvmsta->deflink.sta_id,
1219 					addr3_override);
1220 	if (!dev_cmd)
1221 		goto drop;
1222 
1223 	/*
1224 	 * we handle that entirely ourselves -- for uAPSD the firmware
1225 	 * will always send a notification, and for PS-Poll responses
1226 	 * we'll notify mac80211 when getting frame status
1227 	 */
1228 	info->flags &= ~IEEE80211_TX_STATUS_EOSP;
1229 
1230 	spin_lock(&mvmsta->lock);
1231 
1232 	/* nullfunc frames should go to the MGMT queue regardless of QoS;
1233 	 * the !ieee80211_is_qos_nullfunc(fc) and !ieee80211_is_data_qos(fc)
1234 	 * conditions keep the default assignment of the MGMT TID
1235 	 */
1236 	if (ieee80211_is_data_qos(fc) && !ieee80211_is_qos_nullfunc(fc)) {
1237 		tid = ieee80211_get_tid(hdr);
1238 		if (WARN_ONCE(tid >= IWL_MAX_TID_COUNT, "Invalid TID %d", tid))
1239 			goto drop_unlock_sta;
1240 
1241 		is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
1242 		if (WARN_ONCE(is_ampdu &&
1243 			      mvmsta->tid_data[tid].state != IWL_AGG_ON,
1244 			      "Invalid internal agg state %d for TID %d",
1245 			       mvmsta->tid_data[tid].state, tid))
1246 			goto drop_unlock_sta;
1247 
1248 		seq_number = mvmsta->tid_data[tid].seq_number;
1249 		seq_number &= IEEE80211_SCTL_SEQ;
1250 
1251 		if (!iwl_mvm_has_new_tx_api(mvm)) {
1252 			struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1253 
1254 			hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1255 			hdr->seq_ctrl |= cpu_to_le16(seq_number);
1256 			/* update the tx_cmd hdr as it was already copied */
1257 			tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
1258 		}
1259 	} else if (ieee80211_is_data(fc) && !ieee80211_is_data_qos(fc) &&
1260 		   !ieee80211_is_nullfunc(fc)) {
1261 		tid = IWL_TID_NON_QOS;
1262 	}
1263 
1264 	txq_id = mvmsta->tid_data[tid].txq_id;
1265 
1266 	WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1267 
1268 	if (WARN_ONCE(txq_id == IWL_MVM_INVALID_QUEUE, "Invalid TXQ id")) {
1269 		iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1270 		spin_unlock(&mvmsta->lock);
1271 		return -1;
1272 	}
1273 
1274 	if (!iwl_mvm_has_new_tx_api(mvm)) {
1275 		/* Keep track of the time of the last frame for this RA/TID */
1276 		mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
1277 
1278 		/*
1279 		 * If we have timed-out TIDs - schedule the worker that will
1280 		 * reconfig the queues and update them
1281 		 *
1282 		 * Note that no lock is taken here in order to not serialize
1283 		 * the TX flow. This isn't dangerous because scheduling
1284 		 * mvm->add_stream_wk can't ruin the state, and if we DON'T
1285 		 * schedule it due to some race condition then next TX we get
1286 		 * here we will.
1287 		 */
1288 		if (unlikely(mvm->queue_info[txq_id].status ==
1289 			     IWL_MVM_QUEUE_SHARED &&
1290 			     iwl_mvm_txq_should_update(mvm, txq_id)))
1291 			schedule_work(&mvm->add_stream_wk);
1292 	}
1293 
1294 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
1295 		     mvmsta->deflink.sta_id, tid, txq_id,
1296 		     IEEE80211_SEQ_TO_SN(seq_number), skb->len);
1297 
1298 	/* From now on, we cannot access info->control */
1299 	iwl_mvm_skb_prepare_status(skb, dev_cmd);
1300 
1301 	/*
1302 	 * The IV is introduced by the HW for new tx api, and it is not present
1303 	 * in the skb, hence, don't tell iwl_mvm_mei_tx_copy_to_csme about the
1304 	 * IV for those devices.
1305 	 */
1306 	if (ieee80211_is_data(fc))
1307 		iwl_mvm_mei_tx_copy_to_csme(mvm, skb,
1308 					    info->control.hw_key &&
1309 					    !iwl_mvm_has_new_tx_api(mvm) ?
1310 					    info->control.hw_key->iv_len : 0);
1311 
1312 	if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1313 		goto drop_unlock_sta;
1314 
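	/*
	 * If this wasn't a fragment, advance the driver's sequence number by
	 * one MSDU: the sequence number occupies bits 4-15 of seq_ctrl
	 * (IEEE80211_SCTL_SEQ), so a single step is 0x10.
	 */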
1315 	if (tid < IWL_MAX_TID_COUNT && !ieee80211_has_morefrags(fc))
1316 		mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
1317 
1318 	spin_unlock(&mvmsta->lock);
1319 
1320 	if (iwl_mvm_tx_pkt_queued(mvm, mvmsta,
1321 				  tid == IWL_MAX_TID_COUNT ? 0 : tid))
1322 		goto drop;
1323 
1324 	return 0;
1325 
1326 drop_unlock_sta:
1327 	iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1328 	spin_unlock(&mvmsta->lock);
1329 drop:
1330 	IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id,
1331 		     tid);
1332 	return -1;
1333 }
1334 
1335 int iwl_mvm_tx_skb_sta(struct iwl_mvm *mvm, struct sk_buff *skb,
1336 		       struct ieee80211_sta *sta)
1337 {
1338 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1339 	struct ieee80211_tx_info info;
1340 	struct sk_buff_head mpdus_skbs;
1341 	struct ieee80211_vif *vif;
1342 	unsigned int payload_len;
1343 	int ret;
1344 	struct sk_buff *orig_skb = skb;
1345 	const u8 *addr3;
1346 
1347 	if (WARN_ON_ONCE(!mvmsta))
1348 		return -1;
1349 
1350 	if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1351 		return -1;
1352 
1353 	memcpy(&info, skb->cb, sizeof(info));
1354 
1355 	if (!skb_is_gso(skb))
1356 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
1357 
1358 	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1359 		tcp_hdrlen(skb) + skb->data_len;
1360 
1361 	if (payload_len <= skb_shinfo(skb)->gso_size)
1362 		return iwl_mvm_tx_mpdu(mvm, skb, &info, sta, NULL);
1363 
1364 	__skb_queue_head_init(&mpdus_skbs);
1365 
1366 	vif = info.control.vif;
1367 	if (!vif)
1368 		return -1;
1369 
1370 	ret = iwl_mvm_tx_tso(mvm, skb, &info, sta, &mpdus_skbs);
1371 	if (ret)
1372 		return ret;
1373 
1374 	WARN_ON(skb_queue_empty(&mpdus_skbs));
1375 
1376 	/*
1377 	 * As described in IEEE Std 802.11-2020, table 9-30 (Address
1378 	 * field contents), A-MSDU address 3 should contain the BSSID
1379 	 * address.
1380 	 * Pass address 3 down to iwl_mvm_tx_mpdu() and further to set it
1381 	 * in the command header. We need to preserve the original
1382 	 * address 3 in the skb header to correctly create all the
1383 	 * A-MSDU subframe headers from it.
1384 	 */
1385 	switch (vif->type) {
1386 	case NL80211_IFTYPE_STATION:
1387 		addr3 = vif->cfg.ap_addr;
1388 		break;
1389 	case NL80211_IFTYPE_AP:
1390 		addr3 = vif->addr;
1391 		break;
1392 	default:
1393 		addr3 = NULL;
1394 		break;
1395 	}
1396 
1397 	while (!skb_queue_empty(&mpdus_skbs)) {
1398 		struct ieee80211_hdr *hdr;
1399 		bool amsdu;
1400 
1401 		skb = __skb_dequeue(&mpdus_skbs);
1402 		hdr = (void *)skb->data;
1403 		amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
1404 			(*ieee80211_get_qos_ctl(hdr) &
1405 			 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
1406 
1407 		ret = iwl_mvm_tx_mpdu(mvm, skb, &info, sta,
1408 				      amsdu ? addr3 : NULL);
1409 		if (ret) {
1410 			/* Free skbs created as part of TSO logic that have not yet been dequeued */
1411 			__skb_queue_purge(&mpdus_skbs);
1412 			/* skb here is not necessarily the same skb that entered this method,
1413 			 * so free it explicitly.
1414 			 */
1415 			if (skb == orig_skb)
1416 				ieee80211_free_txskb(mvm->hw, skb);
1417 			else
1418 				kfree_skb(skb);
1419 			/* there was an error, but we consumed the skb one way or another, so return 0 */
1420 			return 0;
1421 		}
1422 	}
1423 
1424 	return 0;
1425 }
1426 
1427 static void iwl_mvm_check_ratid_empty(struct iwl_mvm *mvm,
1428 				      struct ieee80211_sta *sta, u8 tid)
1429 {
1430 	struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1431 	struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1432 	struct ieee80211_vif *vif = mvmsta->vif;
1433 	u16 normalized_ssn;
1434 
1435 	lockdep_assert_held(&mvmsta->lock);
1436 
1437 	if ((tid_data->state == IWL_AGG_ON ||
1438 	     tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1439 	    iwl_mvm_tid_queued(mvm, tid_data) == 0) {
1440 		/*
1441 		 * Now that this aggregation or DQA queue is empty tell
1442 		 * mac80211 so it knows we no longer have frames buffered for
1443 		 * the station on this TID (for the TIM bitmap calculation.)
1444 		 */
1445 		ieee80211_sta_set_buffered(sta, tid, false);
1446 	}
1447 
1448 	/*
1449 	 * In 22000 HW, the next_reclaimed index is only 8 bits, so we'll need
1450 	 * to align the wrap around of ssn so we compare relevant values.
1451 	 */
1452 	normalized_ssn = tid_data->ssn;
1453 	if (mvm->trans->trans_cfg->gen2)
1454 		normalized_ssn &= 0xff;
1455 
1456 	if (normalized_ssn != tid_data->next_reclaimed)
1457 		return;
1458 
1459 	switch (tid_data->state) {
1460 	case IWL_EMPTYING_HW_QUEUE_ADDBA:
1461 		IWL_DEBUG_TX_QUEUES(mvm,
1462 				    "Can continue addBA flow ssn = next_recl = %d\n",
1463 				    tid_data->next_reclaimed);
1464 		tid_data->state = IWL_AGG_STARTING;
1465 		ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1466 		break;
1467 
1468 	case IWL_EMPTYING_HW_QUEUE_DELBA:
1469 		IWL_DEBUG_TX_QUEUES(mvm,
1470 				    "Can continue DELBA flow ssn = next_recl = %d\n",
1471 				    tid_data->next_reclaimed);
1472 		tid_data->state = IWL_AGG_OFF;
1473 		ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1474 		break;
1475 
1476 	default:
1477 		break;
1478 	}
1479 }
1480 
1481 #ifdef CONFIG_IWLWIFI_DEBUG
1482 const char *iwl_mvm_get_tx_fail_reason(u32 status)
1483 {
1484 #define TX_STATUS_FAIL(x) case TX_STATUS_FAIL_ ## x: return #x
1485 #define TX_STATUS_POSTPONE(x) case TX_STATUS_POSTPONE_ ## x: return #x
1486 
1487 	switch (status & TX_STATUS_MSK) {
1488 	case TX_STATUS_SUCCESS:
1489 		return "SUCCESS";
1490 	TX_STATUS_POSTPONE(DELAY);
1491 	TX_STATUS_POSTPONE(FEW_BYTES);
1492 	TX_STATUS_POSTPONE(BT_PRIO);
1493 	TX_STATUS_POSTPONE(QUIET_PERIOD);
1494 	TX_STATUS_POSTPONE(CALC_TTAK);
1495 	TX_STATUS_FAIL(INTERNAL_CROSSED_RETRY);
1496 	TX_STATUS_FAIL(SHORT_LIMIT);
1497 	TX_STATUS_FAIL(LONG_LIMIT);
1498 	TX_STATUS_FAIL(UNDERRUN);
1499 	TX_STATUS_FAIL(DRAIN_FLOW);
1500 	TX_STATUS_FAIL(RFKILL_FLUSH);
1501 	TX_STATUS_FAIL(LIFE_EXPIRE);
1502 	TX_STATUS_FAIL(DEST_PS);
1503 	TX_STATUS_FAIL(HOST_ABORTED);
1504 	TX_STATUS_FAIL(BT_RETRY);
1505 	TX_STATUS_FAIL(STA_INVALID);
1506 	TX_STATUS_FAIL(FRAG_DROPPED);
1507 	TX_STATUS_FAIL(TID_DISABLE);
1508 	TX_STATUS_FAIL(FIFO_FLUSHED);
1509 	TX_STATUS_FAIL(SMALL_CF_POLL);
1510 	TX_STATUS_FAIL(FW_DROP);
1511 	TX_STATUS_FAIL(STA_COLOR_MISMATCH);
1512 	}
1513 
1514 	return "UNKNOWN";
1515 
1516 #undef TX_STATUS_FAIL
1517 #undef TX_STATUS_POSTPONE
1518 }
1519 #endif /* CONFIG_IWLWIFI_DEBUG */
1520 
1521 static int iwl_mvm_get_hwrate_chan_width(u32 chan_width)
1522 {
1523 	switch (chan_width) {
1524 	case RATE_MCS_CHAN_WIDTH_20:
1525 		return 0;
1526 	case RATE_MCS_CHAN_WIDTH_40:
1527 		return IEEE80211_TX_RC_40_MHZ_WIDTH;
1528 	case RATE_MCS_CHAN_WIDTH_80:
1529 		return IEEE80211_TX_RC_80_MHZ_WIDTH;
1530 	case RATE_MCS_CHAN_WIDTH_160:
1531 		return IEEE80211_TX_RC_160_MHZ_WIDTH;
1532 	default:
1533 		return 0;
1534 	}
1535 }
1536 
1537 void iwl_mvm_hwrate_to_tx_rate(u32 rate_n_flags,
1538 			       enum nl80211_band band,
1539 			       struct ieee80211_tx_rate *r)
1540 {
1541 	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1542 	u32 rate = format == RATE_MCS_HT_MSK ?
1543 		RATE_HT_MCS_INDEX(rate_n_flags) :
1544 		rate_n_flags & RATE_MCS_CODE_MSK;
1545 
1546 	r->flags |=
1547 		iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1548 					      RATE_MCS_CHAN_WIDTH_MSK);
1549 
1550 	if (rate_n_flags & RATE_MCS_SGI_MSK)
1551 		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1552 	if (format ==  RATE_MCS_HT_MSK) {
1553 		r->flags |= IEEE80211_TX_RC_MCS;
1554 		r->idx = rate;
1555 	} else if (format ==  RATE_MCS_VHT_MSK) {
1556 		ieee80211_rate_set_vht(r, rate,
1557 				       FIELD_GET(RATE_MCS_NSS_MSK,
1558 						 rate_n_flags) + 1);
1559 		r->flags |= IEEE80211_TX_RC_VHT_MCS;
1560 	} else if (format == RATE_MCS_HE_MSK) {
1561 		/* mac80211 cannot do this without ieee80211_tx_status_ext()
1562 		 * but it only matters for radiotap */
1563 		r->idx = 0;
1564 	} else {
1565 		r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1566 							       band);
1567 	}
1568 }
1569 
1570 void iwl_mvm_hwrate_to_tx_rate_v1(u32 rate_n_flags,
1571 				  enum nl80211_band band,
1572 				  struct ieee80211_tx_rate *r)
1573 {
1574 	if (rate_n_flags & RATE_HT_MCS_GF_MSK)
1575 		r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1576 
1577 	r->flags |=
1578 		iwl_mvm_get_hwrate_chan_width(rate_n_flags &
1579 					      RATE_MCS_CHAN_WIDTH_MSK_V1);
1580 
1581 	if (rate_n_flags & RATE_MCS_SGI_MSK_V1)
1582 		r->flags |= IEEE80211_TX_RC_SHORT_GI;
1583 	if (rate_n_flags & RATE_MCS_HT_MSK_V1) {
1584 		r->flags |= IEEE80211_TX_RC_MCS;
1585 		r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
1586 	} else if (rate_n_flags & RATE_MCS_VHT_MSK_V1) {
1587 		ieee80211_rate_set_vht(
1588 			r, rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK,
1589 			FIELD_GET(RATE_MCS_NSS_MSK, rate_n_flags) + 1);
1590 		r->flags |= IEEE80211_TX_RC_VHT_MCS;
1591 	} else {
1592 		r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1593 							     band);
1594 	}
1595 }
1596 
1597 /*
1598  * translate ucode response to mac80211 tx status control values
1599  */
1600 static void iwl_mvm_hwrate_to_tx_status(const struct iwl_fw *fw,
1601 					u32 rate_n_flags,
1602 					struct ieee80211_tx_info *info)
1603 {
1604 	struct ieee80211_tx_rate *r = &info->status.rates[0];
1605 
1606 	if (iwl_fw_lookup_notif_ver(fw, LONG_GROUP,
1607 				    TX_CMD, 0) <= 6)
1608 		rate_n_flags = iwl_new_rate_from_v1(rate_n_flags);
1609 
1610 	info->status.antenna =
1611 		((rate_n_flags & RATE_MCS_ANT_AB_MSK) >> RATE_MCS_ANT_POS);
1612 	iwl_mvm_hwrate_to_tx_rate(rate_n_flags,
1613 				  info->band, r);
1614 }
1615 
1616 static void iwl_mvm_tx_status_check_trigger(struct iwl_mvm *mvm,
1617 					    u32 status, __le16 frame_control)
1618 {
1619 	struct iwl_fw_dbg_trigger_tlv *trig;
1620 	struct iwl_fw_dbg_trigger_tx_status *status_trig;
1621 	int i;
1622 
1623 	if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS) {
1624 		enum iwl_fw_ini_time_point tp =
1625 			IWL_FW_INI_TIME_POINT_TX_FAILED;
1626 
1627 		if (ieee80211_is_action(frame_control))
1628 			tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1629 
1630 		iwl_dbg_tlv_time_point(&mvm->fwrt,
1631 				       tp, NULL);
1632 		return;
1633 	}
1634 
1635 	trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1636 				     FW_DBG_TRIGGER_TX_STATUS);
1637 	if (!trig)
1638 		return;
1639 
1640 	status_trig = (void *)trig->data;
1641 
1642 	for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1643 		/* don't collect on status 0 */
1644 		if (!status_trig->statuses[i].status)
1645 			break;
1646 
1647 		if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1648 			continue;
1649 
1650 		iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1651 					"Tx status %d was received",
1652 					status & TX_STATUS_MSK);
1653 		break;
1654 	}
1655 }
1656 
1657 /*
1658  * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1659  * @tx_resp: the Tx response from the fw (agg or non-agg)
1660  *
1661  * When the fw sends an AMPDU, it fetches the MPDUs one after the other. Since
1662  * it can't know that everything will go well until the end of the AMPDU, it
1663  * can't know in advance the number of MPDUs that will be sent in the current
1664  * batch. This is why it writes the agg Tx response while it fetches the MPDUs.
1665  * Hence, it can't know in advance what the SSN of the SCD will be at the end
1666  * of the batch. This is why the SSN of the SCD is written at the end of the
1667  * whole struct at a variable offset. This function knows how to cope with the
1668  * variable offset and returns the SSN of the SCD.
1669  *
1670  * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
1671  */
1672 static inline u32 iwl_mvm_get_scd_ssn(struct iwl_mvm *mvm,
1673 				      struct iwl_mvm_tx_resp *tx_resp)
1674 {
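	/*
	 * The SCD SSN word sits right after the frame_count agg_tx_status
	 * entries (each 4 bytes long), hence the __le32 pointer arithmetic.
	 */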
1675 	u32 val = le32_to_cpup((__le32 *)iwl_mvm_get_agg_status(mvm, tx_resp) +
1676 			       tx_resp->frame_count);
1677 
1678 	if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1679 		return val & 0xFFFF;
1680 	return val & 0xFFF;
1681 }
1682 
1683 static void iwl_mvm_rx_tx_cmd_single(struct iwl_mvm *mvm,
1684 				     struct iwl_rx_packet *pkt)
1685 {
1686 	struct ieee80211_sta *sta;
1687 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1688 	int txq_id = SEQ_TO_QUEUE(sequence);
1689 	/* struct iwl_mvm_tx_resp_v3 is almost the same */
1690 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1691 	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1692 	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1693 	struct agg_tx_status *agg_status =
1694 		iwl_mvm_get_agg_status(mvm, tx_resp);
1695 	u32 status = le16_to_cpu(agg_status->status);
1696 	u16 ssn = iwl_mvm_get_scd_ssn(mvm, tx_resp);
1697 	struct sk_buff_head skbs;
1698 	u8 skb_freed = 0;
1699 	u8 lq_color;
1700 	u16 next_reclaimed, seq_ctl;
1701 	bool is_ndp = false;
1702 
1703 	__skb_queue_head_init(&skbs);
1704 
1705 	if (iwl_mvm_has_new_tx_api(mvm))
1706 		txq_id = le16_to_cpu(tx_resp->tx_queue);
1707 
1708 	seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1709 
1710 	/* we can free until ssn % q.n_bd not inclusive */
1711 	iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
1712 
1713 	while (!skb_queue_empty(&skbs)) {
1714 		struct sk_buff *skb = __skb_dequeue(&skbs);
1715 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1716 		struct ieee80211_hdr *hdr = (void *)skb->data;
1717 		bool flushed = false;
1718 
1719 		skb_freed++;
1720 
1721 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1722 
1723 		memset(&info->status, 0, sizeof(info->status));
1724 		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1725 
1726 		/* inform mac80211 about what happened with the frame */
1727 		switch (status & TX_STATUS_MSK) {
1728 		case TX_STATUS_SUCCESS:
1729 		case TX_STATUS_DIRECT_DONE:
1730 			info->flags |= IEEE80211_TX_STAT_ACK;
1731 			break;
1732 		case TX_STATUS_FAIL_FIFO_FLUSHED:
1733 		case TX_STATUS_FAIL_DRAIN_FLOW:
1734 			flushed = true;
1735 			break;
1736 		case TX_STATUS_FAIL_DEST_PS:
1737 			/* the FW should have stopped the queue and not
1738 			 * returned this status
1739 			 */
1740 			IWL_ERR_LIMIT(mvm,
1741 				      "FW reported TX filtered, status=0x%x, FC=0x%x\n",
1742 				      status, le16_to_cpu(hdr->frame_control));
1743 			info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1744 			break;
1745 		default:
1746 			break;
1747 		}
1748 
1749 		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1750 		    ieee80211_is_mgmt(hdr->frame_control))
1751 			iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
1752 
1753 		/*
1754 		 * If we are freeing multiple frames, mark all the frames
1755 		 * but the first one as acked, since they were acknowledged
1756 		 * before
1757 		 */
1758 		if (skb_freed > 1)
1759 			info->flags |= IEEE80211_TX_STAT_ACK;
1760 
1761 		iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
1762 
1763 		info->status.rates[0].count = tx_resp->failure_frame + 1;
1764 
1765 		iwl_mvm_hwrate_to_tx_status(mvm->fw,
1766 					    le32_to_cpu(tx_resp->initial_rate),
1767 					    info);
1768 
1769 		/* Don't assign the converted initial_rate, because driver
1770 		 * TLC uses this and doesn't support the new FW rate
1771 		 */
1772 		info->status.status_driver_data[1] =
1773 			(void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1774 
1775 		/* Single frame failure in an AMPDU queue => send BAR */
1776 		if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1777 		    !(info->flags & IEEE80211_TX_STAT_ACK) &&
1778 		    !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1779 			info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1780 		info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1781 
1782 		/* W/A FW bug: seq_ctl is wrong upon failure / BAR frame */
1783 		if (ieee80211_is_back_req(hdr->frame_control))
1784 			seq_ctl = 0;
1785 		else if (status != TX_STATUS_SUCCESS)
1786 			seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1787 
1788 		if (unlikely(!seq_ctl)) {
1789 			/*
1790 			 * If it is an NDP, we can't update next_reclaimed since
1791 			 * its sequence control is 0. Note that for that same
1792 			 * reason, NDPs are never sent to A-MPDU'able queues
1793 			 * so that we can never have more than one freed frame
1794 			 * for a single Tx response (see WARN_ON below).
1795 			 */
1796 			if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1797 				is_ndp = true;
1798 		}
1799 
1800 		/*
1801 		 * TODO: this is not accurate if we are freeing more than one
1802 		 * packet.
1803 		 */
1804 		info->status.tx_time =
1805 			le16_to_cpu(tx_resp->wireless_media_time);
1806 		BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
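		/* Pack the LQ color and the reduced TX power for the
		 * rate-scaling code; the color lets it match this status
		 * against the LQ table that was in use when the frame was
		 * sent.
		 */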
1807 		lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1808 		info->status.status_driver_data[0] =
1809 			RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1810 
1811 		if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
1812 			ieee80211_tx_status_skb(mvm->hw, skb);
1813 	}
1814 
1815 	/* This is an aggregation queue or might become one, so we use
1816 	 * the ssn since: ssn = wifi seq_num % 256.
1817 	 * The seq_ctl is the sequence control of the packet to which
1818 	 * this Tx response relates. But if there is a hole in the
1819 	 * bitmap of the BA we received, this Tx response may allow to
1820 	 * bitmap of the BA we received, this Tx response may allow us to
1821 	 * reclaim the hole and all the subsequent packets that were
1822 	 * packet to be reclaimed will be ssn and not seq_ctl. In that
1823 	 * case, several packets will be reclaimed even if
1824 	 * frame_count = 1.
1825 	 *
1826 	 * The ssn is the index (% 256) of the latest packet that has been
1827 	 * treated (acked / dropped) + 1.
1828 	 */
1829 	next_reclaimed = ssn;
1830 
1831 	IWL_DEBUG_TX_REPLY(mvm,
1832 			   "TXQ %d status %s (0x%08x)\n",
1833 			   txq_id, iwl_mvm_get_tx_fail_reason(status), status);
1834 
1835 	IWL_DEBUG_TX_REPLY(mvm,
1836 			   "\t\t\t\tinitial_rate 0x%x retries %d, idx=%d ssn=%d next_reclaimed=0x%x seq_ctl=0x%x\n",
1837 			   le32_to_cpu(tx_resp->initial_rate),
1838 			   tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1839 			   ssn, next_reclaimed, seq_ctl);
1840 
1841 	rcu_read_lock();
1842 
1843 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1844 	/*
1845 	 * sta can't be NULL otherwise it'd mean that the sta has been freed in
1846 	 * the firmware while we still have packets for it in the Tx queues.
1847 	 */
1848 	if (WARN_ON_ONCE(!sta))
1849 		goto out;
1850 
1851 	if (!IS_ERR(sta)) {
1852 		struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta);
1853 
1854 		iwl_mvm_tx_airtime(mvm, mvmsta,
1855 				   le16_to_cpu(tx_resp->wireless_media_time));
1856 
1857 		if ((status & TX_STATUS_MSK) != TX_STATUS_SUCCESS &&
1858 		    mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
1859 			iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
1860 
1861 		if (sta->wme && tid != IWL_MGMT_TID) {
1862 			struct iwl_mvm_tid_data *tid_data =
1863 				&mvmsta->tid_data[tid];
1864 			bool send_eosp_ndp = false;
1865 
1866 			spin_lock_bh(&mvmsta->lock);
1867 
1868 			if (!is_ndp) {
1869 				tid_data->next_reclaimed = next_reclaimed;
1870 				IWL_DEBUG_TX_REPLY(mvm,
1871 						   "Next reclaimed packet:%d\n",
1872 						   next_reclaimed);
1873 			} else {
1874 				IWL_DEBUG_TX_REPLY(mvm,
1875 						   "NDP - don't update next_reclaimed\n");
1876 			}
1877 
1878 			iwl_mvm_check_ratid_empty(mvm, sta, tid);
1879 
1880 			if (mvmsta->sleep_tx_count) {
1881 				mvmsta->sleep_tx_count--;
1882 				if (mvmsta->sleep_tx_count &&
1883 				    !iwl_mvm_tid_queued(mvm, tid_data)) {
1884 					/*
1885 					 * The number of frames in the queue
1886 					 * dropped to 0 even if we sent less
1887 					 * frames than we thought we had on the
1888 					 * Tx queue.
1889 					 * This means we had holes in the BA
1890 					 * window that we just filled, ask
1891 					 * mac80211 to send EOSP since the
1892 					 * firmware won't know how to do that.
1893 					 * Send NDP and the firmware will send
1894 					 * EOSP notification that will trigger
1895 					 * a call to ieee80211_sta_eosp().
1896 					 */
1897 					send_eosp_ndp = true;
1898 				}
1899 			}
1900 
1901 			spin_unlock_bh(&mvmsta->lock);
1902 			if (send_eosp_ndp) {
1903 				iwl_mvm_sta_modify_sleep_tx_count(mvm, sta,
1904 					IEEE80211_FRAME_RELEASE_UAPSD,
1905 					1, tid, false, false);
1906 				mvmsta->sleep_tx_count = 0;
1907 				ieee80211_send_eosp_nullfunc(sta, tid);
1908 			}
1909 		}
1910 
1911 		if (mvmsta->next_status_eosp) {
1912 			mvmsta->next_status_eosp = false;
1913 			ieee80211_sta_eosp(sta);
1914 		}
1915 	}
1916 out:
1917 	rcu_read_unlock();
1918 }
1919 
1920 #ifdef CONFIG_IWLWIFI_DEBUG
1921 #define AGG_TX_STATE_(x) case AGG_TX_STATE_ ## x: return #x
1922 static const char *iwl_get_agg_tx_status(u16 status)
1923 {
1924 	switch (status & AGG_TX_STATE_STATUS_MSK) {
1925 	AGG_TX_STATE_(TRANSMITTED);
1926 	AGG_TX_STATE_(UNDERRUN);
1927 	AGG_TX_STATE_(BT_PRIO);
1928 	AGG_TX_STATE_(FEW_BYTES);
1929 	AGG_TX_STATE_(ABORT);
1930 	AGG_TX_STATE_(TX_ON_AIR_DROP);
1931 	AGG_TX_STATE_(LAST_SENT_TRY_CNT);
1932 	AGG_TX_STATE_(LAST_SENT_BT_KILL);
1933 	AGG_TX_STATE_(SCD_QUERY);
1934 	AGG_TX_STATE_(TEST_BAD_CRC32);
1935 	AGG_TX_STATE_(RESPONSE);
1936 	AGG_TX_STATE_(DUMP_TX);
1937 	AGG_TX_STATE_(DELAY_TX);
1938 	}
1939 
1940 	return "UNKNOWN";
1941 }
1942 
1943 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1944 				      struct iwl_rx_packet *pkt)
1945 {
1946 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1947 	struct agg_tx_status *frame_status =
1948 		iwl_mvm_get_agg_status(mvm, tx_resp);
1949 	int i;
1950 	bool trigger_timepoint = false;
1951 
1952 	for (i = 0; i < tx_resp->frame_count; i++) {
1953 		u16 fstatus = le16_to_cpu(frame_status[i].status);
1954 		/* If any frame wasn't transmitted, trigger a time point */
1955 		trigger_timepoint |= ((fstatus & AGG_TX_STATE_STATUS_MSK) !=
1956 				      AGG_TX_STATE_TRANSMITTED);
1957 		IWL_DEBUG_TX_REPLY(mvm,
1958 				   "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1959 				   iwl_get_agg_tx_status(fstatus),
1960 				   fstatus & AGG_TX_STATE_STATUS_MSK,
1961 				   (fstatus & AGG_TX_STATE_TRY_CNT_MSK) >>
1962 					AGG_TX_STATE_TRY_CNT_POS,
1963 				   le16_to_cpu(frame_status[i].sequence));
1964 	}
1965 
1966 	if (trigger_timepoint)
1967 		iwl_dbg_tlv_time_point(&mvm->fwrt,
1968 				       IWL_FW_INI_TIME_POINT_TX_FAILED, NULL);
1969 
1970 }
1971 #else
1972 static void iwl_mvm_rx_tx_cmd_agg_dbg(struct iwl_mvm *mvm,
1973 				      struct iwl_rx_packet *pkt)
1974 {}
1975 #endif /* CONFIG_IWLWIFI_DEBUG */
1976 
1977 static void iwl_mvm_rx_tx_cmd_agg(struct iwl_mvm *mvm,
1978 				  struct iwl_rx_packet *pkt)
1979 {
1980 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1981 	int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1982 	int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1983 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1984 	struct iwl_mvm_sta *mvmsta;
1985 	int queue = SEQ_TO_QUEUE(sequence);
1986 	struct ieee80211_sta *sta;
1987 
1988 	if (WARN_ON_ONCE(queue < IWL_MVM_DQA_MIN_DATA_QUEUE &&
1989 			 (queue != IWL_MVM_DQA_BSS_CLIENT_QUEUE)))
1990 		return;
1991 
1992 	iwl_mvm_rx_tx_cmd_agg_dbg(mvm, pkt);
1993 
1994 	rcu_read_lock();
1995 
1996 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
1997 
1998 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1999 	if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
2000 		rcu_read_unlock();
2001 		return;
2002 	}
2003 
2004 	if (!WARN_ON_ONCE(!mvmsta)) {
2005 		mvmsta->tid_data[tid].rate_n_flags =
2006 			le32_to_cpu(tx_resp->initial_rate);
2007 		mvmsta->tid_data[tid].tx_time =
2008 			le16_to_cpu(tx_resp->wireless_media_time);
2009 		mvmsta->tid_data[tid].lq_color =
2010 			TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
2011 		iwl_mvm_tx_airtime(mvm, mvmsta,
2012 				   le16_to_cpu(tx_resp->wireless_media_time));
2013 	}
2014 
2015 	rcu_read_unlock();
2016 }
2017 
2018 void iwl_mvm_rx_tx_cmd(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2019 {
2020 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
2021 	struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
2022 
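	/* A single-frame response carries the full Tx status and the frame is
	 * reclaimed right away; an A-MPDU response only records per-TID rate
	 * info, and the frames are reclaimed later from the BA notification.
	 */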
2023 	if (tx_resp->frame_count == 1)
2024 		iwl_mvm_rx_tx_cmd_single(mvm, pkt);
2025 	else
2026 		iwl_mvm_rx_tx_cmd_agg(mvm, pkt);
2027 }
2028 
2029 static void iwl_mvm_tx_reclaim(struct iwl_mvm *mvm, int sta_id, int tid,
2030 			       int txq, int index,
2031 			       struct ieee80211_tx_info *tx_info, u32 rate,
2032 			       bool is_flush)
2033 {
2034 	struct sk_buff_head reclaimed_skbs;
2035 	struct iwl_mvm_tid_data *tid_data = NULL;
2036 	struct ieee80211_sta *sta;
2037 	struct iwl_mvm_sta *mvmsta = NULL;
2038 	struct sk_buff *skb;
2039 	int freed;
2040 
2041 	if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
2042 		      tid > IWL_MAX_TID_COUNT,
2043 		      "sta_id %d tid %d", sta_id, tid))
2044 		return;
2045 
2046 	rcu_read_lock();
2047 
2048 	sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
2049 
2050 	/* Reclaiming frames for a station that has been deleted? */
2051 	if (WARN_ON_ONCE(!sta)) {
2052 		rcu_read_unlock();
2053 		return;
2054 	}
2055 
2056 	__skb_queue_head_init(&reclaimed_skbs);
2057 
2058 	/*
2059 	 * Release all TFDs before the SSN, i.e. all TFDs in front of
2060 	 * block-ack window (we assume that they've been successfully
2061 	 * transmitted ... if not, it's too late anyway).
2062 	 */
2063 	iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
2064 
2065 	skb_queue_walk(&reclaimed_skbs, skb) {
2066 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2067 
2068 		iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
2069 
2070 		memset(&info->status, 0, sizeof(info->status));
2071 		/* The packet was transmitted successfully; failures come as
2072 		 * single frames because, before failing a frame, the firmware
2073 		 * transmits it without aggregation at least once.
2074 		 */
2075 		if (!is_flush)
2076 			info->flags |= IEEE80211_TX_STAT_ACK;
2077 		else
2078 			info->flags &= ~IEEE80211_TX_STAT_ACK;
2079 	}
2080 
2081 	/*
2082 	 * It's possible to get a BA response after invalidating the rcu (rcu is
2083 	 * invalidated in order to prevent new Tx from being sent, but there may
2084 	 * be some frames already in-flight).
2085 	 * In this case we just want to reclaim, and could skip all the
2086 	 * sta-dependent stuff since it's in the middle of being removed
2087 	 * anyways.
2088 	 */
2089 	if (IS_ERR(sta))
2090 		goto out;
2091 
2092 	mvmsta = iwl_mvm_sta_from_mac80211(sta);
2093 	tid_data = &mvmsta->tid_data[tid];
2094 
2095 	if (tid_data->txq_id != txq) {
2096 		IWL_ERR(mvm,
2097 			"invalid reclaim request: Q %d, tid %d\n",
2098 			tid_data->txq_id, tid);
2099 		rcu_read_unlock();
2100 		return;
2101 	}
2102 
2103 	spin_lock_bh(&mvmsta->lock);
2104 
2105 	tid_data->next_reclaimed = index;
2106 
2107 	iwl_mvm_check_ratid_empty(mvm, sta, tid);
2108 
2109 	freed = 0;
2110 
2111 	/* pack lq color from tid_data along the reduced txp */
2112 	tx_info->status.status_driver_data[0] =
2113 		RS_DRV_DATA_PACK(tid_data->lq_color,
2114 				 tx_info->status.status_driver_data[0]);
2115 	tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
2116 
2117 	skb_queue_walk(&reclaimed_skbs, skb) {
2118 		struct ieee80211_hdr *hdr = (void *)skb->data;
2119 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
2120 
2121 		if (!is_flush) {
2122 			if (ieee80211_is_data_qos(hdr->frame_control))
2123 				freed++;
2124 			else
2125 				WARN_ON_ONCE(tid != IWL_MAX_TID_COUNT);
2126 		}
2127 
2128 		/* this is the first skb we deliver in this batch */
2129 		/* put the rate scaling data there */
2130 		if (freed == 1) {
2131 			info->flags |= IEEE80211_TX_STAT_AMPDU;
2132 			memcpy(&info->status, &tx_info->status,
2133 			       sizeof(tx_info->status));
2134 			iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
2135 		}
2136 	}
2137 
2138 	spin_unlock_bh(&mvmsta->lock);
2139 
2140 	/* We got a BA notif with 0 acked frames or the scd_ssn didn't progress,
2141 	 * which is possible (i.e. the first MPDU in the aggregation wasn't
2142 	 * acked). Still, it's important to update RS about sent vs. acked.
2143 	 */
2144 	if (!is_flush && skb_queue_empty(&reclaimed_skbs) &&
2145 	    !iwl_mvm_has_tlc_offload(mvm)) {
2146 		struct ieee80211_chanctx_conf *chanctx_conf = NULL;
2147 
2148 		/* no TLC offload, so non-MLD mode */
2149 		if (mvmsta->vif)
2150 			chanctx_conf =
2151 				rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
2152 
2153 		if (WARN_ON_ONCE(!chanctx_conf))
2154 			goto out;
2155 
2156 		tx_info->band = chanctx_conf->def.chan->band;
2157 		iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
2158 
2159 		IWL_DEBUG_TX_REPLY(mvm, "No reclaim. Update rs directly\n");
2160 		iwl_mvm_rs_tx_status(mvm, sta, tid, tx_info, false);
2161 	}
2162 
2163 out:
2164 	rcu_read_unlock();
2165 
2166 	while (!skb_queue_empty(&reclaimed_skbs)) {
2167 		skb = __skb_dequeue(&reclaimed_skbs);
2168 		ieee80211_tx_status_skb(mvm->hw, skb);
2169 	}
2170 }
2171 
2172 void iwl_mvm_rx_ba_notif(struct iwl_mvm *mvm, struct iwl_rx_cmd_buffer *rxb)
2173 {
2174 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
2175 	unsigned int pkt_len = iwl_rx_packet_payload_len(pkt);
2176 	int sta_id, tid, txq, index;
2177 	struct ieee80211_tx_info ba_info = {};
2178 	struct iwl_mvm_ba_notif *ba_notif;
2179 	struct iwl_mvm_tid_data *tid_data;
2180 	struct iwl_mvm_sta *mvmsta;
2181 
2182 	ba_info.flags = IEEE80211_TX_STAT_AMPDU;
2183 
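	/* With the new Tx API the firmware sends a compressed BA notification
	 * that may cover several queues/TIDs at once; older devices send one
	 * BA notification per queue.
	 */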
2184 	if (iwl_mvm_has_new_tx_api(mvm)) {
2185 		struct iwl_mvm_compressed_ba_notif *ba_res =
2186 			(void *)pkt->data;
2187 		u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
2188 		u16 tfd_cnt;
2189 		int i;
2190 
2191 		if (IWL_FW_CHECK(mvm, sizeof(*ba_res) > pkt_len,
2192 				 "short BA notification (%d)\n", pkt_len))
2193 			return;
2194 
2195 		sta_id = ba_res->sta_id;
2196 		ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
2197 		ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
2198 		ba_info.status.tx_time =
2199 			(u16)le32_to_cpu(ba_res->wireless_time);
2200 		ba_info.status.status_driver_data[0] =
2201 			(void *)(uintptr_t)ba_res->reduced_txp;
2202 
2203 		tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
2204 		if (!tfd_cnt)
2205 			return;
2206 
2207 		if (IWL_FW_CHECK(mvm,
2208 				 struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
2209 				 "short BA notification (tfds:%d, size:%d)\n",
2210 				 tfd_cnt, pkt_len))
2211 			return;
2212 
2213 		IWL_DEBUG_TX_REPLY(mvm,
2214 				   "BA_NOTIFICATION Received from sta_id = %d, flags %x, sent:%d, acked:%d\n",
2215 				   sta_id, le32_to_cpu(ba_res->flags),
2216 				   le16_to_cpu(ba_res->txed),
2217 				   le16_to_cpu(ba_res->done));
2218 
2219 		rcu_read_lock();
2220 
2221 		mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2222 		/*
2223 		 * It's possible to get a BA response after invalidating the rcu
2224 		 * (rcu is invalidated in order to prevent new Tx from being
2225 		 * sent, but there may be some frames already in-flight).
2226 		 * In this case we just want to reclaim, and could skip all the
2227 		 * sta-dependent stuff since it's in the middle of being removed
2228 		 * anyways.
2229 		 */
2230 
2231 		/* Free per TID */
2232 		for (i = 0; i < tfd_cnt; i++) {
2233 			struct iwl_mvm_compressed_ba_tfd *ba_tfd =
2234 				&ba_res->tfd[i];
2235 
2236 			tid = ba_tfd->tid;
2237 			if (tid == IWL_MGMT_TID)
2238 				tid = IWL_MAX_TID_COUNT;
2239 
2240 			if (mvmsta)
2241 				mvmsta->tid_data[i].lq_color = lq_color;
2242 
2243 			iwl_mvm_tx_reclaim(mvm, sta_id, tid,
2244 					   (int)(le16_to_cpu(ba_tfd->q_num)),
2245 					   le16_to_cpu(ba_tfd->tfd_index),
2246 					   &ba_info,
2247 					   le32_to_cpu(ba_res->tx_rate), false);
2248 		}
2249 
2250 		if (mvmsta)
2251 			iwl_mvm_tx_airtime(mvm, mvmsta,
2252 					   le32_to_cpu(ba_res->wireless_time));
2253 		rcu_read_unlock();
2254 		return;
2255 	}
2256 
2257 	ba_notif = (void *)pkt->data;
2258 	sta_id = ba_notif->sta_id;
2259 	tid = ba_notif->tid;
2260 	/* "flow" corresponds to Tx queue */
2261 	txq = le16_to_cpu(ba_notif->scd_flow);
2262 	/* "ssn" is start of block-ack Tx window, corresponds to index
2263 	 * (in Tx queue's circular buffer) of first TFD/frame in window */
2264 	index = le16_to_cpu(ba_notif->scd_ssn);
2265 
2266 	rcu_read_lock();
2267 	mvmsta = iwl_mvm_sta_from_staid_rcu(mvm, sta_id);
2268 	if (IWL_FW_CHECK(mvm, !mvmsta,
2269 			 "invalid STA ID %d in BA notif\n",
2270 			 sta_id)) {
2271 		rcu_read_unlock();
2272 		return;
2273 	}
2274 
2275 	tid_data = &mvmsta->tid_data[tid];
2276 
2277 	ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
2278 	ba_info.status.ampdu_len = ba_notif->txed;
2279 	ba_info.status.tx_time = tid_data->tx_time;
2280 	ba_info.status.status_driver_data[0] =
2281 		(void *)(uintptr_t)ba_notif->reduced_txp;
2282 
2283 	rcu_read_unlock();
2284 
2285 	IWL_DEBUG_TX_REPLY(mvm,
2286 			   "BA_NOTIFICATION Received from %pM, sta_id = %d\n",
2287 			   ba_notif->sta_addr, ba_notif->sta_id);
2288 
2289 	IWL_DEBUG_TX_REPLY(mvm,
2290 			   "TID = %d, SeqCtl = %d, bitmap = 0x%llx, scd_flow = %d, scd_ssn = %d sent:%d, acked:%d\n",
2291 			   ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
2292 			   le64_to_cpu(ba_notif->bitmap), txq, index,
2293 			   ba_notif->txed, ba_notif->txed_2_done);
2294 
2295 	IWL_DEBUG_TX_REPLY(mvm, "reduced txp from ba notif %d\n",
2296 			   ba_notif->reduced_txp);
2297 
2298 	iwl_mvm_tx_reclaim(mvm, sta_id, tid, txq, index, &ba_info,
2299 			   tid_data->rate_n_flags, false);
2300 }
2301 
2302 /*
2303  * Note that there are transports that buffer frames before they reach
2304  * the firmware. This means that after flush_tx_path is called, the
2305  * queue might not be empty. The race-free way to handle this is to:
2306  * 1) set the station as draining
2307  * 2) flush the Tx path
2308  * 3) wait for the transport queues to be empty
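 *
 * An illustrative (non-normative) caller sequence, assuming the
 * iwl_mvm_drain_sta() and iwl_trans_wait_tx_queues_empty() helpers used
 * elsewhere in this driver:
 *
 *	iwl_mvm_drain_sta(mvm, mvmsta, true);
 *	iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
 *	iwl_trans_wait_tx_queues_empty(mvm->trans, tfd_queue_mask);
 *	iwl_mvm_drain_sta(mvm, mvmsta, false);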
2309  */
2310 int iwl_mvm_flush_tx_path(struct iwl_mvm *mvm, u32 tfd_msk)
2311 {
2312 	int ret;
2313 	struct iwl_tx_path_flush_cmd_v1 flush_cmd = {
2314 		.queues_ctl = cpu_to_le32(tfd_msk),
2315 		.flush_ctl = cpu_to_le16(DUMP_TX_FIFO_FLUSH),
2316 	};
2317 
2318 	WARN_ON(iwl_mvm_has_new_tx_api(mvm));
2319 	ret = iwl_mvm_send_cmd_pdu(mvm, TXPATH_FLUSH, 0,
2320 				   sizeof(flush_cmd), &flush_cmd);
2321 	if (ret)
2322 		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2323 	return ret;
2324 }
2325 
2326 int iwl_mvm_flush_sta_tids(struct iwl_mvm *mvm, u32 sta_id, u16 tids)
2327 {
2328 	int ret;
2329 	struct iwl_tx_path_flush_cmd_rsp *rsp;
2330 	struct iwl_tx_path_flush_cmd flush_cmd = {
2331 		.sta_id = cpu_to_le32(sta_id),
2332 		.tid_mask = cpu_to_le16(tids),
2333 	};
2334 
2335 	struct iwl_host_cmd cmd = {
2336 		.id = TXPATH_FLUSH,
2337 		.len = { sizeof(flush_cmd), },
2338 		.data = { &flush_cmd, },
2339 	};
2340 
2341 	WARN_ON(!iwl_mvm_has_new_tx_api(mvm));
2342 
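	/* Newer firmware replies to TXPATH_FLUSH with the list of flushed
	 * queues; request the response so the flushed frames can be reclaimed
	 * below.
	 */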
2343 	if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
2344 		cmd.flags |= CMD_WANT_SKB | CMD_SEND_IN_RFKILL;
2345 
2346 	IWL_DEBUG_TX_QUEUES(mvm, "flush for sta id %d tid mask 0x%x\n",
2347 			    sta_id, tids);
2348 
2349 	ret = iwl_mvm_send_cmd(mvm, &cmd);
2350 
2351 	if (ret) {
2352 		IWL_ERR(mvm, "Failed to send flush command (%d)\n", ret);
2353 		return ret;
2354 	}
2355 
2356 	if (cmd.flags & CMD_WANT_SKB) {
2357 		int i;
2358 		int num_flushed_queues;
2359 
2360 		if (WARN_ON_ONCE(iwl_rx_packet_payload_len(cmd.resp_pkt) != sizeof(*rsp))) {
2361 			ret = -EIO;
2362 			goto free_rsp;
2363 		}
2364 
2365 		rsp = (void *)cmd.resp_pkt->data;
2366 
2367 		if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
2368 			      "sta_id %d != rsp_sta_id %d",
2369 			      sta_id, le16_to_cpu(rsp->sta_id))) {
2370 			ret = -EIO;
2371 			goto free_rsp;
2372 		}
2373 
2374 		num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
2375 		if (WARN_ONCE(num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
2376 			      "num_flushed_queues %d", num_flushed_queues)) {
2377 			ret = -EIO;
2378 			goto free_rsp;
2379 		}
2380 
2381 		for (i = 0; i < num_flushed_queues; i++) {
2382 			struct ieee80211_tx_info tx_info = {};
2383 			struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
2384 			int tid = le16_to_cpu(queue_info->tid);
2385 			int read_before = le16_to_cpu(queue_info->read_before_flush);
2386 			int read_after = le16_to_cpu(queue_info->read_after_flush);
2387 			int queue_num = le16_to_cpu(queue_info->queue_num);
2388 
2389 			if (tid == IWL_MGMT_TID)
2390 				tid = IWL_MAX_TID_COUNT;
2391 
2392 			IWL_DEBUG_TX_QUEUES(mvm,
2393 					    "tid %d queue_id %d read-before %d read-after %d\n",
2394 					    tid, queue_num, read_before, read_after);
2395 
2396 			iwl_mvm_tx_reclaim(mvm, sta_id, tid, queue_num, read_after,
2397 					   &tx_info, 0, true);
2398 		}
2399 free_rsp:
2400 		iwl_free_resp(&cmd);
2401 	}
2402 	return ret;
2403 }
2404 
2405 int iwl_mvm_flush_sta(struct iwl_mvm *mvm, u32 sta_id, u32 tfd_queue_mask)
2406 {
2407 	if (iwl_mvm_has_new_tx_api(mvm))
2408 		return iwl_mvm_flush_sta_tids(mvm, sta_id, 0xffff);
2409 
2410 	return iwl_mvm_flush_tx_path(mvm, tfd_queue_mask);
2411 }
2412