xref: /linux/drivers/net/wireless/intel/iwlwifi/mld/tx.c (revision 0b897fbd900e12a08baa3d1a0457944046a882ea)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2024 - 2025 Intel Corporation
4  */
5 #include <net/ip.h>
6 
7 #include "tx.h"
8 #include "sta.h"
9 #include "hcmd.h"
10 #include "iwl-utils.h"
11 #include "iface.h"
12 
13 #include "fw/dbg.h"
14 
15 #include "fw/api/tx.h"
16 #include "fw/api/rs.h"
17 #include "fw/api/txq.h"
18 #include "fw/api/datapath.h"
19 #include "fw/api/time-event.h"
20 
21 #define MAX_ANT_NUM 2
22 
23 /* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
24  * the *index* used for the last TX, and returns the next valid *index* to use.
25  * In order to set it in the tx_cmd, must do BIT(idx).
26  */
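/* For example, with valid = 0x3 (both antennas) and last_idx = 0 the
 * next index is 1; with valid = 0x1 the only valid index (0) is
 * returned regardless of last_idx.
 */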
27 static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
28 {
29 	u8 index = last_idx;
30 
31 	for (int i = 0; i < MAX_ANT_NUM; i++) {
32 		index = (index + 1) % MAX_ANT_NUM;
33 		if (valid & BIT(index))
34 			return index;
35 	}
36 
37 	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);
38 
39 	return last_idx;
40 }
41 
42 void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
43 {
44 	*ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
45 }
46 
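/* Return the TX queue size to use for this txq: the default size, or a
 * larger one if any active link's peer supports HE or EHT.
 */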
47 static int
48 iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
49 {
50 	struct ieee80211_sta *sta = txq->sta;
51 	struct ieee80211_link_sta *link_sta;
52 	unsigned int link_id;
53 	int max_size = IWL_DEFAULT_QUEUE_SIZE;
54 
55 	lockdep_assert_wiphy(mld->wiphy);
56 
57 	for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
58 		if (link_sta->eht_cap.has_eht) {
59 			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
60 			break;
61 		}
62 
63 		if (link_sta->he_cap.has_he)
64 			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
65 	}
66 
67 	return max_size;
68 }
69 
70 static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
71 {
72 	u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
73 	u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
74 	/* We can't know when the station is asleep or awake, so we
75 	 * must disable the queue hang detection.
76 	 */
77 	unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
78 				IWL_WATCHDOG_DISABLED :
79 				mld->trans->trans_cfg->base_params->wd_timeout;
80 	int queue, size;
81 
82 	lockdep_assert_wiphy(mld->wiphy);
83 
84 	if (tid == IWL_MGMT_TID)
85 		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
86 			     mld->trans->cfg->min_txq_size);
87 	else
88 		size = iwl_mld_get_queue_size(mld, txq);
89 
90 	queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
91 				    watchdog_timeout);
92 
93 	if (queue >= 0)
94 		IWL_DEBUG_TX_QUEUES(mld,
95 				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
96 				    queue, fw_sta_mask, tid);
97 	return queue;
98 }
99 
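/* Allocate a firmware queue for this txq, mark it as allocated and
 * publish the mapping in fw_id_to_txq.
 */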
100 static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
101 {
102 	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
103 	int id;
104 
105 	lockdep_assert_wiphy(mld->wiphy);
106 
107 	/* This will also send the SCD_QUEUE_CONFIG_CMD */
108 	id = iwl_mld_allocate_txq(mld, txq);
109 	if (id < 0)
110 		return id;
111 
112 	mld_txq->fw_id = id;
113 	mld_txq->status.allocated = true;
114 
115 	rcu_assign_pointer(mld->fw_id_to_txq[id], txq);
116 
117 	return 0;
118 }
119 
120 void iwl_mld_add_txq_list(struct iwl_mld *mld)
121 {
122 	lockdep_assert_wiphy(mld->wiphy);
123 
124 	while (!list_empty(&mld->txqs_to_add)) {
125 		struct ieee80211_txq *txq;
126 		struct iwl_mld_txq *mld_txq =
127 			list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
128 					 list);
129 		int failed;
130 
131 		txq = container_of((void *)mld_txq, struct ieee80211_txq,
132 				   drv_priv);
133 
134 		failed = iwl_mld_add_txq(mld, txq);
135 
136 		local_bh_disable();
137 		spin_lock(&mld->add_txqs_lock);
138 		list_del_init(&mld_txq->list);
139 		spin_unlock(&mld->add_txqs_lock);
140 		/* If the queue allocation failed, we can't transmit. Leave the
141 	 * frames on the txq; maybe the next attempt to allocate the
142 	 * queue will succeed.
143 		 */
144 		if (!failed)
145 			iwl_mld_tx_from_txq(mld, txq);
146 		local_bh_enable();
147 	}
148 }
149 
150 void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
151 {
152 	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
153 					   add_txqs_wk);
154 
155 	/* will reschedule to run after restart */
156 	if (mld->fw_status.in_hw_restart)
157 		return;
158 
159 	iwl_mld_add_txq_list(mld);
160 }
161 
162 void
163 iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
164 {
165 	struct iwl_scd_queue_cfg_cmd remove_cmd = {
166 		.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
167 		.u.remove.tid = cpu_to_le32(tid),
168 		.u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
169 	};
170 
171 	iwl_mld_send_cmd_pdu(mld,
172 			     WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
173 			     &remove_cmd);
174 
175 	iwl_trans_txq_free(mld->trans, queue_id);
176 }
177 
178 void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
179 {
180 	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
181 	u32 sta_msk, tid;
182 
183 	lockdep_assert_wiphy(mld->wiphy);
184 
185 	spin_lock_bh(&mld->add_txqs_lock);
186 	if (!list_empty(&mld_txq->list))
187 		list_del_init(&mld_txq->list);
188 	spin_unlock_bh(&mld->add_txqs_lock);
189 
190 	if (!mld_txq->status.allocated ||
191 	    WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
192 		return;
193 
194 	sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);
195 
196 	tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
197 					       txq->tid;
198 
199 	iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);
200 
201 	RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
202 	mld_txq->status.allocated = false;
203 }
204 
205 #define OPT_HDR(type, skb, off) \
206 	(type *)(skb_network_header(skb) + (off))
207 
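/* Build the offload_assist field of the TX command: enable L3/L4
 * checksum offload where applicable and encode the IP header offset,
 * MAC header size, A-MSDU and padding bits.
 */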
208 static __le32
209 iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
210 {
211 	struct ieee80211_hdr *hdr = (void *)skb->data;
212 	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
213 	u16 offload_assist = 0;
214 #if IS_ENABLED(CONFIG_INET)
215 	u8 protocol = 0;
216 
217 	/* Do not compute checksum if already computed */
218 	if (skb->ip_summed != CHECKSUM_PARTIAL)
219 		goto out;
220 
221 	/* We do not expect to be requested to csum stuff we do not support */
222 
223 	/* TBD: do we also need to check
224 	 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
225 	 * the devices we support have this flag?
226 	 */
227 	if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
228 		      skb->protocol != htons(ETH_P_IPV6),
229 		      "No support for requested checksum\n")) {
230 		skb_checksum_help(skb);
231 		goto out;
232 	}
233 
234 	if (skb->protocol == htons(ETH_P_IP)) {
235 		protocol = ip_hdr(skb)->protocol;
236 	} else {
237 #if IS_ENABLED(CONFIG_IPV6)
238 		struct ipv6hdr *ipv6h =
239 			(struct ipv6hdr *)skb_network_header(skb);
240 		unsigned int off = sizeof(*ipv6h);
241 
242 		protocol = ipv6h->nexthdr;
243 		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
244 			struct ipv6_opt_hdr *hp;
245 
246 			/* only supported extension headers */
247 			if (protocol != NEXTHDR_ROUTING &&
248 			    protocol != NEXTHDR_HOP &&
249 			    protocol != NEXTHDR_DEST) {
250 				skb_checksum_help(skb);
251 				goto out;
252 			}
253 
254 			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
255 			protocol = hp->nexthdr;
256 			off += ipv6_optlen(hp);
257 		}
258 		/* if we get here - protocol now should be TCP/UDP */
259 #endif
260 	}
261 
262 	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
263 		WARN_ON_ONCE(1);
264 		skb_checksum_help(skb);
265 		goto out;
266 	}
267 
268 	/* enable L4 csum */
269 	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);
270 
271 	/* Set offset to IP header (snap).
272 	 * We don't support tunneling so no need to take care of inner header.
273 	 * Size is in words.
274 	 */
275 	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);
276 
277 	/* Do IPv4 csum for A-MSDU only (no IP csum for IPv6) */
278 	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
279 		ip_hdr(skb)->check = 0;
280 		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
281 	}
282 
283 	/* reset UDP/TCP header csum */
284 	if (protocol == IPPROTO_TCP)
285 		tcp_hdr(skb)->check = 0;
286 	else
287 		udp_hdr(skb)->check = 0;
288 
289 out:
290 #endif
291 	mh_len /= 2;
292 	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;
293 
294 	if (amsdu)
295 		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
296 	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
297 		/* padding is inserted later in transport */
298 		offload_assist |= BIT(TX_CMD_OFFLD_PAD);
299 
300 	return cpu_to_le32(offload_assist);
301 }
302 
303 static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
304 					     struct ieee80211_vif *vif,
305 					     struct ieee80211_tx_info *info,
306 					     unsigned long *basic_rates,
307 					     u8 *band)
308 {
309 	u32 link_id = u32_get_bits(info->control.flags,
310 				   IEEE80211_TX_CTRL_MLO_LINK);
311 
312 	*basic_rates = vif->bss_conf.basic_rates;
313 	*band = info->band;
314 
315 	if (link_id == IEEE80211_LINK_UNSPECIFIED &&
316 	    ieee80211_vif_is_mld(vif)) {
317 		/* shouldn't do this when >1 link is active */
318 		WARN_ON(hweight16(vif->active_links) != 1);
319 		link_id = __ffs(vif->active_links);
320 	}
321 
322 	if (link_id < IEEE80211_LINK_UNSPECIFIED) {
323 		struct ieee80211_bss_conf *link_conf;
324 
325 		rcu_read_lock();
326 		link_conf = rcu_dereference(vif->link_conf[link_id]);
327 		if (link_conf) {
328 			*basic_rates = link_conf->basic_rates;
329 			if (link_conf->chanreq.oper.chan)
330 				*band = link_conf->chanreq.oper.chan->band;
331 		}
332 		rcu_read_unlock();
333 	}
334 }
335 
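/* Return the lowest basic rate index to use for TX. On 2.4 GHz a CCK
 * rate is preferred unless the frame is marked no-CCK or the vif is a
 * P2P interface.
 */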
336 u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
337 			   struct ieee80211_tx_info *info,
338 			   struct ieee80211_vif *vif)
339 {
340 	struct ieee80211_supported_band *sband;
341 	u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
342 	unsigned long basic_rates;
343 	u8 band, rate;
344 	u32 i;
345 
346 	iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);
347 
348 	sband = mld->hw->wiphy->bands[band];
349 	for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
350 		u16 hw = sband->bitrates[i].hw_value;
351 
352 		if (hw >= IWL_FIRST_OFDM_RATE) {
353 			if (lowest_ofdm > hw)
354 				lowest_ofdm = hw;
355 		} else if (lowest_cck > hw) {
356 			lowest_cck = hw;
357 		}
358 	}
359 
360 	if (band == NL80211_BAND_2GHZ && !vif->p2p &&
361 	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
362 	    !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
363 		if (lowest_cck != IWL_RATE_COUNT)
364 			rate = lowest_cck;
365 		else if (lowest_ofdm != IWL_RATE_COUNT)
366 			rate = lowest_ofdm;
367 		else
368 			rate = IWL_FIRST_CCK_RATE;
369 	} else if (lowest_ofdm != IWL_RATE_COUNT) {
370 		rate = lowest_ofdm;
371 	} else {
372 		rate = IWL_FIRST_OFDM_RATE;
373 	}
374 
375 	return rate;
376 }
377 
378 static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
379 					   struct ieee80211_tx_info *info,
380 					   int rate_idx)
381 {
382 	u32 rate_flags = 0;
383 	u8 rate_plcp;
384 
385 	/* if the rate isn't a well known legacy rate, take the lowest one */
386 	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
387 		rate_idx = iwl_mld_get_lowest_rate(mld, info,
388 						   info->control.vif);
389 
390 	WARN_ON_ONCE(rate_idx < 0);
391 
392 	/* Set CCK or OFDM flag */
393 	if (rate_idx <= IWL_LAST_CCK_RATE)
394 		rate_flags |= RATE_MCS_CCK_MSK;
395 	else
396 		rate_flags |= RATE_MCS_LEGACY_OFDM_MSK;
397 
398 	/* Legacy rates are indexed:
399 	 * 0 - 3 for CCK and 0 - 7 for OFDM
400 	 */
401 	rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
402 		     rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);
403 
404 	return (u32)rate_plcp | rate_flags;
405 }
406 
407 static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
408 			      struct ieee80211_tx_info *info,
409 			      struct ieee80211_sta *sta, __le16 fc)
410 {
411 	if (sta && ieee80211_is_data(fc)) {
412 		struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
413 
414 		return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
415 	}
416 
417 	return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
418 }
419 
420 static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
421 				      struct ieee80211_tx_info *info,
422 				      struct ieee80211_sta *sta,
423 				      __le16 fc)
424 {
425 	struct ieee80211_tx_rate *rate = &info->control.rates[0];
426 	u32 result;
427 
428 	/* we only care about legacy/HT/VHT so far, so we can
429 	 * build in v1 and use iwl_new_rate_from_v1()
430 	 * FIXME: in newer devices we only support the new rates, build
431 	 * the rate_n_flags in the new format here instead of using v1 and
432 	 * converting it.
433 	 */
434 
435 	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
436 		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
437 		u8 nss = ieee80211_rate_get_vht_nss(rate);
438 
439 		result = RATE_MCS_VHT_MSK_V1;
440 		result |= u32_encode_bits(mcs, RATE_VHT_MCS_RATE_CODE_MSK);
441 		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);
442 
443 		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
444 			result |= RATE_MCS_SGI_MSK_V1;
445 
446 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
447 			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
448 		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
449 			result |= u32_encode_bits(2, RATE_MCS_CHAN_WIDTH_MSK_V1);
450 		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
451 			result |= u32_encode_bits(3, RATE_MCS_CHAN_WIDTH_MSK_V1);
452 
453 		result = iwl_new_rate_from_v1(result);
454 	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
455 		result = RATE_MCS_HT_MSK_V1;
456 		result |= u32_encode_bits(rate->idx,
457 					  RATE_HT_MCS_RATE_CODE_MSK_V1 |
458 					  RATE_HT_MCS_NSS_MSK_V1);
459 		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
460 			result |= RATE_MCS_SGI_MSK_V1;
461 		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
462 			result |= u32_encode_bits(1, RATE_MCS_CHAN_WIDTH_MSK_V1);
463 		if (info->flags & IEEE80211_TX_CTL_LDPC)
464 			result |= RATE_MCS_LDPC_MSK_V1;
465 		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
466 			result |= RATE_MCS_STBC_MSK;
467 
468 		result = iwl_new_rate_from_v1(result);
469 	} else {
470 		result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
471 	}
472 
473 	if (info->control.antennas)
474 		result |= u32_encode_bits(info->control.antennas,
475 					  RATE_MCS_ANT_AB_MSK);
476 	else
477 		result |= iwl_mld_get_tx_ant(mld, info, sta, fc);
478 
479 	return result;
480 }
481 
482 static u32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
483 				       struct ieee80211_tx_info *info,
484 				       struct ieee80211_sta *sta, __le16 fc)
485 {
486 	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
487 		return iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
488 
489 	return iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
490 		iwl_mld_get_tx_ant(mld, info, sta, fc);
491 }
492 
493 static void
494 iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd_gen3 *tx_cmd,
495 			struct sk_buff *skb, bool amsdu)
496 {
497 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
498 	struct ieee80211_hdr *hdr = (void *)skb->data;
499 	struct ieee80211_vif *vif;
500 
501 	/* Copy MAC header from skb into command buffer */
502 	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));
503 
504 	if (!amsdu || !skb_is_gso(skb))
505 		return;
506 
507 	/* As described in IEEE Std 802.11-2020, table 9-30 (Address
508 	 * field contents), A-MSDU address 3 should contain the BSSID
509 	 * address.
510 	 *
511 	 * In TSO, the skb header address 3 contains the original address 3 to
512 	 * correctly create all the A-MSDU subframe headers from it.
513 	 * Now override address 3 in the command header with the BSSID.
514 	 *
515 	 * Note: we fill in the MLD address, but the firmware will do the
516 	 * necessary translation to link address after encryption.
517 	 */
518 	vif = info->control.vif;
519 	switch (vif->type) {
520 	case NL80211_IFTYPE_STATION:
521 		ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
522 		break;
523 	case NL80211_IFTYPE_AP:
524 		ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
525 		break;
526 	default:
527 		break;
528 	}
529 }
530 
531 static void
532 iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
533 		    struct iwl_device_tx_cmd *dev_tx_cmd,
534 		    struct ieee80211_sta *sta)
535 {
536 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
537 	struct ieee80211_hdr *hdr = (void *)skb->data;
538 	struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
539 					    NULL;
540 	struct iwl_tx_cmd_gen3 *tx_cmd;
541 	bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
542 		     (*ieee80211_get_qos_ctl(hdr) &
543 		      IEEE80211_QOS_CTL_A_MSDU_PRESENT);
544 	u32 rate_n_flags = 0;
545 	u16 flags = 0;
546 
547 	dev_tx_cmd->hdr.cmd = TX_CMD;
548 
549 	if (!info->control.hw_key)
550 		flags |= IWL_TX_FLAGS_ENCRYPT_DIS;
551 
552 	/* For data and mgmt packets rate info comes from the fw.
553 	 * Only set rate/antenna for injected frames with fixed rate, or
554 	 * when no sta is given.
555 	 */
556 	if (unlikely(!sta ||
557 		     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
558 		flags |= IWL_TX_FLAGS_CMD_RATE;
559 		rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
560 							   hdr->frame_control);
561 	} else if (!ieee80211_is_data(hdr->frame_control) ||
562 		   (mld_sta &&
563 		    mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
564 		/* These are important frames */
565 		flags |= IWL_TX_FLAGS_HIGH_PRI;
566 	}
567 
568 	tx_cmd = (void *)dev_tx_cmd->payload;
569 
570 	iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);
571 
572 	tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);
573 
574 	/* Total # bytes to be transmitted */
575 	tx_cmd->len = cpu_to_le16((u16)skb->len);
576 
577 	tx_cmd->flags = cpu_to_le16(flags);
578 
579 	tx_cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
580 }
581 
582 /* Caller of this needs to check that info->control.vif is not NULL */
583 static struct iwl_mld_link *
584 iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
585 {
586 	struct iwl_mld_vif *mld_vif =
587 		iwl_mld_vif_from_mac80211(info->control.vif);
588 	u32 link_id = u32_get_bits(info->control.flags,
589 				   IEEE80211_TX_CTRL_MLO_LINK);
590 
591 	if (link_id == IEEE80211_LINK_UNSPECIFIED) {
592 		if (info->control.vif->active_links)
593 			link_id = ffs(info->control.vif->active_links) - 1;
594 		else
595 			link_id = 0;
596 	}
597 
598 	return rcu_dereference(mld_vif->link[link_id]);
599 }
600 
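/* Return the firmware queue id to use for this frame. Frames without a
 * station TXQ are routed to the bcast/mcast/aux/monitor station queues
 * according to the vif type, or dropped when no queue applies.
 */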
601 static int
602 iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
603 			struct sk_buff *skb)
604 {
605 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
606 	struct ieee80211_hdr *hdr = (void *)skb->data;
607 	__le16 fc = hdr->frame_control;
608 	struct iwl_mld_vif *mld_vif;
609 	struct iwl_mld_link *link;
610 
611 	if (txq && txq->sta)
612 		return iwl_mld_txq_from_mac80211(txq)->fw_id;
613 
614 	if (!info->control.vif)
615 		return IWL_MLD_INVALID_QUEUE;
616 
617 	switch (info->control.vif->type) {
618 	case NL80211_IFTYPE_AP:
619 	case NL80211_IFTYPE_ADHOC:
620 		link = iwl_mld_get_link_from_tx_info(info);
621 
622 		if (WARN_ON(!link))
623 			break;
624 
625 		/* ucast disassociate/deauth frames without a station might
626 		 * happen, especially with reason 7 ("Class 3 frame received
627 		 * from nonassociated STA").
628 		 */
629 		if (ieee80211_is_mgmt(fc) &&
630 		    (!ieee80211_is_bufferable_mmpdu(skb) ||
631 		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
632 			return link->bcast_sta.queue_id;
633 
634 		if (is_multicast_ether_addr(hdr->addr1) &&
635 		    !ieee80211_has_order(fc))
636 			return link->mcast_sta.queue_id;
637 
638 		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
639 			  "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
640 		return link->bcast_sta.queue_id;
641 	case NL80211_IFTYPE_P2P_DEVICE:
642 		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
643 
644 		if (mld_vif->roc_activity == ROC_NUM_ACTIVITIES) {
645 			IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
646 			return IWL_MLD_INVALID_DROP_TX;
647 		}
648 
649 		WARN_ON(!ieee80211_is_mgmt(fc));
650 
651 		return mld_vif->deflink.aux_sta.queue_id;
652 	case NL80211_IFTYPE_MONITOR:
653 		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
654 		return mld_vif->deflink.mon_sta.queue_id;
655 	default:
656 		WARN_ONCE(1, "Unsupported vif type\n");
657 		break;
658 	}
659 
660 	return IWL_MLD_INVALID_QUEUE;
661 }
662 
663 static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
664 				       struct sk_buff *skb)
665 {
666 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
667 	struct iwl_mld_link *mld_link =
668 		&iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
669 	struct iwl_probe_resp_data *resp_data;
670 	u8 *pos;
671 
672 	if (!info->control.vif->p2p)
673 		return;
674 
675 	rcu_read_lock();
676 
677 	resp_data = rcu_dereference(mld_link->probe_resp_data);
678 	if (!resp_data)
679 		goto out;
680 
681 	if (!resp_data->notif.noa_active)
682 		goto out;
683 
684 	if (skb_tailroom(skb) < resp_data->noa_len) {
685 		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
686 			IWL_ERR(mld,
687 				"Failed to reallocate probe resp\n");
688 			goto out;
689 		}
690 	}
691 
692 	pos = skb_put(skb, resp_data->noa_len);
693 
694 	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
695 	/* Set length of IE body (not including ID and length itself) */
696 	*pos++ = resp_data->noa_len - 2;
697 	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
698 	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
699 	*pos++ = WLAN_OUI_WFA & 0xff;
700 	*pos++ = WLAN_OUI_TYPE_WFA_P2P;
701 
702 	memcpy(pos, &resp_data->notif.noa_attr,
703 	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
704 
705 out:
706 	rcu_read_unlock();
707 }
708 
709 /* This function must be called with BHs disabled */
710 static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
711 			   struct ieee80211_txq *txq)
712 {
713 	struct ieee80211_hdr *hdr = (void *)skb->data;
714 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
715 	struct ieee80211_sta *sta = txq ? txq->sta : NULL;
716 	struct iwl_device_tx_cmd *dev_tx_cmd;
717 	int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
718 	u8 tid = IWL_MAX_TID_COUNT;
719 
720 	if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
721 	    queue == IWL_MLD_INVALID_DROP_TX)
722 		return -1;
723 
724 	if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
725 		return -1;
726 
727 	dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
728 	if (unlikely(!dev_tx_cmd))
729 		return -1;
730 
731 	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
732 		if (IWL_MLD_NON_TRANSMITTING_AP)
733 			return -1;
734 
735 		iwl_mld_probe_resp_set_noa(mld, skb);
736 	}
737 
738 	iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);
739 
740 	if (ieee80211_is_data(hdr->frame_control)) {
741 		if (ieee80211_is_data_qos(hdr->frame_control))
742 			tid = ieee80211_get_tid(hdr);
743 		else
744 			tid = IWL_TID_NON_QOS;
745 	}
746 
747 	IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
748 		     tid, queue, skb->len);
749 
750 	/* From now on, we cannot access info->control */
751 	memset(&info->status, 0, sizeof(info->status));
752 	memset(info->driver_data, 0, sizeof(info->driver_data));
753 
754 	info->driver_data[1] = dev_tx_cmd;
755 
756 	if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
757 		goto err;
758 
759 	/* Update low-latency counter when a packet is queued instead
760 	 * of after TX; this allows early low-latency detection
761 	 */
762 	if (sta)
763 		iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);
764 
765 	return 0;
766 
767 err:
768 	iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
769 	IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
770 	return -1;
771 }
772 
773 #ifdef CONFIG_INET
774 
775 /* This function handles the segmentation of a large TSO packet into multiple
776  * MPDUs, ensuring that the resulting segments conform to A-MSDU limits and
777  * constraints.
778  */
779 static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
780 				  struct ieee80211_sta *sta,
781 				  struct sk_buff_head *mpdus_skbs)
782 {
783 	struct ieee80211_hdr *hdr = (void *)skb->data;
784 	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
785 	unsigned int mss = skb_shinfo(skb)->gso_size;
786 	unsigned int num_subframes, tcp_payload_len, subf_len;
787 	u16 snap_ip_tcp, pad, max_tid_amsdu_len;
788 	u8 tid;
789 
790 	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);
791 
792 	if (!ieee80211_is_data_qos(hdr->frame_control) ||
793 	    !sta->cur->max_rc_amsdu_len)
794 		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
795 
796 	/* Do not build an A-MSDU for IPv6 with extension headers.
797 	 * Ask the stack to segment and checksum the generated MPDUs for us.
798 	 */
799 	if (skb->protocol == htons(ETH_P_IPV6) &&
800 	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
801 	    IPPROTO_TCP) {
802 		netdev_flags &= ~NETIF_F_CSUM_MASK;
803 		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
804 	}
805 
806 	tid = ieee80211_get_tid(hdr);
807 	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
808 		return -EINVAL;
809 
810 	max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
811 	if (!max_tid_amsdu_len)
812 		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
813 
814 	/* Sub frame header + SNAP + IP header + TCP header + MSS */
815 	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
816 	pad = (4 - subf_len) & 0x3;
817 
818 	/* If we have N subframes in the A-MSDU, then the A-MSDU's size is
819 	 * N * subf_len + (N - 1) * pad.
820 	 */
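	/* Solving N * subf_len + (N - 1) * pad <= max_tid_amsdu_len for N
	 * gives the formula below.
	 */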
821 	num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
822 
823 	if (sta->max_amsdu_subframes &&
824 	    num_subframes > sta->max_amsdu_subframes)
825 		num_subframes = sta->max_amsdu_subframes;
826 
827 	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
828 		tcp_hdrlen(skb) + skb->data_len;
829 
830 	/* Make sure we have enough TBs for the A-MSDU:
831 	 *	2 for each subframe
832 	 *	1 more for each fragment
833 	 *	1 more for the potential data in the header
834 	 */
835 	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
836 	    mld->trans->max_skb_frags)
837 		num_subframes = 1;
838 
839 	if (num_subframes > 1)
840 		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;
841 
842 	/* This skb fits in a single A-MSDU */
843 	if (tcp_payload_len <= num_subframes * mss) {
844 		__skb_queue_tail(mpdus_skbs, skb);
845 		return 0;
846 	}
847 
848 	/* Trick the segmentation function to make it create SKBs that can fit
849 	 * into one A-MSDU.
850 	 */
851 	return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
852 }
853 
854 /* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
855  * large packets when necessary and transmitting each segment as an MPDU.
856  */
857 static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
858 			  struct ieee80211_txq *txq)
859 {
860 	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
861 	struct sk_buff *orig_skb = skb;
862 	struct sk_buff_head mpdus_skbs;
863 	unsigned int payload_len;
864 	int ret;
865 
866 	if (WARN_ON(!txq || !txq->sta))
867 		return -1;
868 
869 	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
870 		tcp_hdrlen(skb) + skb->data_len;
871 
872 	if (payload_len <= skb_shinfo(skb)->gso_size)
873 		return iwl_mld_tx_mpdu(mld, skb, txq);
874 
875 	if (!info->control.vif)
876 		return -1;
877 
878 	__skb_queue_head_init(&mpdus_skbs);
879 
880 	ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
881 	if (ret)
882 		return ret;
883 
884 	WARN_ON(skb_queue_empty(&mpdus_skbs));
885 
886 	while (!skb_queue_empty(&mpdus_skbs)) {
887 		skb = __skb_dequeue(&mpdus_skbs);
888 
889 		ret = iwl_mld_tx_mpdu(mld, skb, txq);
890 		if (!ret)
891 			continue;
892 
893 		/* Free skbs created as part of TSO logic that have not yet
894 		 * been dequeued
895 		 */
896 		__skb_queue_purge(&mpdus_skbs);
897 
898 		/* skb here is not necessarily the same as the skb that
899 		 * entered this function, so free it explicitly.
900 		 */
901 		if (skb == orig_skb)
902 			ieee80211_free_txskb(mld->hw, skb);
903 		else
904 			kfree_skb(skb);
905 
906 		/* there was an error, but we consumed the skb one way or
907 		 * another, so return 0
908 		 */
909 		return 0;
910 	}
911 
912 	return 0;
913 }
914 #else
915 static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
916 			  struct ieee80211_txq *txq)
917 {
918 	/* Impossible to get TSO without CONFIG_INET */
919 	WARN_ON(1);
920 
921 	return -1;
922 }
923 #endif /* CONFIG_INET */
924 
925 void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
926 		    struct ieee80211_txq *txq)
927 {
928 	if (skb_is_gso(skb)) {
929 		if (!iwl_mld_tx_tso(mld, skb, txq))
930 			return;
931 		goto err;
932 	}
933 
934 	if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
935 		return;
936 
937 err:
938 	ieee80211_free_txskb(mld->hw, skb);
939 }
940 
941 void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
942 {
943 	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
944 	struct sk_buff *skb = NULL;
945 	u8 zero_addr[ETH_ALEN] = {};
946 
947 	/*
948 	 * No need for threads to be pending here; they can leave all the
949 	 * work to the first taker.
950 	 *
951 	 * mld_txq->tx_request logic:
952 	 *
953 	 * If 0, no one is currently TXing, set to 1 to indicate current thread
954 	 * will now start TX and other threads should quit.
955 	 *
956 	 * If 1, another thread is currently TXing, set to 2 to indicate to
957 	 * that thread that there was another request. Since that request may
958 	 * have raced with the check whether the queue is empty, the TXing
959 	 * thread should check the queue's status one more time before leaving.
960 	 * This check is done in order to not leave any TX hanging in the queue
961 	 * until the next TX invocation (which may not even happen).
962 	 *
963 	 * If 2, another thread is currently TXing, and it will already double
964 	 * check the queue, so do nothing.
965 	 */
966 	if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
967 		return;
968 
969 	rcu_read_lock();
970 	do {
971 		while (likely(!mld_txq->status.stop_full) &&
972 		       (skb = ieee80211_tx_dequeue(mld->hw, txq)))
973 			iwl_mld_tx_skb(mld, skb, txq);
974 	} while (atomic_dec_return(&mld_txq->tx_request));
975 
976 	IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
977 		     txq->sta ? txq->sta->addr : zero_addr, txq->tid);
978 
979 	rcu_read_unlock();
980 }
981 
982 static void iwl_mld_hwrate_to_tx_rate(u32 rate_n_flags,
983 				      struct ieee80211_tx_info *info)
984 {
985 	enum nl80211_band band = info->band;
986 	struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
987 	u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
988 	u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
989 	u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
990 
991 	if (sgi)
992 		tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
993 
994 	switch (chan_width) {
995 	case RATE_MCS_CHAN_WIDTH_20:
996 		break;
997 	case RATE_MCS_CHAN_WIDTH_40:
998 		tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
999 		break;
1000 	case RATE_MCS_CHAN_WIDTH_80:
1001 		tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1002 		break;
1003 	case RATE_MCS_CHAN_WIDTH_160:
1004 		tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1005 		break;
1006 	default:
1007 		break;
1008 	}
1009 
1010 	switch (format) {
1011 	case RATE_MCS_HT_MSK:
1012 		tx_rate->flags |= IEEE80211_TX_RC_MCS;
1013 		tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
1014 		break;
1015 	case RATE_MCS_VHT_MSK:
1016 		ieee80211_rate_set_vht(tx_rate,
1017 				       rate_n_flags & RATE_MCS_CODE_MSK,
1018 				       FIELD_GET(RATE_MCS_NSS_MSK,
1019 						 rate_n_flags) + 1);
1020 		tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
1021 		break;
1022 	case RATE_MCS_HE_MSK:
1023 		/* mac80211 cannot do this without ieee80211_tx_status_ext()
1024 		 * but it only matters for radiotap
1025 		 */
1026 		tx_rate->idx = 0;
1027 		break;
1028 	default:
1029 		tx_rate->idx =
1030 			iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1031 							      band);
1032 		break;
1033 	}
1034 }
1035 
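/* Handle a single-frame TX response: reclaim the transmitted frame(s)
 * from the transport, report their status to mac80211, and toggle the
 * TX antenna after a failed mgmt frame or a failure to a station that
 * is not yet authorized.
 */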
1036 void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
1037 				  struct iwl_rx_packet *pkt)
1038 {
1039 	struct iwl_tx_resp *tx_resp = (void *)pkt->data;
1040 	int txq_id = le16_to_cpu(tx_resp->tx_queue);
1041 	struct agg_tx_status *agg_status = &tx_resp->status;
1042 	u32 status = le16_to_cpu(agg_status->status);
1043 	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1044 	size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
1045 	int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
1046 	int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
1047 	struct ieee80211_link_sta *link_sta;
1048 	struct iwl_mld_sta *mld_sta;
1049 	u16 ssn;
1050 	struct sk_buff_head skbs;
1051 	u8 skb_freed = 0;
1052 	bool mgmt = false;
1053 	bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
1054 
1055 	if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
1056 			 "Invalid tx_resp notif frame_count (%d)\n",
1057 			 tx_resp->frame_count))
1058 		return;
1059 
1060 	/* validate the size of the variable part of the notif */
1061 	if (IWL_FW_CHECK(mld, notif_size != pkt_len,
1062 			 "Invalid tx_resp notif size (expected=%zu got=%u)\n",
1063 			 notif_size, pkt_len))
1064 		return;
1065 
1066 	ssn = le32_to_cpup((__le32 *)agg_status +
1067 			   tx_resp->frame_count) & 0xFFFF;
1068 
1069 	__skb_queue_head_init(&skbs);
1070 
1071 	/* we can free until ssn % q.n_bd not inclusive */
1072 	iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
1073 
1074 	while (!skb_queue_empty(&skbs)) {
1075 		struct sk_buff *skb = __skb_dequeue(&skbs);
1076 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1077 		struct ieee80211_hdr *hdr = (void *)skb->data;
1078 
1079 		skb_freed++;
1080 
1081 		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1082 
1083 		memset(&info->status, 0, sizeof(info->status));
1084 
1085 		info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1086 
1087 		/* inform mac80211 about what happened with the frame */
1088 		switch (status & TX_STATUS_MSK) {
1089 		case TX_STATUS_SUCCESS:
1090 		case TX_STATUS_DIRECT_DONE:
1091 			info->flags |= IEEE80211_TX_STAT_ACK;
1092 			break;
1093 		default:
1094 			break;
1095 		}
1096 
1097 		/* If we are freeing multiple frames, mark all the frames
1098 		 * but the first one as acked, since they were acknowledged
1099 		 * before
1100 		 */
1101 		if (skb_freed > 1)
1102 			info->flags |= IEEE80211_TX_STAT_ACK;
1103 
1104 		if (tx_failure) {
1105 			enum iwl_fw_ini_time_point tp =
1106 				IWL_FW_INI_TIME_POINT_TX_FAILED;
1107 
1108 			if (ieee80211_is_action(hdr->frame_control))
1109 				tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1110 			else if (ieee80211_is_mgmt(hdr->frame_control))
1111 				mgmt = true;
1112 
1113 			iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
1114 		}
1115 
1116 		iwl_mld_hwrate_to_tx_rate(le32_to_cpu(tx_resp->initial_rate),
1117 					  info);
1118 
1119 		if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
1120 			ieee80211_tx_status_skb(mld->hw, skb);
1121 	}
1122 
1123 	IWL_DEBUG_TX_REPLY(mld,
1124 			   "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
1125 			   txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
1126 			   tx_resp->failure_frame);
1127 
1128 	if (tx_failure && mgmt)
1129 		iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
1130 
1131 	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1132 			 "Got invalid sta_id (%d)\n", sta_id))
1133 		return;
1134 
1135 	rcu_read_lock();
1136 
1137 	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1138 	if (!link_sta) {
1139 		/* This can happen if the TX cmd was sent before pre_rcu_remove
1140 		 * but the TX response was received after
1141 		 */
1142 		IWL_DEBUG_TX_REPLY(mld,
1143 				   "Got valid sta_id (%d) but sta is NULL\n",
1144 				   sta_id);
1145 		goto out;
1146 	}
1147 
1148 	if (IS_ERR(link_sta))
1149 		goto out;
1150 
1151 	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
1152 
1153 	if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
1154 		iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
1155 
1156 	if (tid < IWL_MAX_TID_COUNT)
1157 		iwl_mld_count_mpdu_tx(link_sta, 1);
1158 
1159 out:
1160 	rcu_read_unlock();
1161 }
1162 
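/* Reclaim all frames on @txq up to (but not including) @index and pass
 * them to mac80211; when flushing, report them as not acknowledged.
 */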
1163 static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
1164 				   bool in_flush)
1165 {
1166 	struct sk_buff_head reclaimed_skbs;
1167 
1168 	__skb_queue_head_init(&reclaimed_skbs);
1169 
1170 	iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
1171 
1172 	while (!skb_queue_empty(&reclaimed_skbs)) {
1173 		struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
1174 		struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1175 
1176 		iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1177 
1178 		memset(&info->status, 0, sizeof(info->status));
1179 
1180 		/* The packet was transmitted successfully; failures come as
1181 		 * single frames because, before failing a frame, the firmware
1182 		 * transmits it without aggregation at least once.
1183 		 */
1184 		if (!in_flush)
1185 			info->flags |= IEEE80211_TX_STAT_ACK;
1186 		else
1187 			info->flags &= ~IEEE80211_TX_STAT_ACK;
1188 
1189 		ieee80211_tx_status_skb(mld->hw, skb);
1190 	}
1191 }
1192 
1193 int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
1194 {
1195 	struct iwl_tx_path_flush_cmd_rsp *rsp;
1196 	struct iwl_tx_path_flush_cmd flush_cmd = {
1197 		.sta_id = cpu_to_le32(fw_sta_id),
1198 		.tid_mask = cpu_to_le16(0xffff),
1199 	};
1200 	struct iwl_host_cmd cmd = {
1201 		.id = TXPATH_FLUSH,
1202 		.len = { sizeof(flush_cmd), },
1203 		.data = { &flush_cmd, },
1204 		.flags = CMD_WANT_SKB,
1205 	};
1206 	int ret, num_flushed_queues;
1207 	u32 resp_len;
1208 
1209 	IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
1210 			    fw_sta_id, 0xffff);
1211 
1212 	ret = iwl_mld_send_cmd(mld, &cmd);
1213 	if (ret) {
1214 		IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
1215 		return ret;
1216 	}
1217 
1218 	resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1219 	if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
1220 			 "Invalid TXPATH_FLUSH response len: %d\n",
1221 			 resp_len)) {
1222 		ret = -EIO;
1223 		goto free_rsp;
1224 	}
1225 
1226 	rsp = (void *)cmd.resp_pkt->data;
1227 
1228 	if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
1229 			 "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
1230 			 le16_to_cpu(rsp->sta_id))) {
1231 		ret = -EIO;
1232 		goto free_rsp;
1233 	}
1234 
1235 	num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
1236 	if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
1237 			 "num_flushed_queues %d\n", num_flushed_queues)) {
1238 		ret = -EIO;
1239 		goto free_rsp;
1240 	}
1241 
1242 	for (int i = 0; i < num_flushed_queues; i++) {
1243 		struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
1244 		int read_after = le16_to_cpu(queue_info->read_after_flush);
1245 		int txq_id = le16_to_cpu(queue_info->queue_num);
1246 
1247 		if (IWL_FW_CHECK(mld,
1248 				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1249 				 "Invalid txq id %d\n", txq_id))
1250 			continue;
1251 
1252 		IWL_DEBUG_TX_QUEUES(mld,
1253 				    "tid %d txq_id %d read-before %d read-after %d\n",
1254 				    le16_to_cpu(queue_info->tid), txq_id,
1255 				    le16_to_cpu(queue_info->read_before_flush),
1256 				    read_after);
1257 
1258 		iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
1259 	}
1260 
1261 free_rsp:
1262 	iwl_free_resp(&cmd);
1263 	return ret;
1264 }
1265 
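/* Make sure the txq has a firmware queue: allocate one now if needed,
 * and drop the txq from the pending-add list in any case.
 */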
1266 int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
1267 {
1268 	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
1269 	int ret;
1270 
1271 	lockdep_assert_wiphy(mld->wiphy);
1272 
1273 	if (likely(mld_txq->status.allocated))
1274 		return 0;
1275 
1276 	ret = iwl_mld_add_txq(mld, txq);
1277 
1278 	spin_lock_bh(&mld->add_txqs_lock);
1279 	if (!list_empty(&mld_txq->list))
1280 		list_del_init(&mld_txq->list);
1281 	spin_unlock_bh(&mld->add_txqs_lock);
1282 
1283 	return ret;
1284 }
1285 
1286 int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
1287 			    struct ieee80211_sta *sta,
1288 			    u32 old_sta_mask, u32 new_sta_mask)
1289 {
1290 	struct iwl_scd_queue_cfg_cmd cmd = {
1291 		.operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
1292 		.u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
1293 		.u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
1294 	};
1295 
1296 	lockdep_assert_wiphy(mld->wiphy);
1297 
1298 	for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
1299 		struct ieee80211_txq *txq =
1300 			sta->txq[tid != IWL_MAX_TID_COUNT ?
1301 					tid : IEEE80211_NUM_TIDS];
1302 		struct iwl_mld_txq *mld_txq =
1303 			iwl_mld_txq_from_mac80211(txq);
1304 		int ret;
1305 
1306 		if (!mld_txq->status.allocated)
1307 			continue;
1308 
1309 		if (tid == IWL_MAX_TID_COUNT)
1310 			cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
1311 		else
1312 			cmd.u.modify.tid = cpu_to_le32(tid);
1313 
1314 		ret = iwl_mld_send_cmd_pdu(mld,
1315 					   WIDE_ID(DATA_PATH_GROUP,
1316 						   SCD_QUEUE_CONFIG_CMD),
1317 					   &cmd);
1318 		if (ret)
1319 			return ret;
1320 	}
1321 
1322 	return 0;
1323 }
1324 
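/* Handle a compressed BA notification: reclaim the reported TFDs on
 * each affected queue and update the per-link-sta TX MPDU counters.
 */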
1325 void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
1326 					struct iwl_rx_packet *pkt)
1327 {
1328 	struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
1329 	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1330 	u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
1331 	u8 sta_id = ba_res->sta_id;
1332 	struct ieee80211_link_sta *link_sta;
1333 
1334 	if (!tfd_cnt)
1335 		return;
1336 
1337 	if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
1338 			 "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
1339 			 tfd_cnt, pkt_len))
1340 		return;
1341 
1342 	IWL_DEBUG_TX_REPLY(mld,
1343 			   "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
1344 			   sta_id, le32_to_cpu(ba_res->flags),
1345 			   le16_to_cpu(ba_res->txed),
1346 			   le16_to_cpu(ba_res->done));
1347 
1348 	for (int i = 0; i < tfd_cnt; i++) {
1349 		struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
1350 		int txq_id = le16_to_cpu(ba_tfd->q_num);
1351 		int index = le16_to_cpu(ba_tfd->tfd_index);
1352 
1353 		if (IWL_FW_CHECK(mld,
1354 				 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1355 				 "Invalid txq id %d\n", txq_id))
1356 			continue;
1357 
1358 		iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
1359 	}
1360 
1361 	if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1362 			 "Got invalid sta_id (%d)\n", sta_id))
1363 		return;
1364 
1365 	rcu_read_lock();
1366 
1367 	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1368 	if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
1369 			 "Got valid sta_id (%d) but link_sta is NULL\n",
1370 			 sta_id))
1371 		goto out;
1372 
1373 	iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
1374 out:
1375 	rcu_read_unlock();
1376 }
1377