Lines matching the full-text terms "queue", "pkt" and "tx"

1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2024 - 2025 Intel Corporation
7 #include "tx.h"
10 #include "iwl-utils.h"
15 #include "fw/api/tx.h"
19 #include "fw/api/time-event.h"
23 /* Toggles between TX antennas. Receives the bitmask of valid TX antennas and
24 * the *index* used for the last TX, and returns the next valid *index* to use.
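The comment above states the contract of the antenna toggle: take the bitmask of valid TX antennas plus the index used for the last TX, and return the next valid index. A minimal standalone sketch of that round-robin selection (the helper name and the 8-bit mask width are assumptions, not the driver's exact code):

#include <stdio.h>

/* Hypothetical stand-in for the toggle described above: walk the antenna
 * bitmask round-robin starting after last_idx and return the first valid
 * index found (or last_idx again if the mask is empty).
 */
static unsigned int next_tx_ant_idx(unsigned int valid_ants, unsigned int last_idx)
{
	for (unsigned int i = 1; i <= 8; i++) {
		unsigned int idx = (last_idx + i) % 8;

		if (valid_ants & (1u << idx))
			return idx;
	}
	return last_idx;
}

int main(void)
{
	/* Antennas A and B valid (bits 0 and 1); last TX used index 0. */
	printf("%u\n", next_tx_ant_idx(0x3, 0));	/* prints 1 */
	printf("%u\n", next_tx_ant_idx(0x3, 1));	/* prints 0 */
	return 0;
}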
50 struct ieee80211_sta *sta = txq->sta; in iwl_mld_get_queue_size()
55 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_get_queue_size()
57 for_each_sta_active_link(txq->vif, sta, link_sta, link_id) { in iwl_mld_get_queue_size()
58 if (link_sta->eht_cap.has_eht) { in iwl_mld_get_queue_size()
63 if (link_sta->he_cap.has_he) in iwl_mld_get_queue_size()
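The fragments from iwl_mld_get_queue_size() walk each active link and pick a TX queue size from the station's capabilities: an EHT link asks for the largest queue, an HE link for a mid-size one, and everything else falls back to the transport's minimum. A standalone sketch of that selection; the sizes below are made up and only stand in for the real transport constants, which are not part of this listing:

#include <stdbool.h>
#include <stdio.h>

/* Placeholder sizes; the driver takes the real values from the transport
 * configuration and bumps them per link capability.
 */
#define QSIZE_DEFAULT	256
#define QSIZE_HE	1024
#define QSIZE_EHT	4096

struct link_caps {
	bool has_he;
	bool has_eht;
};

/* Pick the largest queue size required by any active link. */
static unsigned int pick_queue_size(const struct link_caps *links, int n)
{
	unsigned int size = QSIZE_DEFAULT;

	for (int i = 0; i < n; i++) {
		if (links[i].has_eht)
			return QSIZE_EHT;	/* largest option, stop looking */
		if (links[i].has_he && size < QSIZE_HE)
			size = QSIZE_HE;
	}
	return size;
}

int main(void)
{
	struct link_caps links[] = { { .has_he = true }, { .has_eht = true } };

	printf("%u\n", pick_queue_size(links, 2));	/* prints 4096 */
	return 0;
}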
72 u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid; in iwl_mld_allocate_txq()
73 u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta); in iwl_mld_allocate_txq()
75 * must disable the queue hang detection. in iwl_mld_allocate_txq()
77 unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ? in iwl_mld_allocate_txq()
79 mld->trans->mac_cfg->base->wd_timeout; in iwl_mld_allocate_txq()
80 int queue, size; in iwl_mld_allocate_txq() local
82 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_allocate_txq()
86 mld->trans->mac_cfg->base->min_txq_size); in iwl_mld_allocate_txq()
90 queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size, in iwl_mld_allocate_txq()
93 if (queue >= 0) in iwl_mld_allocate_txq()
96 queue, fw_sta_mask, tid); in iwl_mld_allocate_txq()
97 return queue; in iwl_mld_allocate_txq()
105 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_add_txq()
112 mld_txq->fw_id = id; in iwl_mld_add_txq()
113 mld_txq->status.allocated = true; in iwl_mld_add_txq()
115 rcu_assign_pointer(mld->fw_id_to_txq[id], txq); in iwl_mld_add_txq()
122 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_add_txq_list()
124 while (!list_empty(&mld->txqs_to_add)) { in iwl_mld_add_txq_list()
127 list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq, in iwl_mld_add_txq_list()
137 spin_lock(&mld->add_txqs_lock); in iwl_mld_add_txq_list()
138 list_del_init(&mld_txq->list); in iwl_mld_add_txq_list()
139 spin_unlock(&mld->add_txqs_lock); in iwl_mld_add_txq_list()
140 /* If the queue allocation failed, we can't transmit. Leave the in iwl_mld_add_txq_list()
141 * frames on the txq, maybe the attempt to allocate the queue in iwl_mld_add_txq_list()
156 if (mld->fw_status.in_hw_restart) in iwl_mld_add_txqs_wk()
175 iwl_trans_txq_free(mld->trans, queue_id); in iwl_mld_free_txq()
183 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_remove_txq()
185 spin_lock_bh(&mld->add_txqs_lock); in iwl_mld_remove_txq()
186 if (!list_empty(&mld_txq->list)) in iwl_mld_remove_txq()
187 list_del_init(&mld_txq->list); in iwl_mld_remove_txq()
188 spin_unlock_bh(&mld->add_txqs_lock); in iwl_mld_remove_txq()
190 if (!mld_txq->status.allocated || in iwl_mld_remove_txq()
191 WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq))) in iwl_mld_remove_txq()
194 sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta); in iwl_mld_remove_txq()
196 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : in iwl_mld_remove_txq()
197 txq->tid; in iwl_mld_remove_txq()
199 iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id); in iwl_mld_remove_txq()
201 RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL); in iwl_mld_remove_txq()
202 mld_txq->status.allocated = false; in iwl_mld_remove_txq()
211 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_get_offload_assist()
212 u16 mh_len = ieee80211_hdrlen(hdr->frame_control); in iwl_mld_get_offload_assist()
218 if (skb->ip_summed != CHECKSUM_PARTIAL) in iwl_mld_get_offload_assist()
224 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all in iwl_mld_get_offload_assist()
227 if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) && in iwl_mld_get_offload_assist()
228 skb->protocol != htons(ETH_P_IPV6), in iwl_mld_get_offload_assist()
234 if (skb->protocol == htons(ETH_P_IP)) { in iwl_mld_get_offload_assist()
235 protocol = ip_hdr(skb)->protocol; in iwl_mld_get_offload_assist()
242 protocol = ipv6h->nexthdr; in iwl_mld_get_offload_assist()
255 protocol = hp->nexthdr; in iwl_mld_get_offload_assist()
258 /* if we get here, the protocol should now be TCP or UDP */ in iwl_mld_get_offload_assist()
278 if (skb->protocol == htons(ETH_P_IP) && amsdu) { in iwl_mld_get_offload_assist()
279 ip_hdr(skb)->check = 0; in iwl_mld_get_offload_assist()
285 tcp_hdr(skb)->check = 0; in iwl_mld_get_offload_assist()
287 udp_hdr(skb)->check = 0; in iwl_mld_get_offload_assist()
296 else if (ieee80211_hdrlen(hdr->frame_control) % 4) in iwl_mld_get_offload_assist()
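The last fragment above keys on ieee80211_hdrlen() modulo 4: when the 802.11 MAC header length is not a multiple of four bytes, the offload-assist word asks the firmware to pad it so the payload stays dword-aligned. A tiny illustration of that check (plain arithmetic only; the flag encoding itself is not reproduced here):

#include <stdbool.h>
#include <stdio.h>

/* The payload must start dword-aligned, so a MAC header whose length is
 * not a multiple of 4 needs firmware-inserted padding.
 */
static bool needs_hdr_pad(unsigned int hdrlen)
{
	return hdrlen % 4 != 0;
}

int main(void)
{
	printf("24-byte data header: pad=%d\n", needs_hdr_pad(24));	/* 0 */
	printf("26-byte QoS data header: pad=%d\n", needs_hdr_pad(26));	/* 1 */
	return 0;
}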
309 u32 link_id = u32_get_bits(info->control.flags, in iwl_mld_get_basic_rates_and_band()
312 *basic_rates = vif->bss_conf.basic_rates; in iwl_mld_get_basic_rates_and_band()
313 *band = info->band; in iwl_mld_get_basic_rates_and_band()
318 WARN_ON(hweight16(vif->active_links) != 1); in iwl_mld_get_basic_rates_and_band()
319 link_id = __ffs(vif->active_links); in iwl_mld_get_basic_rates_and_band()
326 link_conf = rcu_dereference(vif->link_conf[link_id]); in iwl_mld_get_basic_rates_and_band()
328 *basic_rates = link_conf->basic_rates; in iwl_mld_get_basic_rates_and_band()
329 if (link_conf->chanreq.oper.chan) in iwl_mld_get_basic_rates_and_band()
330 *band = link_conf->chanreq.oper.chan->band; in iwl_mld_get_basic_rates_and_band()
348 sband = mld->hw->wiphy->bands[band]; in iwl_mld_get_lowest_rate()
350 u16 hw = sband->bitrates[i].hw_value; in iwl_mld_get_lowest_rate()
360 if (band == NL80211_BAND_2GHZ && !vif->p2p && in iwl_mld_get_lowest_rate()
361 vif->type != NL80211_IFTYPE_P2P_DEVICE && in iwl_mld_get_lowest_rate()
362 !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) { in iwl_mld_get_lowest_rate()
388 info->control.vif); in iwl_mld_mac80211_rate_idx_to_fw()
399 * 0 - 3 for CCK and 0 - 7 for OFDM in iwl_mld_mac80211_rate_idx_to_fw()
402 rate_idx - IWL_FIRST_OFDM_RATE : rate_idx); in iwl_mld_mac80211_rate_idx_to_fw()
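These fragments show the legacy rate mapping the comment describes: the firmware wants CCK indexes 0 - 3 and OFDM indexes 0 - 7, while mac80211's legacy index counts the CCK rates first, so OFDM entries are rebased by IWL_FIRST_OFDM_RATE. A worked standalone example, assuming the first OFDM entry (6 Mb/s) sits at index 4 of the combined table:

#include <stdio.h>

/* Assumption for illustration: the four CCK entries come first, so the
 * first OFDM entry (6 Mb/s) is combined index 4.
 */
#define FIRST_OFDM_RATE 4

/* Map a combined legacy index to the firmware's per-modulation index:
 * CCK stays 0-3, OFDM is rebased to 0-7.
 */
static int legacy_idx_to_fw(int rate_idx)
{
	return rate_idx >= FIRST_OFDM_RATE ? rate_idx - FIRST_OFDM_RATE
					   : rate_idx;
}

int main(void)
{
	printf("%d\n", legacy_idx_to_fw(2));	/* CCK 5.5 Mb/s -> 2 */
	printf("%d\n", legacy_idx_to_fw(5));	/* OFDM 9 Mb/s  -> 1 */
	return 0;
}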
414 return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS; in iwl_mld_get_tx_ant()
417 return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS; in iwl_mld_get_tx_ant()
425 struct ieee80211_tx_rate *rate = &info->control.rates[0]; in iwl_mld_get_inject_tx_rate()
428 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) { in iwl_mld_get_inject_tx_rate()
436 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mld_get_inject_tx_rate()
439 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mld_get_inject_tx_rate()
441 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) in iwl_mld_get_inject_tx_rate()
443 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) in iwl_mld_get_inject_tx_rate()
445 } else if (rate->flags & IEEE80211_TX_RC_MCS) { in iwl_mld_get_inject_tx_rate()
446 /* only MCS 0-15 are supported */ in iwl_mld_get_inject_tx_rate()
447 u8 mcs = rate->idx & 7; in iwl_mld_get_inject_tx_rate()
448 u8 nss = rate->idx > 7; in iwl_mld_get_inject_tx_rate()
454 if (rate->flags & IEEE80211_TX_RC_SHORT_GI) in iwl_mld_get_inject_tx_rate()
456 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) in iwl_mld_get_inject_tx_rate()
458 if (info->flags & IEEE80211_TX_CTL_LDPC) in iwl_mld_get_inject_tx_rate()
460 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC)) in iwl_mld_get_inject_tx_rate()
463 result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx); in iwl_mld_get_inject_tx_rate()
466 if (info->control.antennas) in iwl_mld_get_inject_tx_rate()
467 result |= u32_encode_bits(info->control.antennas, in iwl_mld_get_inject_tx_rate()
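For injected HT rates, the fragments above split mac80211's MCS 0-15 into a per-stream MCS (the low three bits) and a second-stream flag (any index above 7). A quick standalone illustration of that split:

#include <stdio.h>

int main(void)
{
	/* mac80211 HT MCS 0-15: the low 3 bits select the per-stream MCS,
	 * indexes 8-15 mean a second spatial stream.
	 */
	for (unsigned int idx = 0; idx <= 15; idx++)
		printf("HT MCS %2u -> per-stream MCS %u, streams %u\n",
		       idx, idx & 7, (unsigned int)(idx > 7) + 1);
	return 0;
}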
481 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) in iwl_mld_get_tx_rate_n_flags()
484 rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) | in iwl_mld_get_tx_rate_n_flags()
487 return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3); in iwl_mld_get_tx_rate_n_flags()
495 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_fill_tx_cmd_hdr()
499 memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control)); in iwl_mld_fill_tx_cmd_hdr()
504 /* As described in IEEE Std 802.11-2020, table 9-30 (Address in iwl_mld_fill_tx_cmd_hdr()
505 * field contents), A-MSDU address 3 should contain the BSSID in iwl_mld_fill_tx_cmd_hdr()
509 * correctly create all the A-MSDU subframes headers from it. in iwl_mld_fill_tx_cmd_hdr()
515 vif = info->control.vif; in iwl_mld_fill_tx_cmd_hdr()
516 switch (vif->type) { in iwl_mld_fill_tx_cmd_hdr()
518 ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr); in iwl_mld_fill_tx_cmd_hdr()
521 ether_addr_copy(tx_cmd->hdr->addr3, vif->addr); in iwl_mld_fill_tx_cmd_hdr()
534 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_fill_tx_cmd()
538 bool amsdu = ieee80211_is_data_qos(hdr->frame_control) && in iwl_mld_fill_tx_cmd()
544 dev_tx_cmd->hdr.cmd = TX_CMD; in iwl_mld_fill_tx_cmd()
546 if (!info->control.hw_key) in iwl_mld_fill_tx_cmd()
554 info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) { in iwl_mld_fill_tx_cmd()
557 hdr->frame_control); in iwl_mld_fill_tx_cmd()
558 } else if (!ieee80211_is_data(hdr->frame_control) || in iwl_mld_fill_tx_cmd()
560 mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) { in iwl_mld_fill_tx_cmd()
565 tx_cmd = (void *)dev_tx_cmd->payload; in iwl_mld_fill_tx_cmd()
569 tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu); in iwl_mld_fill_tx_cmd()
572 tx_cmd->len = cpu_to_le16((u16)skb->len); in iwl_mld_fill_tx_cmd()
574 tx_cmd->flags = cpu_to_le16(flags); in iwl_mld_fill_tx_cmd()
576 tx_cmd->rate_n_flags = rate_n_flags; in iwl_mld_fill_tx_cmd()
579 /* Caller of this need to check that info->control.vif is not NULL */
584 iwl_mld_vif_from_mac80211(info->control.vif); in iwl_mld_get_link_from_tx_info()
585 u32 link_id = u32_get_bits(info->control.flags, in iwl_mld_get_link_from_tx_info()
589 if (info->control.vif->active_links) in iwl_mld_get_link_from_tx_info()
590 link_id = ffs(info->control.vif->active_links) - 1; in iwl_mld_get_link_from_tx_info()
595 return rcu_dereference(mld_vif->link[link_id]); in iwl_mld_get_link_from_tx_info()
603 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_get_tx_queue_id()
604 __le16 fc = hdr->frame_control; in iwl_mld_get_tx_queue_id()
608 if (txq && txq->sta) in iwl_mld_get_tx_queue_id()
609 return iwl_mld_txq_from_mac80211(txq)->fw_id; in iwl_mld_get_tx_queue_id()
611 if (!info->control.vif) in iwl_mld_get_tx_queue_id()
614 switch (info->control.vif->type) { in iwl_mld_get_tx_queue_id()
629 return link->bcast_sta.queue_id; in iwl_mld_get_tx_queue_id()
631 if (is_multicast_ether_addr(hdr->addr1) && in iwl_mld_get_tx_queue_id()
633 return link->mcast_sta.queue_id; in iwl_mld_get_tx_queue_id()
635 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC, in iwl_mld_get_tx_queue_id()
637 return link->bcast_sta.queue_id; in iwl_mld_get_tx_queue_id()
639 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); in iwl_mld_get_tx_queue_id()
641 if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC && in iwl_mld_get_tx_queue_id()
642 mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) { in iwl_mld_get_tx_queue_id()
644 "Drop tx outside ROC with activity %d\n", in iwl_mld_get_tx_queue_id()
645 mld_vif->roc_activity); in iwl_mld_get_tx_queue_id()
651 return mld_vif->aux_sta.queue_id; in iwl_mld_get_tx_queue_id()
653 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); in iwl_mld_get_tx_queue_id()
654 return mld_vif->deflink.mon_sta.queue_id; in iwl_mld_get_tx_queue_id()
656 mld_vif = iwl_mld_vif_from_mac80211(info->control.vif); in iwl_mld_get_tx_queue_id()
658 if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) { in iwl_mld_get_tx_queue_id()
659 IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n"); in iwl_mld_get_tx_queue_id()
663 if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) { in iwl_mld_get_tx_queue_id()
664 IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n"); in iwl_mld_get_tx_queue_id()
669 return mld_vif->aux_sta.queue_id; in iwl_mld_get_tx_queue_id()
683 &iwl_mld_vif_from_mac80211(info->control.vif)->deflink; in iwl_mld_probe_resp_set_noa()
687 if (!info->control.vif->p2p) in iwl_mld_probe_resp_set_noa()
692 resp_data = rcu_dereference(mld_link->probe_resp_data); in iwl_mld_probe_resp_set_noa()
696 if (!resp_data->notif.noa_active) in iwl_mld_probe_resp_set_noa()
699 if (skb_tailroom(skb) < resp_data->noa_len) { in iwl_mld_probe_resp_set_noa()
700 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) { in iwl_mld_probe_resp_set_noa()
707 pos = skb_put(skb, resp_data->noa_len); in iwl_mld_probe_resp_set_noa()
711 *pos++ = resp_data->noa_len - 2; in iwl_mld_probe_resp_set_noa()
717 memcpy(pos, &resp_data->notif.noa_attr, in iwl_mld_probe_resp_set_noa()
718 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie)); in iwl_mld_probe_resp_set_noa()
728 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_tx_mpdu()
730 struct ieee80211_sta *sta = txq ? txq->sta : NULL; in iwl_mld_tx_mpdu()
732 int queue = iwl_mld_get_tx_queue_id(mld, txq, skb); in iwl_mld_tx_mpdu() local
735 if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") || in iwl_mld_tx_mpdu()
736 queue == IWL_MLD_INVALID_DROP_TX) in iwl_mld_tx_mpdu()
737 return -1; in iwl_mld_tx_mpdu()
739 if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control))) in iwl_mld_tx_mpdu()
740 return -1; in iwl_mld_tx_mpdu()
742 dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans); in iwl_mld_tx_mpdu()
744 return -1; in iwl_mld_tx_mpdu()
746 if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) { in iwl_mld_tx_mpdu()
748 return -1; in iwl_mld_tx_mpdu()
755 if (ieee80211_is_data(hdr->frame_control)) { in iwl_mld_tx_mpdu()
756 if (ieee80211_is_data_qos(hdr->frame_control)) in iwl_mld_tx_mpdu()
762 IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n", in iwl_mld_tx_mpdu()
763 tid, queue, skb->len); in iwl_mld_tx_mpdu()
765 /* From now on, we cannot access info->control */ in iwl_mld_tx_mpdu()
766 memset(&info->status, 0, sizeof(info->status)); in iwl_mld_tx_mpdu()
767 memset(info->driver_data, 0, sizeof(info->driver_data)); in iwl_mld_tx_mpdu()
769 info->driver_data[1] = dev_tx_cmd; in iwl_mld_tx_mpdu()
771 if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue)) in iwl_mld_tx_mpdu()
774 /* Update low-latency counter when a packet is queued instead in iwl_mld_tx_mpdu()
775 * of after TX, it makes sense for early low-latency detection in iwl_mld_tx_mpdu()
783 iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd); in iwl_mld_tx_mpdu()
784 IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue); in iwl_mld_tx_mpdu()
785 return -1; in iwl_mld_tx_mpdu()
798 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_tx_tso_segment()
800 unsigned int mss = skb_shinfo(skb)->gso_size; in iwl_mld_tx_tso_segment()
807 if (!ieee80211_is_data_qos(hdr->frame_control) || in iwl_mld_tx_tso_segment()
808 !sta->cur->max_rc_amsdu_len) in iwl_mld_tx_tso_segment()
814 if (skb->protocol == htons(ETH_P_IPV6) && in iwl_mld_tx_tso_segment()
815 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr != in iwl_mld_tx_tso_segment()
823 return -EINVAL; in iwl_mld_tx_tso_segment()
825 max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid]; in iwl_mld_tx_tso_segment()
831 pad = (4 - subf_len) & 0x3; in iwl_mld_tx_tso_segment()
833 /* If we have N subframes in the A-MSDU, then the A-MSDU's size is in iwl_mld_tx_tso_segment()
834 * N * subf_len + (N - 1) * pad. in iwl_mld_tx_tso_segment()
838 if (sta->max_amsdu_subframes && in iwl_mld_tx_tso_segment()
839 num_subframes > sta->max_amsdu_subframes) in iwl_mld_tx_tso_segment()
840 num_subframes = sta->max_amsdu_subframes; in iwl_mld_tx_tso_segment()
842 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mld_tx_tso_segment()
843 tcp_hdrlen(skb) + skb->data_len; in iwl_mld_tx_tso_segment()
845 /* Make sure we have enough TBs for the A-MSDU: in iwl_mld_tx_tso_segment()
850 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) > in iwl_mld_tx_tso_segment()
851 mld->trans->info.max_skb_frags) in iwl_mld_tx_tso_segment()
857 /* This skb fits in one single A-MSDU */ in iwl_mld_tx_tso_segment()
864 * into one A-MSDU. in iwl_mld_tx_tso_segment()
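The A-MSDU sizing comment above gives the rule: with N subframes of subf_len bytes and (N - 1) pads between them, the total is N * subf_len + (N - 1) * pad, so the largest N that fits under the per-TID limit is (limit + pad) / (subf_len + pad). A standalone worked example with made-up numbers (the real limit comes from rate scaling via max_tid_amsdu_len):

#include <stdio.h>

/* Worked example of the sizing rule in the comment above; the concrete
 * numbers (MSS, header overhead, A-MSDU limit) are illustrative only.
 */
int main(void)
{
	unsigned int mss = 1438;		/* TCP payload per subframe */
	unsigned int hdr_overhead = 56;		/* subframe + SNAP + IP + TCP headers (example) */
	unsigned int subf_len = mss + hdr_overhead;
	unsigned int pad = (4 - subf_len) & 0x3;
	unsigned int limit = 7900;		/* example max_tid_amsdu_len */
	/* total = N * subf_len + (N - 1) * pad  =>  N <= (limit + pad) / (subf_len + pad) */
	unsigned int n = (limit + pad) / (subf_len + pad);

	printf("subf_len=%u pad=%u -> %u subframes, %u bytes total\n",
	       subf_len, pad, n, n * subf_len + (n - 1) * pad);
	return 0;
}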
881 if (WARN_ON(!txq || !txq->sta)) in iwl_mld_tx_tso()
882 return -1; in iwl_mld_tx_tso()
884 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) - in iwl_mld_tx_tso()
885 tcp_hdrlen(skb) + skb->data_len; in iwl_mld_tx_tso()
887 if (payload_len <= skb_shinfo(skb)->gso_size) in iwl_mld_tx_tso()
890 if (!info->control.vif) in iwl_mld_tx_tso()
891 return -1; in iwl_mld_tx_tso()
895 ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs); in iwl_mld_tx_tso()
917 ieee80211_free_txskb(mld->hw, skb); in iwl_mld_tx_tso()
936 return -1; in iwl_mld_tx_tso()
953 ieee80211_free_txskb(mld->hw, skb); in iwl_mld_tx_skb()
966 * mld_txq->tx_request logic: in iwl_mld_tx_from_txq()
969 * will now start TX and other threads should quit. in iwl_mld_tx_from_txq()
973 * have raced with the check whether the queue is empty, the TXing in iwl_mld_tx_from_txq()
974 * thread should check the queue's status one more time before leaving. in iwl_mld_tx_from_txq()
975 * This check is done in order to not leave any TX hanging in the queue in iwl_mld_tx_from_txq()
976 * until the next TX invocation (which may not even happen). in iwl_mld_tx_from_txq()
979 * check the queue, so do nothing. in iwl_mld_tx_from_txq()
981 if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2)) in iwl_mld_tx_from_txq()
986 while (likely(!mld_txq->status.stop_full) && in iwl_mld_tx_from_txq()
987 (skb = ieee80211_tx_dequeue(mld->hw, txq))) in iwl_mld_tx_from_txq()
989 } while (atomic_dec_return(&mld_txq->tx_request)); in iwl_mld_tx_from_txq()
992 txq->sta ? txq->sta->addr : zero_addr, txq->tid); in iwl_mld_tx_from_txq()
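The long comment in iwl_mld_tx_from_txq() describes the tx_request protocol: only one thread drains the queue at a time, and a racing thread bumps the counter (capped at 2) so the draining thread re-checks the queue before leaving, ensuring no frame is left behind. A standalone C11 sketch of that handshake; the kernel helpers atomic_fetch_add_unless() and atomic_dec_return() are emulated, and drain_queue() is a stand-in for the dequeue-and-TX loop:

#include <stdatomic.h>
#include <stdio.h>

static atomic_int tx_request;

/* Emulates atomic_fetch_add_unless(v, 1, 2): add 1 unless the value is
 * already 2; return the old value either way.
 */
static int fetch_add_unless_2(atomic_int *v)
{
	int old = atomic_load(v);

	while (old != 2 &&
	       !atomic_compare_exchange_weak(v, &old, old + 1))
		;
	return old;
}

static void drain_queue(void)
{
	puts("draining TX queue");
}

static void tx_from_txq(void)
{
	/* Non-zero old value: another thread is draining and has now been
	 * told to re-check, so this thread can leave immediately.
	 */
	if (fetch_add_unless_2(&tx_request))
		return;

	do {
		drain_queue();
		/* atomic_fetch_sub() returns the old value; if someone bumped
		 * the counter while we were draining, the new value is still
		 * non-zero and we go around again.
		 */
	} while (atomic_fetch_sub(&tx_request, 1) - 1);
}

int main(void)
{
	tx_from_txq();
	return 0;
}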
1001 enum nl80211_band band = info->band; in iwl_mld_hwrate_to_tx_rate()
1002 struct ieee80211_tx_rate *tx_rate = &info->status.rates[0]; in iwl_mld_hwrate_to_tx_rate()
1004 mld->fw_rates_ver_3); in iwl_mld_hwrate_to_tx_rate()
1010 tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI; in iwl_mld_hwrate_to_tx_rate()
1016 tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH; in iwl_mld_hwrate_to_tx_rate()
1019 tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH; in iwl_mld_hwrate_to_tx_rate()
1022 tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH; in iwl_mld_hwrate_to_tx_rate()
1030 tx_rate->flags |= IEEE80211_TX_RC_MCS; in iwl_mld_hwrate_to_tx_rate()
1031 tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags); in iwl_mld_hwrate_to_tx_rate()
1038 tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS; in iwl_mld_hwrate_to_tx_rate()
1045 tx_rate->idx = 0; in iwl_mld_hwrate_to_tx_rate()
1048 tx_rate->idx = in iwl_mld_hwrate_to_tx_rate()
1056 struct iwl_rx_packet *pkt) in iwl_mld_handle_tx_resp_notif() argument
1058 struct iwl_tx_resp *tx_resp = (void *)pkt->data; in iwl_mld_handle_tx_resp_notif()
1059 int txq_id = le16_to_cpu(tx_resp->tx_queue); in iwl_mld_handle_tx_resp_notif()
1060 struct agg_tx_status *agg_status = &tx_resp->status; in iwl_mld_handle_tx_resp_notif()
1061 u32 status = le16_to_cpu(agg_status->status); in iwl_mld_handle_tx_resp_notif()
1062 u32 pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mld_handle_tx_resp_notif()
1064 int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid); in iwl_mld_handle_tx_resp_notif()
1065 int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid); in iwl_mld_handle_tx_resp_notif()
1074 if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1, in iwl_mld_handle_tx_resp_notif()
1076 tx_resp->frame_count)) in iwl_mld_handle_tx_resp_notif()
1086 tx_resp->frame_count) & 0xFFFF; in iwl_mld_handle_tx_resp_notif()
1091 iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false); in iwl_mld_handle_tx_resp_notif()
1096 struct ieee80211_hdr *hdr = (void *)skb->data; in iwl_mld_handle_tx_resp_notif()
1100 iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); in iwl_mld_handle_tx_resp_notif()
1102 memset(&info->status, 0, sizeof(info->status)); in iwl_mld_handle_tx_resp_notif()
1104 info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED); in iwl_mld_handle_tx_resp_notif()
1110 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mld_handle_tx_resp_notif()
1121 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mld_handle_tx_resp_notif()
1127 if (ieee80211_is_action(hdr->frame_control)) in iwl_mld_handle_tx_resp_notif()
1129 else if (ieee80211_is_mgmt(hdr->frame_control)) in iwl_mld_handle_tx_resp_notif()
1132 iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL); in iwl_mld_handle_tx_resp_notif()
1135 iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info); in iwl_mld_handle_tx_resp_notif()
1137 if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1))) in iwl_mld_handle_tx_resp_notif()
1138 ieee80211_tx_status_skb(mld->hw, skb); in iwl_mld_handle_tx_resp_notif()
1143 txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate), in iwl_mld_handle_tx_resp_notif()
1144 tx_resp->failure_frame); in iwl_mld_handle_tx_resp_notif()
1147 iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant); in iwl_mld_handle_tx_resp_notif()
1149 if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations, in iwl_mld_handle_tx_resp_notif()
1155 link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); in iwl_mld_handle_tx_resp_notif()
1157 /* This can happen if the TX cmd was sent before pre_rcu_remove in iwl_mld_handle_tx_resp_notif()
1158 * but the TX response was received after in iwl_mld_handle_tx_resp_notif()
1169 mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta); in iwl_mld_handle_tx_resp_notif()
1171 if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED) in iwl_mld_handle_tx_resp_notif()
1172 iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant); in iwl_mld_handle_tx_resp_notif()
1188 iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush); in iwl_mld_tx_reclaim_txq()
1194 iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]); in iwl_mld_tx_reclaim_txq()
1196 memset(&info->status, 0, sizeof(info->status)); in iwl_mld_tx_reclaim_txq()
1203 info->flags |= IEEE80211_TX_STAT_ACK; in iwl_mld_tx_reclaim_txq()
1205 info->flags &= ~IEEE80211_TX_STAT_ACK; in iwl_mld_tx_reclaim_txq()
1207 ieee80211_tx_status_skb(mld->hw, skb); in iwl_mld_tx_reclaim_txq()
1240 ret = -EIO; in iwl_mld_flush_link_sta_txqs()
1244 rsp = (void *)cmd.resp_pkt->data; in iwl_mld_flush_link_sta_txqs()
1246 if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id, in iwl_mld_flush_link_sta_txqs()
1248 le16_to_cpu(rsp->sta_id))) { in iwl_mld_flush_link_sta_txqs()
1249 ret = -EIO; in iwl_mld_flush_link_sta_txqs()
1253 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues); in iwl_mld_flush_link_sta_txqs()
1256 ret = -EIO; in iwl_mld_flush_link_sta_txqs()
1261 struct iwl_flush_queue_info *queue_info = &rsp->queues[i]; in iwl_mld_flush_link_sta_txqs()
1262 int read_after = le16_to_cpu(queue_info->read_after_flush); in iwl_mld_flush_link_sta_txqs()
1263 int txq_id = le16_to_cpu(queue_info->queue_num); in iwl_mld_flush_link_sta_txqs()
1266 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), in iwl_mld_flush_link_sta_txqs()
1271 "tid %d txq_id %d read-before %d read-after %d\n", in iwl_mld_flush_link_sta_txqs()
1272 le16_to_cpu(queue_info->tid), txq_id, in iwl_mld_flush_link_sta_txqs()
1273 le16_to_cpu(queue_info->read_before_flush), in iwl_mld_flush_link_sta_txqs()
1289 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_ensure_queue()
1291 if (likely(mld_txq->status.allocated)) in iwl_mld_ensure_queue()
1296 spin_lock_bh(&mld->add_txqs_lock); in iwl_mld_ensure_queue()
1297 if (!list_empty(&mld_txq->list)) in iwl_mld_ensure_queue()
1298 list_del_init(&mld_txq->list); in iwl_mld_ensure_queue()
1299 spin_unlock_bh(&mld->add_txqs_lock); in iwl_mld_ensure_queue()
1314 lockdep_assert_wiphy(mld->wiphy); in iwl_mld_update_sta_txqs()
1318 sta->txq[tid != IWL_MAX_TID_COUNT ? in iwl_mld_update_sta_txqs()
1324 if (!mld_txq->status.allocated) in iwl_mld_update_sta_txqs()
1344 struct iwl_rx_packet *pkt) in iwl_mld_handle_compressed_ba_notif() argument
1346 struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data; in iwl_mld_handle_compressed_ba_notif()
1347 u32 pkt_len = iwl_rx_packet_payload_len(pkt); in iwl_mld_handle_compressed_ba_notif()
1348 u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt); in iwl_mld_handle_compressed_ba_notif()
1349 u8 sta_id = ba_res->sta_id; in iwl_mld_handle_compressed_ba_notif()
1362 sta_id, le32_to_cpu(ba_res->flags), in iwl_mld_handle_compressed_ba_notif()
1363 le16_to_cpu(ba_res->txed), in iwl_mld_handle_compressed_ba_notif()
1364 le16_to_cpu(ba_res->done)); in iwl_mld_handle_compressed_ba_notif()
1367 struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i]; in iwl_mld_handle_compressed_ba_notif()
1368 int txq_id = le16_to_cpu(ba_tfd->q_num); in iwl_mld_handle_compressed_ba_notif()
1369 int index = le16_to_cpu(ba_tfd->tfd_index); in iwl_mld_handle_compressed_ba_notif()
1372 txq_id >= ARRAY_SIZE(mld->fw_id_to_txq), in iwl_mld_handle_compressed_ba_notif()
1379 if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations, in iwl_mld_handle_compressed_ba_notif()
1385 link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]); in iwl_mld_handle_compressed_ba_notif()
1391 iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed)); in iwl_mld_handle_compressed_ba_notif()