1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
14 #include "iwl-trans.h"
15 #include "iwl-nvm-utils.h"
18 #include "time-sync.h"
27 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL, FW_DBG_TRIGGER_BA);
31 ba_trig = (void *)trig->data;
33 if (!(le16_to_cpu(ba_trig->tx_bar) & BIT(tid)))
36 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
48 struct ieee80211_hdr *hdr = (void *)skb->data;
49 u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
55 if (skb->ip_summed != CHECKSUM_PARTIAL)
59 if (WARN_ONCE(!(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) ||
60 (skb->protocol != htons(ETH_P_IP) &&
61 skb->protocol != htons(ETH_P_IPV6)),
67 if (skb->protocol == htons(ETH_P_IP)) {
68 protocol = ip_hdr(skb)->protocol;
75 protocol = ipv6h->nexthdr;
88 protocol = hp->nexthdr;
91 /* if we get here - protocol now should be TCP/UDP */
112 if (skb->protocol == htons(ETH_P_IP) && amsdu) {
113 ip_hdr(skb)->check = 0;
119 tcp_hdr(skb)->check = 0;
121 udp_hdr(skb)->check = 0;
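/*
 * The IP and TCP/UDP checksum fields are zeroed here so that the
 * hardware checksum offload (enabled via the offload_assist bits set
 * in this helper) writes the final values.
 */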
130 if (!iwl_mvm_has_new_tx_api(mvm) && info->control.hw_key &&
131 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP40 &&
132 info->control.hw_key->cipher != WLAN_CIPHER_SUITE_WEP104)
133 mh_len += info->control.hw_key->iv_len;
139 else if (ieee80211_hdrlen(hdr->frame_control) % 4)
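/*
 * A MAC header whose length is not a multiple of 4 means the transport
 * will insert 2 bytes of padding before the payload; the pad bit set in
 * offload_assist here tells the device to expect it.
 */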
153 struct ieee80211_hdr *hdr = (void *)skb->data;
154 __le16 fc = hdr->frame_control;
155 u32 tx_flags = le32_to_cpu(tx_cmd->tx_flags);
156 u32 len = skb->len + FCS_LEN;
160 if (!(info->flags & IEEE80211_TX_CTL_NO_ACK) ||
162 !is_multicast_ether_addr(hdr->addr1)))
175 tx_cmd->tid_tspec = qc[0] & 0xf;
179 struct ieee80211_bar *bar = (void *)skb->data;
180 u16 control = le16_to_cpu(bar->control);
181 u16 ssn = le16_to_cpu(bar->start_seq_num);
184 tx_cmd->tid_tspec = (control &
187 WARN_ON_ONCE(tx_cmd->tid_tspec >= IWL_MAX_TID_COUNT);
188 iwl_mvm_bar_check_trigger(mvm, bar->ra, tx_cmd->tid_tspec,
192 tx_cmd->tid_tspec = IWL_TID_NON_QOS;
194 tx_cmd->tid_tspec = IWL_MAX_TID_COUNT;
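/*
 * TID selection above: QoS data takes the TID from the QoS control
 * field, BlockAckReq frames take it from the BAR control field, non-QoS
 * data uses IWL_TID_NON_QOS, and all other frames use IWL_MAX_TID_COUNT.
 */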
196 if (info->flags & IEEE80211_TX_CTL_ASSIGN_SEQ)
203 if (tx_cmd->tid_tspec < IWL_MAX_TID_COUNT)
204 ac = tid_to_mac80211_ac[tx_cmd->tid_tspec];
213 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_ASSOC);
215 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
217 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
219 /* The spec allows Action frames in A-MPDU, we don't support
222 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_AMPDU);
223 } else if (info->control.flags & IEEE80211_TX_CTRL_PORT_CTRL_PROTO) {
224 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_MGMT);
226 tx_cmd->pm_frame_timeout = cpu_to_le16(PM_FRAME_NONE);
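/*
 * Unicast data frames longer than the RTS threshold get RTS/CTS
 * protection below.
 */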
229 if (ieee80211_is_data(fc) && len > mvm->rts_threshold &&
230 !is_multicast_ether_addr(hdr->addr1))
233 if (fw_has_capa(&mvm->fw->ucode_capa,
238 tx_cmd->tx_flags = cpu_to_le32(tx_flags);
239 /* Total # bytes to be transmitted - PCIe code will adjust for A-MSDU */
240 tx_cmd->len = cpu_to_le16((u16)skb->len);
241 tx_cmd->life_time = cpu_to_le32(TX_CMD_LIFE_TIME_INFINITE);
242 tx_cmd->sta_id = sta_id;
244 tx_cmd->offload_assist =
252 if (info->band == NL80211_BAND_2GHZ &&
254 return mvm->cfg->non_shared_ant << RATE_MCS_ANT_POS;
259 return BIT(mvmsta->tx_ant) << RATE_MCS_ANT_POS;
262 return BIT(mvm->mgmt_last_antenna_idx) << RATE_MCS_ANT_POS;
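/*
 * Antenna selection above: on 2.4 GHz the antenna not shared with
 * Bluetooth (non_shared_ant) may be forced for coexistence, otherwise
 * the station's configured TX antenna is used, and frames without a
 * station fall back to the toggled management antenna.
 */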
277 info->control.vif);
279 /* Get PLCP rate for tx_cmd->rate_n_flags */
280 rate_plcp = iwl_mvm_mac80211_idx_to_hwrate(mvm->fw, rate_idx);
285 if (iwl_fw_lookup_cmd_ver(mvm->fw, TX_CMD, 0) > 8) {
302 struct ieee80211_tx_rate *rate = &info->control.rates[0];
310 if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
317 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
319 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
321 else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
323 else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
326 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
328 } else if (rate->flags & IEEE80211_TX_RC_MCS) {
330 result |= u32_encode_bits(rate->idx,
333 if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
335 if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
337 if (info->flags & IEEE80211_TX_CTL_LDPC)
339 if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
342 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TX_CMD, 0) > 6)
345 int rate_idx = info->control.rates[0].idx;
350 if (info->control.antennas)
351 result |= u32_encode_bits(info->control.antennas,
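/*
 * The branches above translate an injected rate (info->control.rates[0])
 * from mac80211's VHT/HT/legacy encodings into the firmware rate_n_flags
 * word, including short GI, bandwidth, LDPC/STBC and, when given, an
 * explicit antenna mask.
 */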
363 int rate_idx = -1;
365 if (!ieee80211_hw_check(mvm->hw, HAS_RATE_CONTROL)) {
366 /* info->control is only relevant for non HW rate control */
369 WARN_ONCE(info->control.rates[0].flags & IEEE80211_TX_RC_MCS &&
372 info->control.rates[0].flags,
373 info->control.rates[0].idx,
375 sta ? iwl_mvm_sta_from_mac80211(sta)->sta_state : -1);
377 rate_idx = info->control.rates[0].idx;
382 if (info->band != NL80211_BAND_2GHZ ||
383 (info->flags & IEEE80211_TX_CTL_NO_CCK_RATE))
397 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
412 tx_cmd->rts_retry_limit = IWL_RTS_DFAULT_RETRY_LIMIT;
416 tx_cmd->data_retry_limit = IWL_MGMT_DFAULT_RETRY_LIMIT;
417 tx_cmd->rts_retry_limit =
418 min(tx_cmd->data_retry_limit, tx_cmd->rts_retry_limit);
420 tx_cmd->data_retry_limit = IWL_BAR_DFAULT_RETRY_LIMIT;
422 tx_cmd->data_retry_limit = IWL_DEFAULT_TX_RETRY;
431 !(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))) {
434 if (mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED) {
435 tx_cmd->initial_rate_index = 0;
436 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_STA_RATE);
440 tx_cmd->tx_flags |=
445 tx_cmd->rate_n_flags =
452 struct ieee80211_key_conf *keyconf = info->control.hw_key;
455 pn = atomic64_inc_return(&keyconf->tx_pn);
458 crypto_hdr[3] = 0x20 | (keyconf->keyidx << 6);
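/*
 * Byte 3 of the CCMP/GCMP header holds the Ext IV bit (0x20) and the
 * key index in its two most significant bits, hence keyidx << 6.
 */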
475 struct ieee80211_key_conf *keyconf = info->control.hw_key;
476 u8 *crypto_hdr = skb_frag->data + hdrlen;
480 switch (keyconf->cipher) {
487 tx_cmd->sec_ctl = TX_CMD_SEC_TKIP;
488 pn = atomic64_inc_return(&keyconf->tx_pn);
490 ieee80211_get_tkip_p2k(keyconf, skb_frag, tx_cmd->key);
494 tx_cmd->sec_ctl |= TX_CMD_SEC_KEY128;
497 tx_cmd->sec_ctl |= TX_CMD_SEC_WEP |
498 ((keyconf->keyidx << TX_CMD_SEC_WEP_KEY_IDX_POS) &
501 memcpy(&tx_cmd->key[3], keyconf->key, keyconf->keylen);
514 tx_cmd->sec_ctl |= type | TX_CMD_SEC_KEY_FROM_TABLE;
515 tx_cmd->key[0] = keyconf->hw_key_idx;
519 tx_cmd->sec_ctl |= TX_CMD_SEC_EXT;
531 if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
534 if (likely(ieee80211_is_data(hdr->frame_control) &&
535 mvmsta->sta_state >= IEEE80211_STA_AUTHORIZED))
545 return mvm->trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_BZ;
555 memcpy(out_hdr->addr3, addr3_override, ETH_ALEN);
567 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
571 dev_cmd = iwl_trans_alloc_tx_cmd(mvm->trans);
576 dev_cmd->hdr.cmd = TX_CMD;
585 if (ieee80211_is_data_qos(hdr->frame_control)) {
591 if (!info->control.hw_key)
604 hdr->frame_control);
605 } else if (!ieee80211_is_data(hdr->frame_control) ||
606 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED) {
611 if (mvm->trans->trans_cfg->device_family >=
613 struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload;
617 cmd->offload_assist = cpu_to_le32(offload_assist);
620 cmd->len = cpu_to_le16((u16)skb->len);
623 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
625 cmd->flags = cpu_to_le16(flags);
626 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
628 struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload;
632 cmd->offload_assist = cpu_to_le16(offload_assist);
635 cmd->len = cpu_to_le16((u16)skb->len);
638 iwl_mvm_copy_hdr(cmd->hdr, hdr, hdrlen, addr3_override);
640 cmd->flags = cpu_to_le32(flags);
641 cmd->rate_n_flags = cpu_to_le32(rate_n_flags);
646 tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
648 if (info->control.hw_key)
653 iwl_mvm_set_tx_cmd_rate(mvm, tx_cmd, info, sta, hdr->frame_control);
656 iwl_mvm_copy_hdr(tx_cmd->hdr, hdr, hdrlen, addr3_override);
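/*
 * Three TX command layouts are filled above: IWL_DEVICE_FAMILY_AX210 and
 * later use struct iwl_tx_cmd_gen3 (32-bit offload_assist, 16-bit flags),
 * the preceding branch uses struct iwl_tx_cmd_gen2 (16-bit offload_assist,
 * 32-bit flags), and older devices fall through to the legacy
 * struct iwl_tx_cmd path.
 */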
667 memset(&skb_info->status, 0, sizeof(skb_info->status));
668 memset(skb_info->driver_data, 0, sizeof(skb_info->driver_data));
670 skb_info->driver_data[1] = cmd;
678 struct ieee80211_hdr *hdr = (void *)skb->data;
679 __le16 fc = hdr->frame_control;
681 switch (info->control.vif->type) {
685 * Non-bufferable frames use the broadcast station, thus they
689 * response (with non-success status) for a station we can't
697 return link->mgmt_queue;
700 is_multicast_ether_addr(hdr->addr1))
701 return link->cab_queue;
703 WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
705 return link->mgmt_queue;
708 return mvm->p2p_dev_queue;
711 return mvm->p2p_dev_queue;
714 return -1;
723 iwl_mvm_vif_from_mac80211(info->control.vif);
724 struct ieee80211_mgmt *mgmt = (struct ieee80211_mgmt *)skb->data;
725 int base_len = (u8 *)mgmt->u.probe_resp.variable - (u8 *)mgmt;
738 resp_data = rcu_dereference(mvmvif->deflink.probe_resp_data);
742 if (!resp_data->notif.noa_active)
746 mgmt->u.probe_resp.variable,
747 skb->len - base_len,
754 if (skb_tailroom(skb) < resp_data->noa_len) {
755 if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
762 pos = skb_put(skb, resp_data->noa_len);
766 *pos++ = resp_data->noa_len - 2;
772 memcpy(pos, &resp_data->notif.noa_attr,
773 resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));
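/*
 * The element length octet excludes the two-byte ID/length header
 * (noa_len - 2), and the copy skips the struct ieee80211_vendor_ie part
 * already written, so only the NoA attribute body is appended here.
 */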
781 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
785 int hdrlen = ieee80211_hdrlen(hdr->frame_control);
786 __le16 fc = hdr->frame_control;
787 bool offchannel = IEEE80211_SKB_CB(skb)->flags &
789 int queue = -1;
792 return -1;
794 memcpy(&info, skb->cb, sizeof(info));
796 if (WARN_ON_ONCE(skb->len > IEEE80211_MAX_DATA_LEN + hdrlen))
797 return -1;
800 return -1;
807 if ((info.control.vif->type == NL80211_IFTYPE_P2P_DEVICE &&
809 (info.control.vif->type == NL80211_IFTYPE_STATION &&
821 sta_id = mvm->aux_sta.sta_id;
822 queue = mvm->aux_queue;
824 return -1;
825 } else if (info.control.vif->type ==
827 info.control.vif->type == NL80211_IFTYPE_AP ||
828 info.control.vif->type == NL80211_IFTYPE_ADHOC) {
834 if (info.control.vif->active_links)
835 link_id = ffs(info.control.vif->active_links) - 1;
840 link = mvmvif->link[link_id];
842 return -1;
844 if (!ieee80211_is_data(hdr->frame_control))
845 sta_id = link->bcast_sta.sta_id;
847 sta_id = link->mcast_sta.sta_id;
851 } else if (info.control.vif->type == NL80211_IFTYPE_MONITOR) {
852 queue = mvm->snif_queue;
853 sta_id = mvm->snif_sta.sta_id;
859 return -1;
870 return -1;
872 /* From now on, we cannot access info->control */
875 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, queue)) {
876 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
877 return -1;
894 if (sta->deflink.he_cap.has_he && !WARN_ON(!iwl_mvm_has_new_tx_api(mvm)))
905 val = mvmsta->max_amsdu_len;
907 if (hweight16(sta->valid_links) <= 1) {
908 if (sta->valid_links) {
910 unsigned int link = ffs(sta->valid_links) - 1;
913 link_conf = rcu_dereference(mvmsta->vif->link_conf[link]);
917 band = link_conf->chanreq.oper.chan->band;
920 band = mvmsta->vif->bss_conf.chanreq.oper.chan->band;
924 } else if (fw_has_capa(&mvm->fw->ucode_capa,
929 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
936 mvm->fwrt.smem_cfg.lmac[lmac].txfifo_size[txf] - 256);
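/*
 * The 256-byte margin below the TX FIFO size leaves headroom for the TX
 * command and headers, so a single A-MSDU cannot overflow the FIFO.
 */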
947 struct ieee80211_hdr *hdr = (void *)skb->data;
948 char cb[sizeof(skb->cb)];
951 unsigned int mss = skb_shinfo(skb)->gso_size;
952 bool ipv4 = (skb->protocol == htons(ETH_P_IP));
953 bool qos = ieee80211_is_data_qos(hdr->frame_control);
954 u16 ip_base_id = ipv4 ? ntohs(ip_hdr(skb)->id) : 0;
956 skb_shinfo(skb)->gso_size = num_subframes * mss;
957 memcpy(cb, skb->cb, sizeof(cb));
960 skb_shinfo(skb)->gso_size = mss;
961 skb_shinfo(skb)->gso_type = ipv4 ? SKB_GSO_TCPV4 : SKB_GSO_TCPV6;
963 if (IS_ERR(next) && PTR_ERR(next) == -ENOMEM)
964 return -ENOMEM;
974 memcpy(tmp->cb, cb, sizeof(tmp->cb));
976 * Compute the length of all the data added for the A-MSDU.
978 * command. We have: SNAP + IP + TCP for n -1 subframes and
981 tcp_payload_len = skb_tail_pointer(tmp) -
982 skb_transport_header(tmp) -
983 tcp_hdrlen(tmp) + tmp->data_len;
986 ip_hdr(tmp)->id = htons(ip_base_id + i * num_subframes);
989 skb_shinfo(tmp)->gso_size = mss;
990 skb_shinfo(tmp)->gso_type = ipv4 ? SKB_GSO_TCPV4 :
999 qc = ieee80211_get_qos_ctl((void *)tmp->data);
1002 skb_shinfo(tmp)->gso_size = 0;
1019 struct ieee80211_hdr *hdr = (void *)skb->data;
1020 unsigned int mss = skb_shinfo(skb)->gso_size;
1028 if (!mvmsta->max_amsdu_len ||
1029 !ieee80211_is_data_qos(hdr->frame_control) ||
1030 !mvmsta->amsdu_enabled)
1037 if (skb->protocol == htons(ETH_P_IPV6) &&
1038 ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
1046 return -EINVAL;
1052 if ((info->flags & IEEE80211_TX_CTL_AMPDU &&
1053 !mvmsta->tid_data[tid].amsdu_in_ampdu_allowed) ||
1054 !(mvmsta->amsdu_enabled & BIT(tid)))
1061 min_t(unsigned int, sta->cur->max_amsdu_len,
1065 * Limit A-MSDU in A-MPDU to 4095 bytes when VHT is not
1066 * supported. This is a spec requirement (IEEE 802.11-2015
1069 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1070 !sta->deflink.vht_cap.vht_supported)
1075 pad = (4 - subf_len) & 0x3;
1078 * If we have N subframes in the A-MSDU, then the A-MSDU's size is
1079 * N * subf_len + (N - 1) * pad.
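 * Solving N * subf_len + (N - 1) * pad <= max_amsdu_len for the largest
 * integer N gives N = (max_amsdu_len + pad) / (subf_len + pad).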
1083 if (sta->max_amsdu_subframes &&
1084 num_subframes > sta->max_amsdu_subframes)
1085 num_subframes = sta->max_amsdu_subframes;
1087 tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1088 tcp_hdrlen(skb) + skb->data_len;
1091 * Make sure we have enough TBs for the A-MSDU:
1096 if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
1097 mvm->trans->max_skb_frags)
1103 /* This skb fits in one single A-MSDU */
1111 * create SKBs that can fit into one A-MSDU.
1125 return -1;
1129 /* Check if there are any timed-out TIDs on a given shared TXQ */
1132 unsigned long queue_tid_bitmap = mvm->queue_info[txq_id].tid_bitmap;
1140 if (time_before(mvm->queue_info[txq_id].last_frame_time[tid] +
1152 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1158 mdata = &mvm->tcm.data[mac];
1160 if (mvm->tcm.paused)
1163 if (time_after(jiffies, mvm->tcm.ts + MVM_TCM_PERIOD))
1164 schedule_delayed_work(&mvm->tcm.work, 0);
1166 mdata->tx.airtime += airtime;
1173 int mac = mvmsta->mac_id_n_color & FW_CTXT_ID_MSK;
1177 return -EINVAL;
1179 mdata = &mvm->tcm.data[mac];
1181 mdata->tx.pkts[ac]++;
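/*
 * These helpers feed the per-MAC TCM (traffic condition monitor)
 * statistics: transmitted airtime and queued packets per AC. The TCM
 * work is scheduled once MVM_TCM_PERIOD has elapsed since the last
 * evaluation.
 */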
1196 struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
1207 fc = hdr->frame_control;
1211 return -1;
1214 return -1;
1216 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1217 return -1;
1219 if (unlikely(ieee80211_is_any_nullfunc(fc)) && sta->deflink.he_cap.has_he)
1220 return -1;
1226 sta, mvmsta->deflink.sta_id,
1232 * we handle that entirely ourselves -- for uAPSD the firmware
1233 * will always send a notification, and for PS-Poll responses
1236 info->flags &= ~IEEE80211_TX_STATUS_EOSP;
1238 spin_lock(&mvmsta->lock);
1249 is_ampdu = info->flags & IEEE80211_TX_CTL_AMPDU;
1251 mvmsta->tid_data[tid].state != IWL_AGG_ON,
1253 mvmsta->tid_data[tid].state, tid))
1256 seq_number = mvmsta->tid_data[tid].seq_number;
1260 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1262 hdr->seq_ctrl &= cpu_to_le16(IEEE80211_SCTL_FRAG);
1263 hdr->seq_ctrl |= cpu_to_le16(seq_number);
1265 tx_cmd->hdr->seq_ctrl = hdr->seq_ctrl;
1272 txq_id = mvmsta->tid_data[tid].txq_id;
1274 WARN_ON_ONCE(info->flags & IEEE80211_TX_CTL_SEND_AFTER_DTIM);
1281 fc, tid, txq_id, mvm, skb, skb->len, info, sta);
1283 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1284 spin_unlock(&mvmsta->lock);
1285 return -1;
1290 mvm->queue_info[txq_id].last_frame_time[tid] = jiffies;
1293 * If we have timed-out TIDs - schedule the worker that will
1298 * mvm->add_stream_wk can't ruin the state, and if we DON'T
1302 if (unlikely(mvm->queue_info[txq_id].status ==
1305 schedule_work(&mvm->add_stream_wk);
1308 IWL_DEBUG_TX(mvm, "TX to [%d|%d] Q:%d - seq: 0x%x len %d\n",
1309 mvmsta->deflink.sta_id, tid, txq_id,
1310 IEEE80211_SEQ_TO_SN(seq_number), skb->len);
1312 /* From now on, we cannot access info->control */
1322 info->control.hw_key &&
1324 info->control.hw_key->iv_len : 0);
1326 if (iwl_trans_tx(mvm->trans, skb, dev_cmd, txq_id))
1330 mvmsta->tid_data[tid].seq_number = seq_number + 0x10;
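/*
 * seq_ctrl keeps the fragment number in its four low bits, so advancing
 * the sequence number by one means adding 0x10.
 */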
1332 spin_unlock(&mvmsta->lock);
1341 iwl_trans_free_tx_cmd(mvm->trans, dev_cmd);
1342 spin_unlock(&mvmsta->lock);
1344 IWL_DEBUG_TX(mvm, "TX to [%d|%d] dropped\n", mvmsta->deflink.sta_id,
1346 return -1;
1362 return -1;
1364 if (WARN_ON_ONCE(mvmsta->deflink.sta_id == IWL_MVM_INVALID_STA))
1365 return -1;
1367 memcpy(&info, skb->cb, sizeof(info));
1372 payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
1373 tcp_hdrlen(skb) + skb->data_len;
1375 if (payload_len <= skb_shinfo(skb)->gso_size)
1382 return -1;
1391 * As described in IEEE Std 802.11-2020, table 9-30 (Address
1392 * field contents), A-MSDU address 3 should contain the BSSID
1397 * A-MSDU subframe headers from it.
1399 switch (vif->type) {
1401 addr3 = vif->cfg.ap_addr;
1404 addr3 = vif->addr;
1416 hdr = (void *)skb->data;
1417 amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
1430 ieee80211_free_txskb(mvm->hw, skb);
1445 struct iwl_mvm_tid_data *tid_data = &mvmsta->tid_data[tid];
1446 struct ieee80211_vif *vif = mvmsta->vif;
1449 lockdep_assert_held(&mvmsta->lock);
1451 if ((tid_data->state == IWL_AGG_ON ||
1452 tid_data->state == IWL_EMPTYING_HW_QUEUE_DELBA) &&
1466 normalized_ssn = tid_data->ssn;
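/*
 * gen2 hardware tracks next_reclaimed with fewer bits than the full SSN,
 * so the SSN is normalized below to keep the wrap-around comparison
 * against next_reclaimed consistent.
 */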
1467 if (mvm->trans->trans_cfg->gen2)
1470 if (normalized_ssn != tid_data->next_reclaimed)
1473 switch (tid_data->state) {
1477 tid_data->next_reclaimed);
1478 tid_data->state = IWL_AGG_STARTING;
1479 ieee80211_start_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1485 tid_data->next_reclaimed);
1486 tid_data->state = IWL_AGG_OFF;
1487 ieee80211_stop_tx_ba_cb_irqsafe(vif, sta->addr, tid);
1560 r->flags |=
1565 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1567 r->flags |= IEEE80211_TX_RC_MCS;
1568 r->idx = rate;
1573 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1577 r->idx = 0;
1579 r->idx = iwl_mvm_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1589 r->flags |= IEEE80211_TX_RC_GREEN_FIELD;
1591 r->flags |=
1596 r->flags |= IEEE80211_TX_RC_SHORT_GI;
1598 r->flags |= IEEE80211_TX_RC_MCS;
1599 r->idx = rate_n_flags & RATE_HT_MCS_INDEX_MSK_V1;
1604 r->flags |= IEEE80211_TX_RC_VHT_MCS;
1606 r->idx = iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags,
1618 struct ieee80211_tx_rate *r = &info->status.rates[0];
1624 info->status.antenna =
1627 info->band, r);
1644 iwl_dbg_tlv_time_point(&mvm->fwrt,
1649 trig = iwl_fw_dbg_trigger_on(&mvm->fwrt, NULL,
1654 status_trig = (void *)trig->data;
1656 for (i = 0; i < ARRAY_SIZE(status_trig->statuses); i++) {
1658 if (!status_trig->statuses[i].status)
1661 if (status_trig->statuses[i].status != (status & TX_STATUS_MSK))
1664 iwl_fw_dbg_collect_trig(&mvm->fwrt, trig,
1672 * iwl_mvm_get_scd_ssn - returns the SSN of the SCD
1673 * @tx_resp: the Tx response from the fw (agg or non-agg)
1684 * For 22000-series and lower, this is just 12 bits. For later, 16 bits.
1690 tx_resp->frame_count);
1692 if (mvm->trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1701 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1704 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1705 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1706 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1709 u32 status = le16_to_cpu(agg_status->status);
1720 txq_id = le16_to_cpu(tx_resp->tx_queue);
1722 seq_ctl = le16_to_cpu(tx_resp->seq_ctl);
1725 iwl_trans_reclaim(mvm->trans, txq_id, ssn, &skbs, false);
1730 struct ieee80211_hdr *hdr = (void *)skb->data;
1735 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
1737 memset(&info->status, 0, sizeof(info->status));
1738 info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1744 info->flags |= IEEE80211_TX_STAT_ACK;
1756 status, le16_to_cpu(hdr->frame_control));
1757 info->flags |= IEEE80211_TX_STAT_TX_FILTERED;
1764 ieee80211_is_mgmt(hdr->frame_control))
1765 iwl_mvm_toggle_tx_ant(mvm, &mvm->mgmt_last_antenna_idx);
1773 info->flags |= IEEE80211_TX_STAT_ACK;
1775 iwl_mvm_tx_status_check_trigger(mvm, status, hdr->frame_control);
1777 info->status.rates[0].count = tx_resp->failure_frame + 1;
1779 iwl_mvm_hwrate_to_tx_status(mvm->fw,
1780 le32_to_cpu(tx_resp->initial_rate),
1786 info->status.status_driver_data[1] =
1787 (void *)(uintptr_t)le32_to_cpu(tx_resp->initial_rate);
1790 if (info->flags & IEEE80211_TX_CTL_AMPDU &&
1791 !(info->flags & IEEE80211_TX_STAT_ACK) &&
1792 !(info->flags & IEEE80211_TX_STAT_TX_FILTERED) && !flushed)
1793 info->flags |= IEEE80211_TX_STAT_AMPDU_NO_BACK;
1794 info->flags &= ~IEEE80211_TX_CTL_AMPDU;
1797 if (ieee80211_is_back_req(hdr->frame_control))
1800 seq_ctl = le16_to_cpu(hdr->seq_ctrl);
1806 * reason, NDPs are never sent to A-MPDU'able queues
1810 if (ieee80211_is_qos_nullfunc(hdr->frame_control))
1818 info->status.tx_time =
1819 le16_to_cpu(tx_resp->wireless_media_time);
1820 BUILD_BUG_ON(ARRAY_SIZE(info->status.status_driver_data) < 1);
1821 lq_color = TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
1822 info->status.status_driver_data[0] =
1823 RS_DRV_DATA_PACK(lq_color, tx_resp->reduced_tpc);
1825 if (likely(!iwl_mvm_time_sync_frame(mvm, skb, hdr->addr1)))
1826 ieee80211_tx_status_skb(mvm->hw, skb);
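/*
 * Single-frame Tx response handling above: the transport reclaims skbs up
 * to the reported SSN, the firmware status is mapped onto mac80211 flags
 * (ACK, filtered, AMPDU_NO_BACK), rate and airtime are recorded in the
 * status, and each frame is returned through ieee80211_tx_status_skb().
 */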
1851 le32_to_cpu(tx_resp->initial_rate),
1852 tx_resp->failure_frame, SEQ_TO_INDEX(sequence),
1857 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
1869 le16_to_cpu(tx_resp->wireless_media_time));
1872 mvmsta->sta_state < IEEE80211_STA_AUTHORIZED)
1873 iwl_mvm_toggle_tx_ant(mvm, &mvmsta->tx_ant);
1875 if (sta->wme && tid != IWL_MGMT_TID) {
1877 &mvmsta->tid_data[tid];
1880 spin_lock_bh(&mvmsta->lock);
1883 tid_data->next_reclaimed = next_reclaimed;
1890 "NDP - don't update next_reclaimed\n");
1895 if (mvmsta->sleep_tx_count) {
1896 mvmsta->sleep_tx_count--;
1897 if (mvmsta->sleep_tx_count &&
1916 spin_unlock_bh(&mvmsta->lock);
1921 mvmsta->sleep_tx_count = 0;
1926 if (mvmsta->next_status_eosp) {
1927 mvmsta->next_status_eosp = false;
1961 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1967 for (i = 0; i < tx_resp->frame_count; i++) {
1973 "status %s (0x%04x), try-count (%d) seq (0x%x)\n",
1982 iwl_dbg_tlv_time_point(&mvm->fwrt,
1995 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
1996 int sta_id = IWL_MVM_TX_RES_GET_RA(tx_resp->ra_tid);
1997 int tid = IWL_MVM_TX_RES_GET_TID(tx_resp->ra_tid);
1998 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
2013 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
2014 if (WARN_ON_ONCE(IS_ERR_OR_NULL(sta) || !sta->wme)) {
2020 mvmsta->tid_data[tid].rate_n_flags =
2021 le32_to_cpu(tx_resp->initial_rate);
2022 mvmsta->tid_data[tid].tx_time =
2023 le16_to_cpu(tx_resp->wireless_media_time);
2024 mvmsta->tid_data[tid].lq_color =
2025 TX_RES_RATE_TABLE_COL_GET(tx_resp->tlc_info);
2027 le16_to_cpu(tx_resp->wireless_media_time));
2036 struct iwl_mvm_tx_resp *tx_resp = (void *)pkt->data;
2038 if (tx_resp->frame_count == 1)
2056 if (WARN_ONCE(sta_id >= mvm->fw->ucode_capa.num_stations ||
2063 sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_id]);
2075 * block-ack window (we assume that they've been successfully
2078 iwl_trans_reclaim(mvm->trans, txq, index, &reclaimed_skbs, is_flush);
2083 iwl_trans_free_tx_cmd(mvm->trans, info->driver_data[1]);
2085 memset(&info->status, 0, sizeof(info->status));
2091 info->flags |= IEEE80211_TX_STAT_ACK;
2093 info->flags &= ~IEEE80211_TX_STAT_ACK;
2099 * be some frames already in-flight).
2101 * sta-dependent stuff since it's in the middle of being removed
2108 tid_data = &mvmsta->tid_data[tid];
2110 if (tid_data->txq_id != txq) {
2113 tid_data->txq_id, tid);
2118 spin_lock_bh(&mvmsta->lock);
2120 tid_data->next_reclaimed = index;
2127 tx_info->status.status_driver_data[0] =
2128 RS_DRV_DATA_PACK(tid_data->lq_color,
2129 tx_info->status.status_driver_data[0]);
2130 tx_info->status.status_driver_data[1] = (void *)(uintptr_t)rate;
2133 struct ieee80211_hdr *hdr = (void *)skb->data;
2137 if (ieee80211_is_data_qos(hdr->frame_control))
2146 info->flags |= IEEE80211_TX_STAT_AMPDU;
2147 memcpy(&info->status, &tx_info->status,
2148 sizeof(tx_info->status));
2149 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, info);
2153 spin_unlock_bh(&mvmsta->lock);
2163 /* no TLC offload, so non-MLD mode */
2164 if (mvmsta->vif)
2166 rcu_dereference(mvmsta->vif->bss_conf.chanctx_conf);
2171 tx_info->band = chanctx_conf->def.chan->band;
2172 iwl_mvm_hwrate_to_tx_status(mvm->fw, rate, tx_info);
2183 ieee80211_tx_status_skb(mvm->hw, skb);
2201 (void *)pkt->data;
2202 u8 lq_color = TX_RES_RATE_TABLE_COL_GET(ba_res->tlc_rate_info);
2210 sta_id = ba_res->sta_id;
2211 ba_info.status.ampdu_ack_len = (u8)le16_to_cpu(ba_res->done);
2212 ba_info.status.ampdu_len = (u8)le16_to_cpu(ba_res->txed);
2214 (u16)le32_to_cpu(ba_res->wireless_time);
2216 (void *)(uintptr_t)ba_res->reduced_txp;
2218 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
2230 sta_id, le32_to_cpu(ba_res->flags),
2231 le16_to_cpu(ba_res->txed),
2232 le16_to_cpu(ba_res->done));
2240 * sent, but there may be some frames already in-flight).
2242 * sta-dependent stuff since it's in the middle of being removed
2249 &ba_res->tfd[i];
2251 tid = ba_tfd->tid;
2256 mvmsta->tid_data[i].lq_color = lq_color;
2259 (int)(le16_to_cpu(ba_tfd->q_num)),
2260 le16_to_cpu(ba_tfd->tfd_index),
2262 le32_to_cpu(ba_res->tx_rate), false);
2267 le32_to_cpu(ba_res->wireless_time));
2270 le16_to_cpu(ba_res->txed), true, 0);
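/*
 * The compressed BA response handled above can report several queues and
 * TIDs through its tfd array; the older BA notification format parsed
 * below describes a single SCD flow.
 */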
2276 ba_notif = (void *)pkt->data;
2277 sta_id = ba_notif->sta_id;
2278 tid = ba_notif->tid;
2280 txq = le16_to_cpu(ba_notif->scd_flow);
2281 /* "ssn" is start of block-ack Tx window, corresponds to index
2283 index = le16_to_cpu(ba_notif->scd_ssn);
2294 tid_data = &mvmsta->tid_data[tid];
2296 ba_info.status.ampdu_ack_len = ba_notif->txed_2_done;
2297 ba_info.status.ampdu_len = ba_notif->txed;
2298 ba_info.status.tx_time = tid_data->tx_time;
2300 (void *)(uintptr_t)ba_notif->reduced_txp;
2306 ba_notif->sta_addr, ba_notif->sta_id);
2310 ba_notif->tid, le16_to_cpu(ba_notif->seq_ctl),
2311 le64_to_cpu(ba_notif->bitmap), txq, index,
2312 ba_notif->txed, ba_notif->txed_2_done);
2315 ba_notif->reduced_txp);
2318 tid_data->rate_n_flags, false);
2324 * queue might not be empty. The race-free way to handle this is to:
2362 if (iwl_fw_lookup_notif_ver(mvm->fw, LONG_GROUP, TXPATH_FLUSH, 0) > 0)
2380 ret = -EIO;
2384 rsp = (void *)cmd.resp_pkt->data;
2386 if (WARN_ONCE(le16_to_cpu(rsp->sta_id) != sta_id,
2388 sta_id, le16_to_cpu(rsp->sta_id))) {
2389 ret = -EIO;
2393 num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
2396 ret = -EIO;
2402 struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
2403 int tid = le16_to_cpu(queue_info->tid);
2404 int read_before = le16_to_cpu(queue_info->read_before_flush);
2405 int read_after = le16_to_cpu(queue_info->read_after_flush);
2406 int queue_num = le16_to_cpu(queue_info->queue_num);
2412 "tid %d queue_id %d read-before %d read-after %d\n",