// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024 - 2025 Intel Corporation
 */
#include <net/ip.h>

#include "tx.h"
#include "sta.h"
#include "hcmd.h"
#include "iwl-utils.h"
#include "iface.h"

#include "fw/dbg.h"

#include "fw/api/tx.h"
#include "fw/api/rs.h"
#include "fw/api/txq.h"
#include "fw/api/datapath.h"
#include "fw/api/time-event.h"

#define MAX_ANT_NUM 2
/* Toggles between TX antennas. Takes the bitmask of valid TX antennas and
 * the *index* used for the last TX, and returns the next valid *index* to
 * use. To set it in the tx_cmd, the caller must use BIT(idx).
 */
static u8 iwl_mld_next_ant(u8 valid, u8 last_idx)
{
	u8 index = last_idx;

	for (int i = 0; i < MAX_ANT_NUM; i++) {
		index = (index + 1) % MAX_ANT_NUM;
		if (valid & BIT(index))
			return index;
	}

	WARN_ONCE(1, "Failed to toggle between antennas 0x%x", valid);

	return last_idx;
}
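
/* Illustration (not part of the driver logic, just a walk-through of the
 * toggling above): with both antennas valid (valid == 0x3) and
 * last_idx == 0, the loop tries index 1 first, finds BIT(1) set and
 * returns 1; calling again with last_idx == 1 wraps back to 0. With a
 * single valid antenna (e.g. valid == 0x2), index 1 is returned every
 * time, so the "toggle" degenerates to always using that antenna.
 */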

void iwl_mld_toggle_tx_ant(struct iwl_mld *mld, u8 *ant)
{
	*ant = iwl_mld_next_ant(iwl_mld_get_valid_tx_ant(mld), *ant);
}

static int
iwl_mld_get_queue_size(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct ieee80211_sta *sta = txq->sta;
	struct ieee80211_link_sta *link_sta;
	unsigned int link_id;
	int max_size = IWL_DEFAULT_QUEUE_SIZE;

	lockdep_assert_wiphy(mld->wiphy);

	for_each_sta_active_link(txq->vif, sta, link_sta, link_id) {
		if (link_sta->eht_cap.has_eht) {
			max_size = IWL_DEFAULT_QUEUE_SIZE_EHT;
			break;
		}

		if (link_sta->he_cap.has_he)
			max_size = IWL_DEFAULT_QUEUE_SIZE_HE;
	}

	return max_size;
}

static int iwl_mld_allocate_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	u8 tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID : txq->tid;
	u32 fw_sta_mask = iwl_mld_fw_sta_id_mask(mld, txq->sta);
	/* We can't know when the station is asleep or awake, so we
	 * must disable the queue hang detection.
	 */
	unsigned int watchdog_timeout = txq->vif->type == NL80211_IFTYPE_AP ?
					IWL_WATCHDOG_DISABLED :
					mld->trans->mac_cfg->base->wd_timeout;
	int queue, size;

	lockdep_assert_wiphy(mld->wiphy);

	if (tid == IWL_MGMT_TID)
		size = max_t(u32, IWL_MGMT_QUEUE_SIZE,
			     mld->trans->mac_cfg->base->min_txq_size);
	else
		size = iwl_mld_get_queue_size(mld, txq);

	queue = iwl_trans_txq_alloc(mld->trans, 0, fw_sta_mask, tid, size,
				    watchdog_timeout);

	if (queue >= 0)
		IWL_DEBUG_TX_QUEUES(mld,
				    "Enabling TXQ #%d for sta mask 0x%x tid %d\n",
				    queue, fw_sta_mask, tid);
	return queue;
}

static int iwl_mld_add_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	int id;

	lockdep_assert_wiphy(mld->wiphy);

	/* This will also send the SCD_QUEUE_CONFIG_CMD */
	id = iwl_mld_allocate_txq(mld, txq);
	if (id < 0)
		return id;

	mld_txq->fw_id = id;
	mld_txq->status.allocated = true;

	rcu_assign_pointer(mld->fw_id_to_txq[id], txq);

	return 0;
}

void iwl_mld_add_txq_list(struct iwl_mld *mld)
{
	lockdep_assert_wiphy(mld->wiphy);

	while (!list_empty(&mld->txqs_to_add)) {
		struct ieee80211_txq *txq;
		struct iwl_mld_txq *mld_txq =
			list_first_entry(&mld->txqs_to_add, struct iwl_mld_txq,
					 list);
		int failed;

		txq = container_of((void *)mld_txq, struct ieee80211_txq,
				   drv_priv);

		failed = iwl_mld_add_txq(mld, txq);

		local_bh_disable();
		spin_lock(&mld->add_txqs_lock);
		list_del_init(&mld_txq->list);
		spin_unlock(&mld->add_txqs_lock);
		/* If the queue allocation failed, we can't transmit. Leave
		 * the frames on the txq; maybe the next attempt to allocate
		 * the queue will succeed.
		 */
		if (!failed)
			iwl_mld_tx_from_txq(mld, txq);
		local_bh_enable();
	}
}

void iwl_mld_add_txqs_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct iwl_mld *mld = container_of(wk, struct iwl_mld,
					   add_txqs_wk);

	/* will reschedule to run after restart */
	if (mld->fw_status.in_hw_restart)
		return;

	iwl_mld_add_txq_list(mld);
}

void
iwl_mld_free_txq(struct iwl_mld *mld, u32 fw_sta_mask, u32 tid, u32 queue_id)
{
	struct iwl_scd_queue_cfg_cmd remove_cmd = {
		.operation = cpu_to_le32(IWL_SCD_QUEUE_REMOVE),
		.u.remove.tid = cpu_to_le32(tid),
		.u.remove.sta_mask = cpu_to_le32(fw_sta_mask),
	};

	iwl_mld_send_cmd_pdu(mld,
			     WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD),
			     &remove_cmd);

	iwl_trans_txq_free(mld->trans, queue_id);
}

void iwl_mld_remove_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	u32 sta_msk, tid;

	lockdep_assert_wiphy(mld->wiphy);

	spin_lock_bh(&mld->add_txqs_lock);
	if (!list_empty(&mld_txq->list))
		list_del_init(&mld_txq->list);
	spin_unlock_bh(&mld->add_txqs_lock);

	if (!mld_txq->status.allocated ||
	    WARN_ON(mld_txq->fw_id >= ARRAY_SIZE(mld->fw_id_to_txq)))
		return;

	sta_msk = iwl_mld_fw_sta_id_mask(mld, txq->sta);

	tid = txq->tid == IEEE80211_NUM_TIDS ? IWL_MGMT_TID :
					       txq->tid;

	iwl_mld_free_txq(mld, sta_msk, tid, mld_txq->fw_id);

	RCU_INIT_POINTER(mld->fw_id_to_txq[mld_txq->fw_id], NULL);
	mld_txq->status.allocated = false;
}

#define OPT_HDR(type, skb, off) \
	(type *)(skb_network_header(skb) + (off))

static __le32
iwl_mld_get_offload_assist(struct sk_buff *skb, bool amsdu)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u16 mh_len = ieee80211_hdrlen(hdr->frame_control);
	u16 offload_assist = 0;
#if IS_ENABLED(CONFIG_INET)
	u8 protocol = 0;

	/* Do not compute checksum if already computed */
	if (skb->ip_summed != CHECKSUM_PARTIAL)
		goto out;

	/* We do not expect to be asked to checksum frames we do not support */

	/* TBD: do we also need to check
	 * !(mvm->hw->netdev_features & IWL_TX_CSUM_NETIF_FLAGS) now that all
	 * the devices we support have this flag?
	 */
	if (WARN_ONCE(skb->protocol != htons(ETH_P_IP) &&
		      skb->protocol != htons(ETH_P_IPV6),
		      "No support for requested checksum\n")) {
		skb_checksum_help(skb);
		goto out;
	}

	if (skb->protocol == htons(ETH_P_IP)) {
		protocol = ip_hdr(skb)->protocol;
	} else {
#if IS_ENABLED(CONFIG_IPV6)
		struct ipv6hdr *ipv6h =
			(struct ipv6hdr *)skb_network_header(skb);
		unsigned int off = sizeof(*ipv6h);

		protocol = ipv6h->nexthdr;
		while (protocol != NEXTHDR_NONE && ipv6_ext_hdr(protocol)) {
			struct ipv6_opt_hdr *hp;

			/* only supported extension headers */
			if (protocol != NEXTHDR_ROUTING &&
			    protocol != NEXTHDR_HOP &&
			    protocol != NEXTHDR_DEST) {
				skb_checksum_help(skb);
				goto out;
			}

			hp = OPT_HDR(struct ipv6_opt_hdr, skb, off);
			protocol = hp->nexthdr;
			off += ipv6_optlen(hp);
		}
		/* if we get here - protocol now should be TCP/UDP */
#endif
	}

	if (protocol != IPPROTO_TCP && protocol != IPPROTO_UDP) {
		WARN_ON_ONCE(1);
		skb_checksum_help(skb);
		goto out;
	}

	/* enable L4 csum */
	offload_assist |= BIT(TX_CMD_OFFLD_L4_EN);

	/* Set offset to IP header (snap).
	 * We don't support tunneling so no need to take care of inner header.
	 * Size is in words.
	 */
	offload_assist |= (4 << TX_CMD_OFFLD_IP_HDR);

	/* Do IPv4 csum for AMSDU only (no IP csum for IPv6) */
	if (skb->protocol == htons(ETH_P_IP) && amsdu) {
		ip_hdr(skb)->check = 0;
		offload_assist |= BIT(TX_CMD_OFFLD_L3_EN);
	}

	/* reset UDP/TCP header csum */
	if (protocol == IPPROTO_TCP)
		tcp_hdr(skb)->check = 0;
	else
		udp_hdr(skb)->check = 0;

out:
#endif
	mh_len /= 2;
	offload_assist |= mh_len << TX_CMD_OFFLD_MH_SIZE;

	if (amsdu)
		offload_assist |= BIT(TX_CMD_OFFLD_AMSDU);
	else if (ieee80211_hdrlen(hdr->frame_control) % 4)
		/* padding is inserted later in transport */
		offload_assist |= BIT(TX_CMD_OFFLD_PAD);

	return cpu_to_le32(offload_assist);
}
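
/* Worked example (illustrative only): for a checksum-offloaded TCP/IPv4
 * A-MSDU carried in a QoS data frame, the bits above combine as follows:
 * the 26-byte QoS MAC header gives mh_len = 26 / 2 = 13 words in
 * TX_CMD_OFFLD_MH_SIZE, the IP-header offset field is the fixed 4 words
 * (SNAP), and TX_CMD_OFFLD_L4_EN, TX_CMD_OFFLD_L3_EN and
 * TX_CMD_OFFLD_AMSDU are all set. A non-A-MSDU frame whose MAC header
 * length is not a multiple of 4 would set TX_CMD_OFFLD_PAD instead of
 * the A-MSDU bit.
 */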

static void iwl_mld_get_basic_rates_and_band(struct iwl_mld *mld,
					     struct ieee80211_vif *vif,
					     struct ieee80211_tx_info *info,
					     unsigned long *basic_rates,
					     u8 *band)
{
	u32 link_id = u32_get_bits(info->control.flags,
				   IEEE80211_TX_CTRL_MLO_LINK);

	*basic_rates = vif->bss_conf.basic_rates;
	*band = info->band;

	if (link_id == IEEE80211_LINK_UNSPECIFIED &&
	    ieee80211_vif_is_mld(vif)) {
		/* shouldn't do this when >1 link is active */
		WARN_ON(hweight16(vif->active_links) != 1);
		link_id = __ffs(vif->active_links);
	}

	if (link_id < IEEE80211_LINK_UNSPECIFIED) {
		struct ieee80211_bss_conf *link_conf;

		rcu_read_lock();
		link_conf = rcu_dereference(vif->link_conf[link_id]);
		if (link_conf) {
			*basic_rates = link_conf->basic_rates;
			if (link_conf->chanreq.oper.chan)
				*band = link_conf->chanreq.oper.chan->band;
		}
		rcu_read_unlock();
	}
}

u8 iwl_mld_get_lowest_rate(struct iwl_mld *mld,
			   struct ieee80211_tx_info *info,
			   struct ieee80211_vif *vif)
{
	struct ieee80211_supported_band *sband;
	u16 lowest_cck = IWL_RATE_COUNT, lowest_ofdm = IWL_RATE_COUNT;
	unsigned long basic_rates;
	u8 band, rate;
	u32 i;

	iwl_mld_get_basic_rates_and_band(mld, vif, info, &basic_rates, &band);

	sband = mld->hw->wiphy->bands[band];
	for_each_set_bit(i, &basic_rates, BITS_PER_LONG) {
		u16 hw = sband->bitrates[i].hw_value;

		if (hw >= IWL_FIRST_OFDM_RATE) {
			if (lowest_ofdm > hw)
				lowest_ofdm = hw;
		} else if (lowest_cck > hw) {
			lowest_cck = hw;
		}
	}

	if (band == NL80211_BAND_2GHZ && !vif->p2p &&
	    vif->type != NL80211_IFTYPE_P2P_DEVICE &&
	    !(info->flags & IEEE80211_TX_CTL_NO_CCK_RATE)) {
		if (lowest_cck != IWL_RATE_COUNT)
			rate = lowest_cck;
		else if (lowest_ofdm != IWL_RATE_COUNT)
			rate = lowest_ofdm;
		else
			rate = IWL_FIRST_CCK_RATE;
	} else if (lowest_ofdm != IWL_RATE_COUNT) {
		rate = lowest_ofdm;
	} else {
		rate = IWL_FIRST_OFDM_RATE;
	}

	return rate;
}

static u32 iwl_mld_mac80211_rate_idx_to_fw(struct iwl_mld *mld,
					   struct ieee80211_tx_info *info,
					   int rate_idx)
{
	u32 rate_flags = 0;
	u8 rate_plcp;

	/* if the rate isn't a well known legacy rate, take the lowest one */
	if (rate_idx < 0 || rate_idx >= IWL_RATE_COUNT_LEGACY)
		rate_idx = iwl_mld_get_lowest_rate(mld, info,
						   info->control.vif);

	WARN_ON_ONCE(rate_idx < 0);

	/* Set CCK or OFDM flag */
	if (rate_idx <= IWL_LAST_CCK_RATE)
		rate_flags |= RATE_MCS_MOD_TYPE_CCK;
	else
		rate_flags |= RATE_MCS_MOD_TYPE_LEGACY_OFDM;

	/* Legacy rates are indexed:
	 * 0 - 3 for CCK and 0 - 7 for OFDM
	 */
	rate_plcp = (rate_idx >= IWL_FIRST_OFDM_RATE ?
		     rate_idx - IWL_FIRST_OFDM_RATE : rate_idx);

	return (u32)rate_plcp | rate_flags;
}
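
/* Illustration (assuming the usual iwlwifi legacy rate table, where CCK
 * occupies indices 0-3 and IWL_FIRST_OFDM_RATE is 4): mac80211 rate_idx 4
 * (OFDM 6 Mbps) maps to PLCP index 4 - 4 = 0 with
 * RATE_MCS_MOD_TYPE_LEGACY_OFDM set, while rate_idx 1 (CCK 2 Mbps) maps
 * to PLCP index 1 with RATE_MCS_MOD_TYPE_CCK set.
 */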

static u32 iwl_mld_get_tx_ant(struct iwl_mld *mld,
			      struct ieee80211_tx_info *info,
			      struct ieee80211_sta *sta, __le16 fc)
{
	if (sta && ieee80211_is_data(fc)) {
		struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);

		return BIT(mld_sta->data_tx_ant) << RATE_MCS_ANT_POS;
	}

	return BIT(mld->mgmt_tx_ant) << RATE_MCS_ANT_POS;
}

static u32 iwl_mld_get_inject_tx_rate(struct iwl_mld *mld,
				      struct ieee80211_tx_info *info,
				      struct ieee80211_sta *sta,
				      __le16 fc)
{
	struct ieee80211_tx_rate *rate = &info->control.rates[0];
	u32 result;

	if (rate->flags & IEEE80211_TX_RC_VHT_MCS) {
		u8 mcs = ieee80211_rate_get_vht_mcs(rate);
		u8 nss = ieee80211_rate_get_vht_nss(rate);

		result = RATE_MCS_MOD_TYPE_VHT;
		result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK;

		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_40;
		else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_80;
		else if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_160;
	} else if (rate->flags & IEEE80211_TX_RC_MCS) {
		/* only MCS 0-15 are supported */
		u8 mcs = rate->idx & 7;
		u8 nss = rate->idx > 7;

		result = RATE_MCS_MOD_TYPE_HT;
		result |= u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
		result |= u32_encode_bits(nss, RATE_MCS_NSS_MSK);

		if (rate->flags & IEEE80211_TX_RC_SHORT_GI)
			result |= RATE_MCS_SGI_MSK;
		if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH)
			result |= RATE_MCS_CHAN_WIDTH_40;
		if (info->flags & IEEE80211_TX_CTL_LDPC)
			result |= RATE_MCS_LDPC_MSK;
		if (u32_get_bits(info->flags, IEEE80211_TX_CTL_STBC))
			result |= RATE_MCS_STBC_MSK;
	} else {
		result = iwl_mld_mac80211_rate_idx_to_fw(mld, info, rate->idx);
	}

	if (info->control.antennas)
		result |= u32_encode_bits(info->control.antennas,
					  RATE_MCS_ANT_AB_MSK);
	else
		result |= iwl_mld_get_tx_ant(mld, info, sta, fc);

	return result;
}

static __le32 iwl_mld_get_tx_rate_n_flags(struct iwl_mld *mld,
					  struct ieee80211_tx_info *info,
					  struct ieee80211_sta *sta, __le16 fc)
{
	u32 rate;

	if (unlikely(info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT))
		rate = iwl_mld_get_inject_tx_rate(mld, info, sta, fc);
	else
		rate = iwl_mld_mac80211_rate_idx_to_fw(mld, info, -1) |
			iwl_mld_get_tx_ant(mld, info, sta, fc);

	return iwl_v3_rate_to_v2_v3(rate, mld->fw_rates_ver_3);
}

static void
iwl_mld_fill_tx_cmd_hdr(struct iwl_tx_cmd *tx_cmd,
			struct sk_buff *skb, bool amsdu)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_vif *vif;

	/* Copy MAC header from skb into command buffer */
	memcpy(tx_cmd->hdr, hdr, ieee80211_hdrlen(hdr->frame_control));

	if (!amsdu || !skb_is_gso(skb))
		return;

	/* As described in IEEE Std 802.11-2020, table 9-30 (Address
	 * field contents), A-MSDU address 3 should contain the BSSID
	 * address.
	 *
	 * In TSO, the skb header address 3 contains the original address 3 to
	 * correctly create all the A-MSDU subframes headers from it.
	 * Override now the address 3 in the command header with the BSSID.
	 *
	 * Note: we fill in the MLD address, but the firmware will do the
	 * necessary translation to link address after encryption.
	 */
	vif = info->control.vif;
	switch (vif->type) {
	case NL80211_IFTYPE_STATION:
		ether_addr_copy(tx_cmd->hdr->addr3, vif->cfg.ap_addr);
		break;
	case NL80211_IFTYPE_AP:
		ether_addr_copy(tx_cmd->hdr->addr3, vif->addr);
		break;
	default:
		break;
	}
}

static void
iwl_mld_fill_tx_cmd(struct iwl_mld *mld, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_tx_cmd,
		    struct ieee80211_sta *sta)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct iwl_mld_sta *mld_sta = sta ? iwl_mld_sta_from_mac80211(sta) :
					    NULL;
	struct iwl_tx_cmd *tx_cmd;
	bool amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		     (*ieee80211_get_qos_ctl(hdr) &
		      IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	__le32 rate_n_flags = 0;
	u16 flags = 0;

	dev_tx_cmd->hdr.cmd = TX_CMD;

	if (!info->control.hw_key)
		flags |= IWL_TX_FLAGS_ENCRYPT_DIS;

	/* For data and mgmt packets rate info comes from the fw.
	 * Only set rate/antenna for injected frames with fixed rate, or
	 * when no sta is given.
	 */
	if (unlikely(!sta ||
		     info->control.flags & IEEE80211_TX_CTRL_RATE_INJECT)) {
		flags |= IWL_TX_FLAGS_CMD_RATE;
		rate_n_flags = iwl_mld_get_tx_rate_n_flags(mld, info, sta,
							   hdr->frame_control);
	} else if (!ieee80211_is_data(hdr->frame_control) ||
		   (mld_sta &&
		    mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)) {
		/* These are important frames */
		flags |= IWL_TX_FLAGS_HIGH_PRI;
	}

	tx_cmd = (void *)dev_tx_cmd->payload;

	iwl_mld_fill_tx_cmd_hdr(tx_cmd, skb, amsdu);

	tx_cmd->offload_assist = iwl_mld_get_offload_assist(skb, amsdu);

	/* Total # bytes to be transmitted */
	tx_cmd->len = cpu_to_le16((u16)skb->len);

	tx_cmd->flags = cpu_to_le16(flags);

	tx_cmd->rate_n_flags = rate_n_flags;
}

/* Callers of this function must ensure that info->control.vif is not NULL */
static struct iwl_mld_link *
iwl_mld_get_link_from_tx_info(struct ieee80211_tx_info *info)
{
	struct iwl_mld_vif *mld_vif =
		iwl_mld_vif_from_mac80211(info->control.vif);
	u32 link_id = u32_get_bits(info->control.flags,
				   IEEE80211_TX_CTRL_MLO_LINK);

	if (link_id == IEEE80211_LINK_UNSPECIFIED) {
		if (info->control.vif->active_links)
			link_id = ffs(info->control.vif->active_links) - 1;
		else
			link_id = 0;
	}

	return rcu_dereference(mld_vif->link[link_id]);
}

static int
iwl_mld_get_tx_queue_id(struct iwl_mld *mld, struct ieee80211_txq *txq,
			struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_hdr *hdr = (void *)skb->data;
	__le16 fc = hdr->frame_control;
	struct iwl_mld_vif *mld_vif;
	struct iwl_mld_link *link;

	if (txq && txq->sta)
		return iwl_mld_txq_from_mac80211(txq)->fw_id;

	if (!info->control.vif)
		return IWL_MLD_INVALID_QUEUE;

	switch (info->control.vif->type) {
	case NL80211_IFTYPE_AP:
	case NL80211_IFTYPE_ADHOC:
		link = iwl_mld_get_link_from_tx_info(info);

		if (WARN_ON(!link))
			break;

		/* ucast disassociate/deauth frames without a station might
		 * happen, especially with reason 7 ("Class 3 frame received
		 * from nonassociated STA").
		 */
		if (ieee80211_is_mgmt(fc) &&
		    (!ieee80211_is_bufferable_mmpdu(skb) ||
		     ieee80211_is_deauth(fc) || ieee80211_is_disassoc(fc)))
			return link->bcast_sta.queue_id;

		if (is_multicast_ether_addr(hdr->addr1) &&
		    !ieee80211_has_order(fc))
			return link->mcast_sta.queue_id;

		WARN_ONCE(info->control.vif->type != NL80211_IFTYPE_ADHOC,
			  "Couldn't find a TXQ. fc=0x%02x", le16_to_cpu(fc));
		return link->bcast_sta.queue_id;
	case NL80211_IFTYPE_P2P_DEVICE:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		if (mld_vif->roc_activity != ROC_ACTIVITY_P2P_DISC &&
		    mld_vif->roc_activity != ROC_ACTIVITY_P2P_NEG) {
			IWL_DEBUG_DROP(mld,
				       "Drop tx outside ROC with activity %d\n",
				       mld_vif->roc_activity);
			return IWL_MLD_INVALID_DROP_TX;
		}

		WARN_ON(!ieee80211_is_mgmt(fc));

		return mld_vif->aux_sta.queue_id;
	case NL80211_IFTYPE_MONITOR:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);
		return mld_vif->deflink.mon_sta.queue_id;
	case NL80211_IFTYPE_STATION:
		mld_vif = iwl_mld_vif_from_mac80211(info->control.vif);

		if (!(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN)) {
			IWL_DEBUG_DROP(mld, "Drop tx not off-channel\n");
			return IWL_MLD_INVALID_DROP_TX;
		}

		if (mld_vif->roc_activity != ROC_ACTIVITY_HOTSPOT) {
			IWL_DEBUG_DROP(mld, "Drop tx outside ROC\n");
			return IWL_MLD_INVALID_DROP_TX;
		}

		WARN_ON(!ieee80211_is_mgmt(fc));
		return mld_vif->aux_sta.queue_id;
	default:
		WARN_ONCE(1, "Unsupported vif type\n");
		break;
	}

	return IWL_MLD_INVALID_QUEUE;
}

static void iwl_mld_probe_resp_set_noa(struct iwl_mld *mld,
				       struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct iwl_mld_link *mld_link =
		&iwl_mld_vif_from_mac80211(info->control.vif)->deflink;
	struct iwl_probe_resp_data *resp_data;
	u8 *pos;

	if (!info->control.vif->p2p)
		return;

	rcu_read_lock();

	resp_data = rcu_dereference(mld_link->probe_resp_data);
	if (!resp_data)
		goto out;

	if (!resp_data->notif.noa_active)
		goto out;

	if (skb_tailroom(skb) < resp_data->noa_len) {
		if (pskb_expand_head(skb, 0, resp_data->noa_len, GFP_ATOMIC)) {
			IWL_ERR(mld,
				"Failed to reallocate probe resp\n");
			goto out;
		}
	}

	pos = skb_put(skb, resp_data->noa_len);

	*pos++ = WLAN_EID_VENDOR_SPECIFIC;
	/* Set length of IE body (not including ID and length itself) */
	*pos++ = resp_data->noa_len - 2;
	*pos++ = (WLAN_OUI_WFA >> 16) & 0xff;
	*pos++ = (WLAN_OUI_WFA >> 8) & 0xff;
	*pos++ = WLAN_OUI_WFA & 0xff;
	*pos++ = WLAN_OUI_TYPE_WFA_P2P;

	memcpy(pos, &resp_data->notif.noa_attr,
	       resp_data->noa_len - sizeof(struct ieee80211_vendor_ie));

out:
	rcu_read_unlock();
}
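
/* Resulting IE layout (illustrative; assumes the kernel's WLAN_OUI_WFA
 * value 0x506f9a and WLAN_OUI_TYPE_WFA_P2P value 9):
 *
 *	[0]    0xdd      WLAN_EID_VENDOR_SPECIFIC
 *	[1]    len       noa_len - 2 (IE body, excluding ID and length)
 *	[2-4]  50 6f 9a  WFA OUI
 *	[5]    0x09      P2P OUI type
 *	[6..]  NoA attribute copied from the firmware notification
 */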

/* This function must be called with BHs disabled */
static int iwl_mld_tx_mpdu(struct iwl_mld *mld, struct sk_buff *skb,
			   struct ieee80211_txq *txq)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_sta *sta = txq ? txq->sta : NULL;
	struct iwl_device_tx_cmd *dev_tx_cmd;
	int queue = iwl_mld_get_tx_queue_id(mld, txq, skb);
	u8 tid = IWL_MAX_TID_COUNT;

	if (WARN_ONCE(queue == IWL_MLD_INVALID_QUEUE, "Invalid TX Queue id") ||
	    queue == IWL_MLD_INVALID_DROP_TX)
		return -1;

	if (unlikely(ieee80211_is_any_nullfunc(hdr->frame_control)))
		return -1;

	dev_tx_cmd = iwl_trans_alloc_tx_cmd(mld->trans);
	if (unlikely(!dev_tx_cmd))
		return -1;

	if (unlikely(ieee80211_is_probe_resp(hdr->frame_control))) {
		if (IWL_MLD_NON_TRANSMITTING_AP)
			return -1;

		iwl_mld_probe_resp_set_noa(mld, skb);
	}

	iwl_mld_fill_tx_cmd(mld, skb, dev_tx_cmd, sta);

	if (ieee80211_is_data(hdr->frame_control)) {
		if (ieee80211_is_data_qos(hdr->frame_control))
			tid = ieee80211_get_tid(hdr);
		else
			tid = IWL_TID_NON_QOS;
	}

	IWL_DEBUG_TX(mld, "TX TID:%d from Q:%d len %d\n",
		     tid, queue, skb->len);

	/* From now on, we cannot access info->control */
	memset(&info->status, 0, sizeof(info->status));
	memset(info->driver_data, 0, sizeof(info->driver_data));

	info->driver_data[1] = dev_tx_cmd;

	if (iwl_trans_tx(mld->trans, skb, dev_tx_cmd, queue))
		goto err;

	/* Update the low-latency counters when a packet is queued instead
	 * of after TX, since that allows earlier low-latency detection.
	 */
	if (sta)
		iwl_mld_low_latency_update_counters(mld, hdr, sta, 0);

	return 0;

err:
	iwl_trans_free_tx_cmd(mld->trans, dev_tx_cmd);
	IWL_DEBUG_TX(mld, "TX from Q:%d dropped\n", queue);
	return -1;
}

#ifdef CONFIG_INET

/* This function handles the segmentation of a large TSO packet into multiple
 * MPDUs, ensuring that the resulting segments conform to AMSDU limits and
 * constraints.
 */
static int iwl_mld_tx_tso_segment(struct iwl_mld *mld, struct sk_buff *skb,
				  struct ieee80211_sta *sta,
				  struct sk_buff_head *mpdus_skbs)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	netdev_features_t netdev_flags = NETIF_F_CSUM_MASK | NETIF_F_SG;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int num_subframes, tcp_payload_len, subf_len;
	u16 snap_ip_tcp, pad, max_tid_amsdu_len;
	u8 tid;

	snap_ip_tcp = 8 + skb_network_header_len(skb) + tcp_hdrlen(skb);

	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    !sta->cur->max_rc_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Do not build AMSDU for IPv6 with extension headers.
	 * Ask stack to segment and checksum the generated MPDUs for us.
	 */
	if (skb->protocol == htons(ETH_P_IPV6) &&
	    ((struct ipv6hdr *)skb_network_header(skb))->nexthdr !=
	    IPPROTO_TCP) {
		netdev_flags &= ~NETIF_F_CSUM_MASK;
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);
	}

	tid = ieee80211_get_tid(hdr);
	if (WARN_ON_ONCE(tid >= IWL_MAX_TID_COUNT))
		return -EINVAL;

	max_tid_amsdu_len = sta->cur->max_tid_amsdu_len[tid];
	if (!max_tid_amsdu_len)
		return iwl_tx_tso_segment(skb, 1, netdev_flags, mpdus_skbs);

	/* Sub frame header + SNAP + IP header + TCP header + MSS */
	subf_len = sizeof(struct ethhdr) + snap_ip_tcp + mss;
	pad = (4 - subf_len) & 0x3;

	/* If we have N subframes in the A-MSDU, then the A-MSDU's size is
	 * N * subf_len + (N - 1) * pad.
	 */
	num_subframes = (max_tid_amsdu_len + pad) / (subf_len + pad);
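
	/* Worked example (illustrative numbers only): for an IPv4 TCP flow
	 * with mss = 1460, snap_ip_tcp = 8 + 20 + 20 = 48, so
	 * subf_len = 14 + 48 + 1460 = 1522 and pad = (4 - 1522) & 3 = 2.
	 * With max_tid_amsdu_len = 7935 this yields
	 * (7935 + 2) / (1522 + 2) = 5 subframes per A-MSDU.
	 */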

	if (sta->max_amsdu_subframes &&
	    num_subframes > sta->max_amsdu_subframes)
		num_subframes = sta->max_amsdu_subframes;

	tcp_payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
			  tcp_hdrlen(skb) + skb->data_len;

	/* Make sure we have enough TBs for the A-MSDU:
	 * 2 for each subframe
	 * 1 more for each fragment
	 * 1 more for the potential data in the header
	 */
	if ((num_subframes * 2 + skb_shinfo(skb)->nr_frags + 1) >
	    mld->trans->info.max_skb_frags)
		num_subframes = 1;

	if (num_subframes > 1)
		*ieee80211_get_qos_ctl(hdr) |= IEEE80211_QOS_CTL_A_MSDU_PRESENT;

	/* This skb fits in one single A-MSDU */
	if (tcp_payload_len <= num_subframes * mss) {
		__skb_queue_tail(mpdus_skbs, skb);
		return 0;
	}

	/* Trick the segmentation function to make it create SKBs that can fit
	 * into one A-MSDU.
	 */
	return iwl_tx_tso_segment(skb, num_subframes, netdev_flags, mpdus_skbs);
}

/* Manages TSO (TCP Segmentation Offload) packet transmission by segmenting
 * large packets when necessary and transmitting each segment as an MPDU.
 */
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct sk_buff *orig_skb = skb;
	struct sk_buff_head mpdus_skbs;
	unsigned int payload_len;
	int ret;

	if (WARN_ON(!txq || !txq->sta))
		return -1;

	payload_len = skb_tail_pointer(skb) - skb_transport_header(skb) -
		      tcp_hdrlen(skb) + skb->data_len;

	if (payload_len <= skb_shinfo(skb)->gso_size)
		return iwl_mld_tx_mpdu(mld, skb, txq);

	if (!info->control.vif)
		return -1;

	__skb_queue_head_init(&mpdus_skbs);

	ret = iwl_mld_tx_tso_segment(mld, skb, txq->sta, &mpdus_skbs);
	if (ret)
		return ret;

	WARN_ON(skb_queue_empty(&mpdus_skbs));

	while (!skb_queue_empty(&mpdus_skbs)) {
		skb = __skb_dequeue(&mpdus_skbs);

		ret = iwl_mld_tx_mpdu(mld, skb, txq);
		if (!ret)
			continue;

		/* Free skbs created as part of TSO logic that have not yet
		 * been dequeued
		 */
		__skb_queue_purge(&mpdus_skbs);

		/* skb here is not necessarily the same skb that entered this
		 * method, so free it explicitly.
		 */
		if (skb == orig_skb)
			ieee80211_free_txskb(mld->hw, skb);
		else
			kfree_skb(skb);

		/* There was an error, but we consumed the skb one way or
		 * another, so return 0.
		 */
		return 0;
	}

	return 0;
}
#else
static int iwl_mld_tx_tso(struct iwl_mld *mld, struct sk_buff *skb,
			  struct ieee80211_txq *txq)
{
	/* Impossible to get TSO without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */

void iwl_mld_tx_skb(struct iwl_mld *mld, struct sk_buff *skb,
		    struct ieee80211_txq *txq)
{
	if (skb_is_gso(skb)) {
		if (!iwl_mld_tx_tso(mld, skb, txq))
			return;
		goto err;
	}

	if (likely(!iwl_mld_tx_mpdu(mld, skb, txq)))
		return;

err:
	ieee80211_free_txskb(mld->hw, skb);
}

void iwl_mld_tx_from_txq(struct iwl_mld *mld, struct ieee80211_txq *txq)
{
	struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
	struct sk_buff *skb = NULL;
	u8 zero_addr[ETH_ALEN] = {};

	/*
	 * No need for threads to be pending here, they can leave the first
	 * taker all the work.
	 *
	 * mld_txq->tx_request logic:
	 *
	 * If 0, no one is currently TXing, set to 1 to indicate current thread
	 * will now start TX and other threads should quit.
	 *
	 * If 1, another thread is currently TXing, set to 2 to indicate to
	 * that thread that there was another request. Since that request may
	 * have raced with the check whether the queue is empty, the TXing
	 * thread should check the queue's status one more time before leaving.
	 * This check is done in order to not leave any TX hanging in the queue
	 * until the next TX invocation (which may not even happen).
	 *
	 * If 2, another thread is currently TXing, and it will already double
	 * check the queue, so do nothing.
	 */
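	/* Example walk-through (illustrative): thread A finds tx_request == 0,
	 * bumps it to 1 and starts dequeuing. Thread B arrives, bumps 1 -> 2
	 * and returns immediately. When A drains the queue, its
	 * atomic_dec_return() yields 1 (not 0), so it rescans the queue on
	 * B's behalf before the final decrement lets it exit.
	 */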
981*6b627f88SBjoern A. Zeeb if (atomic_fetch_add_unless(&mld_txq->tx_request, 1, 2))
982*6b627f88SBjoern A. Zeeb return;
983*6b627f88SBjoern A. Zeeb
984*6b627f88SBjoern A. Zeeb rcu_read_lock();
985*6b627f88SBjoern A. Zeeb do {
986*6b627f88SBjoern A. Zeeb while (likely(!mld_txq->status.stop_full) &&
987*6b627f88SBjoern A. Zeeb (skb = ieee80211_tx_dequeue(mld->hw, txq)))
988*6b627f88SBjoern A. Zeeb iwl_mld_tx_skb(mld, skb, txq);
989*6b627f88SBjoern A. Zeeb } while (atomic_dec_return(&mld_txq->tx_request));
990*6b627f88SBjoern A. Zeeb
991*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX(mld, "TXQ of sta %pM tid %d is now empty\n",
992*6b627f88SBjoern A. Zeeb txq->sta ? txq->sta->addr : zero_addr, txq->tid);
993*6b627f88SBjoern A. Zeeb
994*6b627f88SBjoern A. Zeeb rcu_read_unlock();
995*6b627f88SBjoern A. Zeeb }
996*6b627f88SBjoern A. Zeeb
iwl_mld_hwrate_to_tx_rate(struct iwl_mld * mld,__le32 rate_n_flags_fw,struct ieee80211_tx_info * info)997*6b627f88SBjoern A. Zeeb static void iwl_mld_hwrate_to_tx_rate(struct iwl_mld *mld,
998*6b627f88SBjoern A. Zeeb __le32 rate_n_flags_fw,
999*6b627f88SBjoern A. Zeeb struct ieee80211_tx_info *info)
1000*6b627f88SBjoern A. Zeeb {
1001*6b627f88SBjoern A. Zeeb enum nl80211_band band = info->band;
1002*6b627f88SBjoern A. Zeeb struct ieee80211_tx_rate *tx_rate = &info->status.rates[0];
1003*6b627f88SBjoern A. Zeeb u32 rate_n_flags = iwl_v3_rate_from_v2_v3(rate_n_flags_fw,
1004*6b627f88SBjoern A. Zeeb mld->fw_rates_ver_3);
1005*6b627f88SBjoern A. Zeeb u32 sgi = rate_n_flags & RATE_MCS_SGI_MSK;
1006*6b627f88SBjoern A. Zeeb u32 chan_width = rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK;
1007*6b627f88SBjoern A. Zeeb u32 format = rate_n_flags & RATE_MCS_MOD_TYPE_MSK;
1008*6b627f88SBjoern A. Zeeb
1009*6b627f88SBjoern A. Zeeb if (sgi)
1010*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_SHORT_GI;
1011*6b627f88SBjoern A. Zeeb
1012*6b627f88SBjoern A. Zeeb switch (chan_width) {
1013*6b627f88SBjoern A. Zeeb case RATE_MCS_CHAN_WIDTH_20:
1014*6b627f88SBjoern A. Zeeb break;
1015*6b627f88SBjoern A. Zeeb case RATE_MCS_CHAN_WIDTH_40:
1016*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_40_MHZ_WIDTH;
1017*6b627f88SBjoern A. Zeeb break;
1018*6b627f88SBjoern A. Zeeb case RATE_MCS_CHAN_WIDTH_80:
1019*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_80_MHZ_WIDTH;
1020*6b627f88SBjoern A. Zeeb break;
1021*6b627f88SBjoern A. Zeeb case RATE_MCS_CHAN_WIDTH_160:
1022*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_160_MHZ_WIDTH;
1023*6b627f88SBjoern A. Zeeb break;
1024*6b627f88SBjoern A. Zeeb default:
1025*6b627f88SBjoern A. Zeeb break;
1026*6b627f88SBjoern A. Zeeb }
1027*6b627f88SBjoern A. Zeeb
1028*6b627f88SBjoern A. Zeeb switch (format) {
1029*6b627f88SBjoern A. Zeeb case RATE_MCS_MOD_TYPE_HT:
1030*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_MCS;
1031*6b627f88SBjoern A. Zeeb tx_rate->idx = RATE_HT_MCS_INDEX(rate_n_flags);
1032*6b627f88SBjoern A. Zeeb break;
1033*6b627f88SBjoern A. Zeeb case RATE_MCS_MOD_TYPE_VHT:
1034*6b627f88SBjoern A. Zeeb ieee80211_rate_set_vht(tx_rate,
1035*6b627f88SBjoern A. Zeeb rate_n_flags & RATE_MCS_CODE_MSK,
1036*6b627f88SBjoern A. Zeeb u32_get_bits(rate_n_flags,
1037*6b627f88SBjoern A. Zeeb RATE_MCS_NSS_MSK) + 1);
1038*6b627f88SBjoern A. Zeeb tx_rate->flags |= IEEE80211_TX_RC_VHT_MCS;
1039*6b627f88SBjoern A. Zeeb break;
1040*6b627f88SBjoern A. Zeeb case RATE_MCS_MOD_TYPE_HE:
1041*6b627f88SBjoern A. Zeeb case RATE_MCS_MOD_TYPE_EHT:
1042*6b627f88SBjoern A. Zeeb /* mac80211 cannot report HE/EHT rates without
1043*6b627f88SBjoern A. Zeeb * ieee80211_tx_status_ext(), but the rate only matters for radiotap
1044*6b627f88SBjoern A. Zeeb */
1045*6b627f88SBjoern A. Zeeb tx_rate->idx = 0;
1046*6b627f88SBjoern A. Zeeb break;
1047*6b627f88SBjoern A. Zeeb default:
1048*6b627f88SBjoern A. Zeeb tx_rate->idx =
1049*6b627f88SBjoern A. Zeeb iwl_mld_legacy_hw_idx_to_mac80211_idx(rate_n_flags,
1050*6b627f88SBjoern A. Zeeb band);
1051*6b627f88SBjoern A. Zeeb break;
1052*6b627f88SBjoern A. Zeeb }
1053*6b627f88SBjoern A. Zeeb }
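
/*
 * Illustrative sketch only (not driver code): building a rate_n_flags word
 * from the same masks decoded above, i.e. the inverse operation.  Mask and
 * modulation-type names come from fw/api/rs.h; the chosen values are an
 * arbitrary example (VHT, 80 MHz, short GI).
 */
#if 0	/* example, never built */
static u32 demo_build_vht_rate_n_flags(u8 mcs, u8 nss)
{
	return RATE_MCS_MOD_TYPE_VHT |
	       RATE_MCS_CHAN_WIDTH_80 |
	       RATE_MCS_SGI_MSK |
	       u32_encode_bits(nss - 1, RATE_MCS_NSS_MSK) |
	       u32_encode_bits(mcs, RATE_MCS_CODE_MSK);
}
#endif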
1054*6b627f88SBjoern A. Zeeb
iwl_mld_handle_tx_resp_notif(struct iwl_mld * mld,struct iwl_rx_packet * pkt)1055*6b627f88SBjoern A. Zeeb void iwl_mld_handle_tx_resp_notif(struct iwl_mld *mld,
1056*6b627f88SBjoern A. Zeeb struct iwl_rx_packet *pkt)
1057*6b627f88SBjoern A. Zeeb {
1058*6b627f88SBjoern A. Zeeb struct iwl_tx_resp *tx_resp = (void *)pkt->data;
1059*6b627f88SBjoern A. Zeeb int txq_id = le16_to_cpu(tx_resp->tx_queue);
1060*6b627f88SBjoern A. Zeeb struct agg_tx_status *agg_status = &tx_resp->status;
1061*6b627f88SBjoern A. Zeeb u32 status = le16_to_cpu(agg_status->status);
1062*6b627f88SBjoern A. Zeeb u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1063*6b627f88SBjoern A. Zeeb size_t notif_size = sizeof(*tx_resp) + sizeof(u32);
1064*6b627f88SBjoern A. Zeeb int sta_id = IWL_TX_RES_GET_RA(tx_resp->ra_tid);
1065*6b627f88SBjoern A. Zeeb int tid = IWL_TX_RES_GET_TID(tx_resp->ra_tid);
1066*6b627f88SBjoern A. Zeeb struct ieee80211_link_sta *link_sta;
1067*6b627f88SBjoern A. Zeeb struct iwl_mld_sta *mld_sta;
1068*6b627f88SBjoern A. Zeeb u16 ssn;
1069*6b627f88SBjoern A. Zeeb struct sk_buff_head skbs;
1070*6b627f88SBjoern A. Zeeb u8 skb_freed = 0;
1071*6b627f88SBjoern A. Zeeb bool mgmt = false;
1072*6b627f88SBjoern A. Zeeb bool tx_failure = (status & TX_STATUS_MSK) != TX_STATUS_SUCCESS;
1073*6b627f88SBjoern A. Zeeb
1074*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, tx_resp->frame_count != 1,
1075*6b627f88SBjoern A. Zeeb "Invalid tx_resp notif frame_count (%d)\n",
1076*6b627f88SBjoern A. Zeeb tx_resp->frame_count))
1077*6b627f88SBjoern A. Zeeb return;
1078*6b627f88SBjoern A. Zeeb
1079*6b627f88SBjoern A. Zeeb /* validate the notif size: sizeof(*tx_resp) plus the trailing SSN word */
1080*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, notif_size != pkt_len,
1081*6b627f88SBjoern A. Zeeb "Invalid tx_resp notif size (expected=%zu got=%u)\n",
1082*6b627f88SBjoern A. Zeeb notif_size, pkt_len))
1083*6b627f88SBjoern A. Zeeb return;
1084*6b627f88SBjoern A. Zeeb
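	/* The scheduler SSN is the __le32 that follows the frame_count
	 * per-frame status entries; only its low 16 bits are meaningful.
	 */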
1085*6b627f88SBjoern A. Zeeb ssn = le32_to_cpup((__le32 *)agg_status +
1086*6b627f88SBjoern A. Zeeb tx_resp->frame_count) & 0xFFFF;
1087*6b627f88SBjoern A. Zeeb
1088*6b627f88SBjoern A. Zeeb __skb_queue_head_init(&skbs);
1089*6b627f88SBjoern A. Zeeb
1090*6b627f88SBjoern A. Zeeb /* reclaim (free) everything up to, but not including, ssn % q.n_bd */
1091*6b627f88SBjoern A. Zeeb iwl_trans_reclaim(mld->trans, txq_id, ssn, &skbs, false);
1092*6b627f88SBjoern A. Zeeb
1093*6b627f88SBjoern A. Zeeb while (!skb_queue_empty(&skbs)) {
1094*6b627f88SBjoern A. Zeeb struct sk_buff *skb = __skb_dequeue(&skbs);
1095*6b627f88SBjoern A. Zeeb struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1096*6b627f88SBjoern A. Zeeb struct ieee80211_hdr *hdr = (void *)skb->data;
1097*6b627f88SBjoern A. Zeeb
1098*6b627f88SBjoern A. Zeeb skb_freed++;
1099*6b627f88SBjoern A. Zeeb
1100*6b627f88SBjoern A. Zeeb iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1101*6b627f88SBjoern A. Zeeb
1102*6b627f88SBjoern A. Zeeb memset(&info->status, 0, sizeof(info->status));
1103*6b627f88SBjoern A. Zeeb
1104*6b627f88SBjoern A. Zeeb info->flags &= ~(IEEE80211_TX_STAT_ACK | IEEE80211_TX_STAT_TX_FILTERED);
1105*6b627f88SBjoern A. Zeeb
1106*6b627f88SBjoern A. Zeeb /* inform mac80211 about what happened with the frame */
1107*6b627f88SBjoern A. Zeeb switch (status & TX_STATUS_MSK) {
1108*6b627f88SBjoern A. Zeeb case TX_STATUS_SUCCESS:
1109*6b627f88SBjoern A. Zeeb case TX_STATUS_DIRECT_DONE:
1110*6b627f88SBjoern A. Zeeb info->flags |= IEEE80211_TX_STAT_ACK;
1111*6b627f88SBjoern A. Zeeb break;
1112*6b627f88SBjoern A. Zeeb default:
1113*6b627f88SBjoern A. Zeeb break;
1114*6b627f88SBjoern A. Zeeb }
1115*6b627f88SBjoern A. Zeeb
1116*6b627f88SBjoern A. Zeeb /* If we are freeing multiple frames, mark all but the first
1117*6b627f88SBjoern A. Zeeb * one as acked, since they were already acknowledged before
1118*6b627f88SBjoern A. Zeeb * this response arrived
1119*6b627f88SBjoern A. Zeeb */
1120*6b627f88SBjoern A. Zeeb if (skb_freed > 1)
1121*6b627f88SBjoern A. Zeeb info->flags |= IEEE80211_TX_STAT_ACK;
1122*6b627f88SBjoern A. Zeeb
1123*6b627f88SBjoern A. Zeeb if (tx_failure) {
1124*6b627f88SBjoern A. Zeeb enum iwl_fw_ini_time_point tp =
1125*6b627f88SBjoern A. Zeeb IWL_FW_INI_TIME_POINT_TX_FAILED;
1126*6b627f88SBjoern A. Zeeb
1127*6b627f88SBjoern A. Zeeb if (ieee80211_is_action(hdr->frame_control))
1128*6b627f88SBjoern A. Zeeb tp = IWL_FW_INI_TIME_POINT_TX_WFD_ACTION_FRAME_FAILED;
1129*6b627f88SBjoern A. Zeeb else if (ieee80211_is_mgmt(hdr->frame_control))
1130*6b627f88SBjoern A. Zeeb mgmt = true;
1131*6b627f88SBjoern A. Zeeb
1132*6b627f88SBjoern A. Zeeb iwl_dbg_tlv_time_point(&mld->fwrt, tp, NULL);
1133*6b627f88SBjoern A. Zeeb }
1134*6b627f88SBjoern A. Zeeb
1135*6b627f88SBjoern A. Zeeb iwl_mld_hwrate_to_tx_rate(mld, tx_resp->initial_rate, info);
1136*6b627f88SBjoern A. Zeeb
1137*6b627f88SBjoern A. Zeeb if (likely(!iwl_mld_time_sync_frame(mld, skb, hdr->addr1)))
1138*6b627f88SBjoern A. Zeeb ieee80211_tx_status_skb(mld->hw, skb);
1139*6b627f88SBjoern A. Zeeb }
1140*6b627f88SBjoern A. Zeeb
1141*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_REPLY(mld,
1142*6b627f88SBjoern A. Zeeb "TXQ %d status 0x%08x ssn=%d initial_rate 0x%x retries %d\n",
1143*6b627f88SBjoern A. Zeeb txq_id, status, ssn, le32_to_cpu(tx_resp->initial_rate),
1144*6b627f88SBjoern A. Zeeb tx_resp->failure_frame);
1145*6b627f88SBjoern A. Zeeb
1146*6b627f88SBjoern A. Zeeb if (tx_failure && mgmt)
1147*6b627f88SBjoern A. Zeeb iwl_mld_toggle_tx_ant(mld, &mld->mgmt_tx_ant);
1148*6b627f88SBjoern A. Zeeb
1149*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1150*6b627f88SBjoern A. Zeeb "Got invalid sta_id (%d)\n", sta_id))
1151*6b627f88SBjoern A. Zeeb return;
1152*6b627f88SBjoern A. Zeeb
1153*6b627f88SBjoern A. Zeeb rcu_read_lock();
1154*6b627f88SBjoern A. Zeeb
1155*6b627f88SBjoern A. Zeeb link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1156*6b627f88SBjoern A. Zeeb if (!link_sta) {
1157*6b627f88SBjoern A. Zeeb /* This can happen if the TX cmd was sent before pre_rcu_remove
1158*6b627f88SBjoern A. Zeeb * but the TX response was received after
1159*6b627f88SBjoern A. Zeeb */
1160*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_REPLY(mld,
1161*6b627f88SBjoern A. Zeeb "Got valid sta_id (%d) but sta is NULL\n",
1162*6b627f88SBjoern A. Zeeb sta_id);
1163*6b627f88SBjoern A. Zeeb goto out;
1164*6b627f88SBjoern A. Zeeb }
1165*6b627f88SBjoern A. Zeeb
1166*6b627f88SBjoern A. Zeeb if (IS_ERR(link_sta))
1167*6b627f88SBjoern A. Zeeb goto out;
1168*6b627f88SBjoern A. Zeeb
1169*6b627f88SBjoern A. Zeeb mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
1170*6b627f88SBjoern A. Zeeb
1171*6b627f88SBjoern A. Zeeb if (tx_failure && mld_sta->sta_state < IEEE80211_STA_AUTHORIZED)
1172*6b627f88SBjoern A. Zeeb iwl_mld_toggle_tx_ant(mld, &mld_sta->data_tx_ant);
1173*6b627f88SBjoern A. Zeeb
1174*6b627f88SBjoern A. Zeeb if (tid < IWL_MAX_TID_COUNT)
1175*6b627f88SBjoern A. Zeeb iwl_mld_count_mpdu_tx(link_sta, 1);
1176*6b627f88SBjoern A. Zeeb
1177*6b627f88SBjoern A. Zeeb out:
1178*6b627f88SBjoern A. Zeeb rcu_read_unlock();
1179*6b627f88SBjoern A. Zeeb }
1180*6b627f88SBjoern A. Zeeb
iwl_mld_tx_reclaim_txq(struct iwl_mld * mld,int txq,int index,bool in_flush)1181*6b627f88SBjoern A. Zeeb static void iwl_mld_tx_reclaim_txq(struct iwl_mld *mld, int txq, int index,
1182*6b627f88SBjoern A. Zeeb bool in_flush)
1183*6b627f88SBjoern A. Zeeb {
1184*6b627f88SBjoern A. Zeeb struct sk_buff_head reclaimed_skbs;
1185*6b627f88SBjoern A. Zeeb
1186*6b627f88SBjoern A. Zeeb __skb_queue_head_init(&reclaimed_skbs);
1187*6b627f88SBjoern A. Zeeb
1188*6b627f88SBjoern A. Zeeb iwl_trans_reclaim(mld->trans, txq, index, &reclaimed_skbs, in_flush);
1189*6b627f88SBjoern A. Zeeb
1190*6b627f88SBjoern A. Zeeb while (!skb_queue_empty(&reclaimed_skbs)) {
1191*6b627f88SBjoern A. Zeeb struct sk_buff *skb = __skb_dequeue(&reclaimed_skbs);
1192*6b627f88SBjoern A. Zeeb struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
1193*6b627f88SBjoern A. Zeeb
1194*6b627f88SBjoern A. Zeeb iwl_trans_free_tx_cmd(mld->trans, info->driver_data[1]);
1195*6b627f88SBjoern A. Zeeb
1196*6b627f88SBjoern A. Zeeb memset(&info->status, 0, sizeof(info->status));
1197*6b627f88SBjoern A. Zeeb
1198*6b627f88SBjoern A. Zeeb /* The packet was transmitted successfully. Failures arrive as
1199*6b627f88SBjoern A. Zeeb * single-frame responses instead, because the firmware sends a
1200*6b627f88SBjoern A. Zeeb * frame at least once without aggregation before failing it.
1201*6b627f88SBjoern A. Zeeb */
1202*6b627f88SBjoern A. Zeeb if (!in_flush)
1203*6b627f88SBjoern A. Zeeb info->flags |= IEEE80211_TX_STAT_ACK;
1204*6b627f88SBjoern A. Zeeb else
1205*6b627f88SBjoern A. Zeeb info->flags &= ~IEEE80211_TX_STAT_ACK;
1206*6b627f88SBjoern A. Zeeb
1207*6b627f88SBjoern A. Zeeb ieee80211_tx_status_skb(mld->hw, skb);
1208*6b627f88SBjoern A. Zeeb }
1209*6b627f88SBjoern A. Zeeb }
1210*6b627f88SBjoern A. Zeeb
iwl_mld_flush_link_sta_txqs(struct iwl_mld * mld,u32 fw_sta_id)1211*6b627f88SBjoern A. Zeeb int iwl_mld_flush_link_sta_txqs(struct iwl_mld *mld, u32 fw_sta_id)
1212*6b627f88SBjoern A. Zeeb {
1213*6b627f88SBjoern A. Zeeb struct iwl_tx_path_flush_cmd_rsp *rsp;
1214*6b627f88SBjoern A. Zeeb struct iwl_tx_path_flush_cmd flush_cmd = {
1215*6b627f88SBjoern A. Zeeb .sta_id = cpu_to_le32(fw_sta_id),
1216*6b627f88SBjoern A. Zeeb .tid_mask = cpu_to_le16(0xffff),
1217*6b627f88SBjoern A. Zeeb };
1218*6b627f88SBjoern A. Zeeb struct iwl_host_cmd cmd = {
1219*6b627f88SBjoern A. Zeeb .id = TXPATH_FLUSH,
1220*6b627f88SBjoern A. Zeeb .len = { sizeof(flush_cmd), },
1221*6b627f88SBjoern A. Zeeb .data = { &flush_cmd, },
1222*6b627f88SBjoern A. Zeeb .flags = CMD_WANT_SKB,
1223*6b627f88SBjoern A. Zeeb };
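	/* CMD_WANT_SKB makes the transport hand us the response packet;
	 * every exit path below must release it via iwl_free_resp().
	 */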
1224*6b627f88SBjoern A. Zeeb int ret, num_flushed_queues;
1225*6b627f88SBjoern A. Zeeb u32 resp_len;
1226*6b627f88SBjoern A. Zeeb
1227*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_QUEUES(mld, "flush for sta id %d tid mask 0x%x\n",
1228*6b627f88SBjoern A. Zeeb fw_sta_id, 0xffff);
1229*6b627f88SBjoern A. Zeeb
1230*6b627f88SBjoern A. Zeeb ret = iwl_mld_send_cmd(mld, &cmd);
1231*6b627f88SBjoern A. Zeeb if (ret) {
1232*6b627f88SBjoern A. Zeeb IWL_ERR(mld, "Failed to send flush command (%d)\n", ret);
1233*6b627f88SBjoern A. Zeeb return ret;
1234*6b627f88SBjoern A. Zeeb }
1235*6b627f88SBjoern A. Zeeb
1236*6b627f88SBjoern A. Zeeb resp_len = iwl_rx_packet_payload_len(cmd.resp_pkt);
1237*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, resp_len != sizeof(*rsp),
1238*6b627f88SBjoern A. Zeeb "Invalid TXPATH_FLUSH response len: %d\n",
1239*6b627f88SBjoern A. Zeeb resp_len)) {
1240*6b627f88SBjoern A. Zeeb ret = -EIO;
1241*6b627f88SBjoern A. Zeeb goto free_rsp;
1242*6b627f88SBjoern A. Zeeb }
1243*6b627f88SBjoern A. Zeeb
1244*6b627f88SBjoern A. Zeeb rsp = (void *)cmd.resp_pkt->data;
1245*6b627f88SBjoern A. Zeeb
1246*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, le16_to_cpu(rsp->sta_id) != fw_sta_id,
1247*6b627f88SBjoern A. Zeeb "sta_id %d != rsp_sta_id %d\n", fw_sta_id,
1248*6b627f88SBjoern A. Zeeb le16_to_cpu(rsp->sta_id))) {
1249*6b627f88SBjoern A. Zeeb ret = -EIO;
1250*6b627f88SBjoern A. Zeeb goto free_rsp;
1251*6b627f88SBjoern A. Zeeb }
1252*6b627f88SBjoern A. Zeeb
1253*6b627f88SBjoern A. Zeeb num_flushed_queues = le16_to_cpu(rsp->num_flushed_queues);
1254*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, num_flushed_queues > IWL_TX_FLUSH_QUEUE_RSP,
1255*6b627f88SBjoern A. Zeeb "num_flushed_queues %d\n", num_flushed_queues)) {
1256*6b627f88SBjoern A. Zeeb ret = -EIO;
1257*6b627f88SBjoern A. Zeeb goto free_rsp;
1258*6b627f88SBjoern A. Zeeb }
1259*6b627f88SBjoern A. Zeeb
1260*6b627f88SBjoern A. Zeeb for (int i = 0; i < num_flushed_queues; i++) {
1261*6b627f88SBjoern A. Zeeb struct iwl_flush_queue_info *queue_info = &rsp->queues[i];
1262*6b627f88SBjoern A. Zeeb int read_after = le16_to_cpu(queue_info->read_after_flush);
1263*6b627f88SBjoern A. Zeeb int txq_id = le16_to_cpu(queue_info->queue_num);
1264*6b627f88SBjoern A. Zeeb
1265*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld,
1266*6b627f88SBjoern A. Zeeb txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1267*6b627f88SBjoern A. Zeeb "Invalid txq id %d\n", txq_id))
1268*6b627f88SBjoern A. Zeeb continue;
1269*6b627f88SBjoern A. Zeeb
1270*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_QUEUES(mld,
1271*6b627f88SBjoern A. Zeeb "tid %d txq_id %d read-before %d read-after %d\n",
1272*6b627f88SBjoern A. Zeeb le16_to_cpu(queue_info->tid), txq_id,
1273*6b627f88SBjoern A. Zeeb le16_to_cpu(queue_info->read_before_flush),
1274*6b627f88SBjoern A. Zeeb read_after);
1275*6b627f88SBjoern A. Zeeb
1276*6b627f88SBjoern A. Zeeb iwl_mld_tx_reclaim_txq(mld, txq_id, read_after, true);
1277*6b627f88SBjoern A. Zeeb }
1278*6b627f88SBjoern A. Zeeb
1279*6b627f88SBjoern A. Zeeb free_rsp:
1280*6b627f88SBjoern A. Zeeb iwl_free_resp(&cmd);
1281*6b627f88SBjoern A. Zeeb return ret;
1282*6b627f88SBjoern A. Zeeb }
1283*6b627f88SBjoern A. Zeeb
iwl_mld_ensure_queue(struct iwl_mld * mld,struct ieee80211_txq * txq)1284*6b627f88SBjoern A. Zeeb int iwl_mld_ensure_queue(struct iwl_mld *mld, struct ieee80211_txq *txq)
1285*6b627f88SBjoern A. Zeeb {
1286*6b627f88SBjoern A. Zeeb struct iwl_mld_txq *mld_txq = iwl_mld_txq_from_mac80211(txq);
1287*6b627f88SBjoern A. Zeeb int ret;
1288*6b627f88SBjoern A. Zeeb
1289*6b627f88SBjoern A. Zeeb lockdep_assert_wiphy(mld->wiphy);
1290*6b627f88SBjoern A. Zeeb
1291*6b627f88SBjoern A. Zeeb if (likely(mld_txq->status.allocated))
1292*6b627f88SBjoern A. Zeeb return 0;
1293*6b627f88SBjoern A. Zeeb
1294*6b627f88SBjoern A. Zeeb ret = iwl_mld_add_txq(mld, txq);
1295*6b627f88SBjoern A. Zeeb
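	/* The queue was allocated synchronously above; if it was also
	 * pending on the deferred-add list, unlink it so the worker
	 * does not try to add it a second time.
	 */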
1296*6b627f88SBjoern A. Zeeb spin_lock_bh(&mld->add_txqs_lock);
1297*6b627f88SBjoern A. Zeeb if (!list_empty(&mld_txq->list))
1298*6b627f88SBjoern A. Zeeb list_del_init(&mld_txq->list);
1299*6b627f88SBjoern A. Zeeb spin_unlock_bh(&mld->add_txqs_lock);
1300*6b627f88SBjoern A. Zeeb
1301*6b627f88SBjoern A. Zeeb return ret;
1302*6b627f88SBjoern A. Zeeb }
1303*6b627f88SBjoern A. Zeeb
iwl_mld_update_sta_txqs(struct iwl_mld * mld,struct ieee80211_sta * sta,u32 old_sta_mask,u32 new_sta_mask)1304*6b627f88SBjoern A. Zeeb int iwl_mld_update_sta_txqs(struct iwl_mld *mld,
1305*6b627f88SBjoern A. Zeeb struct ieee80211_sta *sta,
1306*6b627f88SBjoern A. Zeeb u32 old_sta_mask, u32 new_sta_mask)
1307*6b627f88SBjoern A. Zeeb {
1308*6b627f88SBjoern A. Zeeb struct iwl_scd_queue_cfg_cmd cmd = {
1309*6b627f88SBjoern A. Zeeb .operation = cpu_to_le32(IWL_SCD_QUEUE_MODIFY),
1310*6b627f88SBjoern A. Zeeb .u.modify.old_sta_mask = cpu_to_le32(old_sta_mask),
1311*6b627f88SBjoern A. Zeeb .u.modify.new_sta_mask = cpu_to_le32(new_sta_mask),
1312*6b627f88SBjoern A. Zeeb };
1313*6b627f88SBjoern A. Zeeb
1314*6b627f88SBjoern A. Zeeb lockdep_assert_wiphy(mld->wiphy);
1315*6b627f88SBjoern A. Zeeb
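	/* mac80211 provides one txq per data TID plus a final entry,
	 * sta->txq[IEEE80211_NUM_TIDS], for management frames; the
	 * firmware addresses the latter as IWL_MGMT_TID.
	 */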
1316*6b627f88SBjoern A. Zeeb for (int tid = 0; tid <= IWL_MAX_TID_COUNT; tid++) {
1317*6b627f88SBjoern A. Zeeb struct ieee80211_txq *txq =
1318*6b627f88SBjoern A. Zeeb sta->txq[tid != IWL_MAX_TID_COUNT ?
1319*6b627f88SBjoern A. Zeeb tid : IEEE80211_NUM_TIDS];
1320*6b627f88SBjoern A. Zeeb struct iwl_mld_txq *mld_txq =
1321*6b627f88SBjoern A. Zeeb iwl_mld_txq_from_mac80211(txq);
1322*6b627f88SBjoern A. Zeeb int ret;
1323*6b627f88SBjoern A. Zeeb
1324*6b627f88SBjoern A. Zeeb if (!mld_txq->status.allocated)
1325*6b627f88SBjoern A. Zeeb continue;
1326*6b627f88SBjoern A. Zeeb
1327*6b627f88SBjoern A. Zeeb if (tid == IWL_MAX_TID_COUNT)
1328*6b627f88SBjoern A. Zeeb cmd.u.modify.tid = cpu_to_le32(IWL_MGMT_TID);
1329*6b627f88SBjoern A. Zeeb else
1330*6b627f88SBjoern A. Zeeb cmd.u.modify.tid = cpu_to_le32(tid);
1331*6b627f88SBjoern A. Zeeb
1332*6b627f88SBjoern A. Zeeb ret = iwl_mld_send_cmd_pdu(mld,
1333*6b627f88SBjoern A. Zeeb WIDE_ID(DATA_PATH_GROUP,
1334*6b627f88SBjoern A. Zeeb SCD_QUEUE_CONFIG_CMD),
1335*6b627f88SBjoern A. Zeeb &cmd);
1336*6b627f88SBjoern A. Zeeb if (ret)
1337*6b627f88SBjoern A. Zeeb return ret;
1338*6b627f88SBjoern A. Zeeb }
1339*6b627f88SBjoern A. Zeeb
1340*6b627f88SBjoern A. Zeeb return 0;
1341*6b627f88SBjoern A. Zeeb }
1342*6b627f88SBjoern A. Zeeb
iwl_mld_handle_compressed_ba_notif(struct iwl_mld * mld,struct iwl_rx_packet * pkt)1343*6b627f88SBjoern A. Zeeb void iwl_mld_handle_compressed_ba_notif(struct iwl_mld *mld,
1344*6b627f88SBjoern A. Zeeb struct iwl_rx_packet *pkt)
1345*6b627f88SBjoern A. Zeeb {
1346*6b627f88SBjoern A. Zeeb struct iwl_compressed_ba_notif *ba_res = (void *)pkt->data;
1347*6b627f88SBjoern A. Zeeb u32 pkt_len = iwl_rx_packet_payload_len(pkt);
1348*6b627f88SBjoern A. Zeeb u16 tfd_cnt = le16_to_cpu(ba_res->tfd_cnt);
1349*6b627f88SBjoern A. Zeeb u8 sta_id = ba_res->sta_id;
1350*6b627f88SBjoern A. Zeeb struct ieee80211_link_sta *link_sta;
1351*6b627f88SBjoern A. Zeeb
1352*6b627f88SBjoern A. Zeeb if (!tfd_cnt)
1353*6b627f88SBjoern A. Zeeb return;
1354*6b627f88SBjoern A. Zeeb
1355*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, struct_size(ba_res, tfd, tfd_cnt) > pkt_len,
1356*6b627f88SBjoern A. Zeeb "Short BA notif (tfd_cnt=%d, size:0x%x)\n",
1357*6b627f88SBjoern A. Zeeb tfd_cnt, pkt_len))
1358*6b627f88SBjoern A. Zeeb return;
1359*6b627f88SBjoern A. Zeeb
1360*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_REPLY(mld,
1361*6b627f88SBjoern A. Zeeb "BA notif received from sta_id=%d, flags=0x%x, sent:%d, acked:%d\n",
1362*6b627f88SBjoern A. Zeeb sta_id, le32_to_cpu(ba_res->flags),
1363*6b627f88SBjoern A. Zeeb le16_to_cpu(ba_res->txed),
1364*6b627f88SBjoern A. Zeeb le16_to_cpu(ba_res->done));
1365*6b627f88SBjoern A. Zeeb
1366*6b627f88SBjoern A. Zeeb for (int i = 0; i < tfd_cnt; i++) {
1367*6b627f88SBjoern A. Zeeb struct iwl_compressed_ba_tfd *ba_tfd = &ba_res->tfd[i];
1368*6b627f88SBjoern A. Zeeb int txq_id = le16_to_cpu(ba_tfd->q_num);
1369*6b627f88SBjoern A. Zeeb int index = le16_to_cpu(ba_tfd->tfd_index);
1370*6b627f88SBjoern A. Zeeb
1371*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld,
1372*6b627f88SBjoern A. Zeeb txq_id >= ARRAY_SIZE(mld->fw_id_to_txq),
1373*6b627f88SBjoern A. Zeeb "Invalid txq id %d\n", txq_id))
1374*6b627f88SBjoern A. Zeeb continue;
1375*6b627f88SBjoern A. Zeeb
1376*6b627f88SBjoern A. Zeeb iwl_mld_tx_reclaim_txq(mld, txq_id, index, false);
1377*6b627f88SBjoern A. Zeeb }
1378*6b627f88SBjoern A. Zeeb
1379*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, sta_id >= mld->fw->ucode_capa.num_stations,
1380*6b627f88SBjoern A. Zeeb "Got invalid sta_id (%d)\n", sta_id))
1381*6b627f88SBjoern A. Zeeb return;
1382*6b627f88SBjoern A. Zeeb
1383*6b627f88SBjoern A. Zeeb rcu_read_lock();
1384*6b627f88SBjoern A. Zeeb
1385*6b627f88SBjoern A. Zeeb link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
1386*6b627f88SBjoern A. Zeeb if (IWL_FW_CHECK(mld, IS_ERR_OR_NULL(link_sta),
1387*6b627f88SBjoern A. Zeeb "Got valid sta_id (%d) but link_sta is NULL\n",
1388*6b627f88SBjoern A. Zeeb sta_id))
1389*6b627f88SBjoern A. Zeeb goto out;
1390*6b627f88SBjoern A. Zeeb
1391*6b627f88SBjoern A. Zeeb iwl_mld_count_mpdu_tx(link_sta, le16_to_cpu(ba_res->txed));
1392*6b627f88SBjoern A. Zeeb out:
1393*6b627f88SBjoern A. Zeeb rcu_read_unlock();
1394*6b627f88SBjoern A. Zeeb }
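
/*
 * Illustrative sketch only (not driver code): the BA-notification length
 * check above relies on struct_size(), which computes
 * sizeof(*ba_res) + tfd_cnt * sizeof(ba_res->tfd[0]) with saturation on
 * overflow, so an absurd firmware tfd_cnt cannot wrap the multiplication
 * and sneak past the bounds check.  A minimal model of the pattern:
 */
#if 0	/* example, never built */
#include <linux/overflow.h>
#include <linux/types.h>

struct demo_notif {
	__le16 cnt;
	__le16 reserved;
	__le32 elem[];
};

static bool demo_notif_fits(const struct demo_notif *n, u32 pkt_len)
{
	/* struct_size() saturates to SIZE_MAX on overflow, failing this */
	return struct_size(n, elem, le16_to_cpu(n->cnt)) <= pkt_len;
}
#endif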