// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "agg.h"
#include "sta.h"
#include "hcmd.h"

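/* Release frames from the reorder buffer, in order, up to (but not
 * including) @nssn, and pass them to mac80211; advances the buffer's
 * head_sn to @nssn.
 */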
static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	struct iwl_mld_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}

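/* Look up the BA session data for @baid and release the frames buffered
 * on @queue up to @nssn. Common helper for the frame release and BAR
 * frame release notifications.
 */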
static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}

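/* Handle a firmware frame release notification: validate the packet
 * length, then release buffered frames up to the reported NSSN.
 */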
void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue)
{
	struct iwl_frame_release *release = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
					  le16_to_cpu(release->nssn),
					  queue);
}

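/* Handle a firmware BAR frame release notification: extract the BAID,
 * NSSN, STA ID and TID, validate them against the stored BA session
 * data, then release buffered frames up to the NSSN.
 */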
void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue)
{
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mld_baid_data *baid_data;
	unsigned int baid, nssn, sta_id, tid;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAR release: invalid BAID (%x)\n", baid))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (IWL_FW_CHECK(mld, !baid_data,
			 "Got valid BAID %d but not allocated, invalid BAR release!\n",
			 baid))
		goto out_unlock;

	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
			 sta_id > mld->fw->ucode_capa.num_stations ||
			 !(baid_data->sta_mask & BIT(sta_id)),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
			 tid))
		goto out_unlock;

	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
out_unlock:
	rcu_read_unlock();
}

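/* Per-queue handler for the internal DEL_BA RX-queues-sync notification
 * (see iwl_mld_free_reorder_buffer): release every frame still stored in
 * this queue's reorder buffer to the stack by releasing a full window
 * (head_sn + buf_size).
 */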
void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}

/* Returns IWL_MLD_BUFFERED_SKB if the MPDU was buffered, IWL_MLD_DROP_SKB if
 * it was dropped (duplicate or outdated SN), or IWL_MLD_PASS_SKB if it should
 * be passed to the upper layer.
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
	u8 tid = ieee80211_get_tid(hdr);
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust the NSSN for A-MSDU sub-frames that are not the
	 * last. The reason is that the NSSN advances on the first sub-frame,
	 * and may cause the reorder buffer to advance before all the
	 * sub-frames arrive.
	 *
	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
	 * A-MSDU with SN 1. The NSSN for the first sub-frame will be 3, with
	 * the result of the driver releasing SN 0, 1, 2. When sub-frame 1
	 * arrives, the reorder buffer is already ahead of it and it will be
	 * dropped.
	 * If the last sub-frame is not on this queue, we will get a frame
	 * release notification with an up-to-date NSSN.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);

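/* RX BA session inactivity timer. If frames were received within twice
 * the session timeout, re-arm the timer for the remaining time;
 * otherwise notify mac80211 so it tears the session down.
 */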
static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		from_timer(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}

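/* Remove the RX BAID allocation in the firmware for this station/TID */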
static int
iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
		.remove.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.remove.tid = cpu_to_le32(tid),
	};
	int ret;

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   RX_BAID_ALLOCATION_CONFIG_CMD),
				   &cmd);
	if (ret)
		return ret;

	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");

	return ret;
}

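/* Allocate an RX BAID in the firmware for this station/TID.
 * Returns the BAID chosen by the firmware, or a negative error code.
 */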
static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	iwl_free_resp(&hcmd);
	return ret;
}

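/* Set up the per-queue reorder buffers of a new BA session: point each
 * buffer's head at the starting SSN and initialize the per-entry frame
 * queues.
 */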
static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		reorder_buf->head_sn = ssn;
		reorder_buf->queue = i;

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}

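/* Free the per-queue reorder buffers. The RX queues sync ensures every
 * queue first releases its stored frames (via iwl_mld_del_ba), so the
 * purge below is only a safety net.
 */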
static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data)
{
	struct iwl_mld_delba_data delba_data = {
		.baid = data->baid,
	};

	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
			       &delba_data, sizeof(delba_data));

	for (int i = 0; i < mld->trans->num_rx_queues; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		if (likely(!reorder_buf->num_stored))
			continue;

		/* This shouldn't happen in regular DELBA since the RX queues
		 * sync internal DELBA notification should trigger a release
		 * of all frames in the reorder buffer.
		 */
		WARN_ON(1);

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);
	}
}

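/* Start an RX BA session: allocate the reorder buffers, request a BAID
 * from the firmware, and arm the inactivity timer if a timeout was
 * negotiated.
 */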
int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->num_rx_queues * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}

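/* Stop an RX BA session: remove the BAID in the firmware (unless it is
 * restarting and no longer knows the session), then free the timer, the
 * reorder buffers and the session data.
 */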
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* During firmware restart, do not send the command as the firmware
	 * no longer recognizes the session. Instead, only clear the driver
	 * BA session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}

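/* Move every BA session matching @old_sta_mask over to @new_sta_mask,
 * updating both the firmware (MODIFY action) and the driver's BAID data.
 */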
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		if (!(data->sta_mask & old_sta_mask))
			continue;

		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}