xref: /freebsd/sys/contrib/dev/iwlwifi/mld/agg.c (revision 6b627f88584ce13118e0a24951b503c0b1f2d5a7)
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3  * Copyright (C) 2024-2025 Intel Corporation
4  */
5 #include "agg.h"
6 #include "sta.h"
7 #include "hcmd.h"
8 #if defined(__FreeBSD__)
9 #include <linux/cache.h>
10 #endif
11 
/* Release in-order frames from @reorder_buf to mac80211.
 *
 * Walks the reorder window from the current head SN up to (but not
 * including) @nssn, dequeuing every buffered frame for each SN slot and
 * passing it up the stack, then advances head_sn to @nssn.
 *
 * @napi may be NULL (e.g. when flushing on delBA from a sync-queues
 * notification rather than from the RX path).
 */
static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	/* entries is one shared array for all RX queues; index into the
	 * slice belonging to this buffer's queue.
	 */
	struct iwl_mld_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	while (ieee80211_sn_less(ssn, nssn)) {
		/* window slot for this SN; buf_size is the BA window size */
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}
44 
/* Handle a frame-release notification: look up the BA session for @baid
 * under RCU, resolve any of its stations to get the mac80211 STA pointer,
 * and release buffered frames on @queue up to @nssn.
 *
 * Silently returns if the BAID has no allocated session (can happen if
 * the notification races with session teardown).
 */
static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}
82 
iwl_mld_handle_frame_release_notif(struct iwl_mld * mld,struct napi_struct * napi,struct iwl_rx_packet * pkt,int queue)83 void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
84 					struct napi_struct *napi,
85 					struct iwl_rx_packet *pkt, int queue)
86 {
87 	struct iwl_frame_release *release = (void *)pkt->data;
88 	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
89 
90 	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
91 			 "Unexpected frame release notif size %u (expected %zu)\n",
92 			 pkt_len, sizeof(*release)))
93 		return;
94 
95 	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
96 					  le16_to_cpu(release->nssn),
97 					  queue);
98 }
99 
iwl_mld_handle_bar_frame_release_notif(struct iwl_mld * mld,struct napi_struct * napi,struct iwl_rx_packet * pkt,int queue)100 void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
101 					    struct napi_struct *napi,
102 					    struct iwl_rx_packet *pkt,
103 					    int queue)
104 {
105 	struct iwl_bar_frame_release *release = (void *)pkt->data;
106 	struct iwl_mld_baid_data *baid_data;
107 	unsigned int baid, nssn, sta_id, tid;
108 	u32 pkt_len = iwl_rx_packet_payload_len(pkt);
109 
110 	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
111 			 "Unexpected frame release notif size %u (expected %zu)\n",
112 			 pkt_len, sizeof(*release)))
113 		return;
114 
115 	baid = le32_get_bits(release->ba_info,
116 			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
117 	nssn = le32_get_bits(release->ba_info,
118 			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
119 	sta_id = le32_get_bits(release->sta_tid,
120 			       IWL_BAR_FRAME_RELEASE_STA_MASK);
121 	tid = le32_get_bits(release->sta_tid,
122 			    IWL_BAR_FRAME_RELEASE_TID_MASK);
123 
124 	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
125 			 "BAR release: invalid BAID (%x)\n", baid))
126 		return;
127 
128 	rcu_read_lock();
129 	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
130 	if (!baid_data) {
131 		IWL_DEBUG_HT(mld,
132 			     "Got valid BAID %d but not allocated\n",
133 			     baid);
134 		goto out_unlock;
135 	}
136 
137 	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
138 			 sta_id > mld->fw->ucode_capa.num_stations ||
139 			 !(baid_data->sta_mask & BIT(sta_id)),
140 			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
141 			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
142 			 tid))
143 		goto out_unlock;
144 
145 	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
146 		       nssn);
147 
148 	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
149 out_unlock:
150 	rcu_read_unlock();
151 }
152 
/* Per-queue delBA handler, invoked via the RX-queues sync notification.
 *
 * Flushes the entire reorder window for @queue by "releasing" up to
 * head_sn + buf_size, which by construction is past every stored frame,
 * so all buffered frames are passed up to mac80211 (napi is NULL here
 * since we're not in the NAPI poll context).
 */
void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}
187 
/* Reorder an incoming MPDU according to its BA session state.
 *
 * Returns IWL_MLD_PASS_SKB if the frame should be passed to mac80211 by
 * the caller, IWL_MLD_DROP_SKB if it is a duplicate/outdated frame to be
 * dropped, or IWL_MLD_BUFFERED_SKB if ownership of the skb was taken and
 * it was queued in the reorder buffer.
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
	u8 tid = ieee80211_get_tid(hdr);
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	/* QoS-null etc. carry no data payload - nothing to reorder */
	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	/* collect the FW IDs of all links of this station */
	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	/* an old-SN frame before the buffer was ever used on this queue:
	 * don't mark it valid, just pass the frame through
	 */
	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to reorder timer, where NSSN is behind head_sn.
	 * When we released everything, and we got the next frame in the
	 * sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust NSSN for AMSDU sub-frames that are not the last. The
	 * reason is that NSSN advances on the first sub-frame, and may cause
	 * the reorder buffer to advance before all the sub-frames arrive.
	 *
	 * Example: reorder buffer contains SN 0 & 2, and we receive AMSDU with
	 * SN 1. NSSN for first sub frame will be 3 with the result of driver
	 * releasing SN 0,1, 2. When sub-frame 1 arrives - reorder buffer is
	 * already ahead and it will be dropped.
	 * If the last sub-frame is not on this queue - we will get frame
	 * release notification with up to date NSSN.
	 * If this is the first frame that is stored in the buffer, the head_sn
	 * may be outdated. Update it based on the last NSSN to make sure it
	 * will be released when the frame release notification arrives.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);
	else if (buffer->num_stored == 1)
		buffer->head_sn = nssn;

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
324 
/* BA-session inactivity timer callback.
 *
 * If traffic was seen within 2x the negotiated timeout the timer is simply
 * re-armed for the remaining interval; otherwise mac80211 is told the
 * session expired so it tears the session down. Runs entirely under RCU
 * since the session data may be freed concurrently.
 */
static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		timer_container_of(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	/* the timer should only be armed for sessions with a timeout */
	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	/* session still active recently enough - re-arm instead of expiring */
	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}
372 
373 static int
iwl_mld_stop_ba_in_fw(struct iwl_mld * mld,struct ieee80211_sta * sta,int tid)374 iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
375 {
376 	struct iwl_rx_baid_cfg_cmd cmd = {
377 		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
378 		.remove.sta_id_mask =
379 			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
380 		.remove.tid = cpu_to_le32(tid),
381 
382 	};
383 	int ret;
384 
385 	ret = iwl_mld_send_cmd_pdu(mld,
386 				   WIDE_ID(DATA_PATH_GROUP,
387 					   RX_BAID_ALLOCATION_CONFIG_CMD),
388 				   &cmd);
389 	if (ret)
390 		return ret;
391 
392 	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");
393 
394 	return ret;
395 }
396 
/* Ask the firmware to allocate an RX BA session (BAID) for @sta/@tid.
 *
 * Sends the BAID allocation command synchronously (CMD_WANT_SKB) and
 * parses the response. Returns the allocated BAID (>= 0) on success or a
 * negative errno on failure.
 */
static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	/* the response must be exactly one 32-bit BAID value */
	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	/* reject out-of-range BAIDs from the firmware */
	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	/* response packet must be freed since we used CMD_WANT_SKB */
	iwl_free_resp(&hcmd);
	return ret;
}
452 
/* Initialize the per-RX-queue reorder buffers of a new BA session:
 * set each queue's head SN to the session's starting @ssn and init the
 * skb queue of every window slot.
 */
static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int q = 0; q < mld->trans->info.num_rxqs; q++) {
		struct iwl_mld_reorder_buffer *buf = &data->reorder_buf[q];
		struct iwl_mld_reorder_buf_entry *slots =
			&data->entries[q * data->entries_per_queue];

		buf->queue = q;
		buf->head_sn = ssn;

		for (int slot = 0; slot < data->buf_size; slot++)
			__skb_queue_head_init(&slots[slot].frames);
	}
}
470 
iwl_mld_free_reorder_buffer(struct iwl_mld * mld,struct iwl_mld_baid_data * data)471 static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
472 					struct iwl_mld_baid_data *data)
473 {
474 	struct iwl_mld_delba_data delba_data = {
475 		.baid = data->baid,
476 	};
477 
478 	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
479 			       &delba_data, sizeof(delba_data));
480 
481 	for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
482 		struct iwl_mld_reorder_buffer *reorder_buf =
483 			&data->reorder_buf[i];
484 		struct iwl_mld_reorder_buf_entry *entries =
485 			&data->entries[i * data->entries_per_queue];
486 
487 		if (likely(!reorder_buf->num_stored))
488 			continue;
489 
490 		/* This shouldn't happen in regular DELBA since the RX queues
491 		 * sync internal DELBA notification should trigger a release
492 		 * of all frames in the reorder buffer.
493 		 */
494 		WARN_ON(1);
495 
496 		for (int j = 0; j < data->buf_size; j++)
497 			__skb_queue_purge(&entries[j].frames);
498 	}
499 }
500 
/* Start an RX A-MPDU (BA) session.
 *
 * Allocates and initializes the per-session reorder state, asks the
 * firmware for a BAID, arms the inactivity timer (if @timeout is nonzero)
 * and publishes the session data via RCU.
 *
 * Returns 0 on success; -ENOSPC when the max number of sessions is
 * reached, -EINVAL on an empty station mask, -ENOMEM on allocation
 * failure, or the firmware command error.
 */
int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	/* raw per-queue reorder array size, before cache-line alignment */
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->info.num_rxqs * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	/* arm the inactivity timer; it expires after 2x the negotiated
	 * timeout (in TUs) of no RX traffic
	 */
	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
597 
/* Stop an RX A-MPDU (BA) session for @sta/@tid.
 *
 * Removes the session from the firmware (unless a HW restart is in
 * progress, in which case the firmware no longer knows it), stops the
 * inactivity timer, flushes the reorder buffers and frees the session
 * data via RCU.
 *
 * Returns 0 on success, -EINVAL if no session data is found, or the
 * firmware command error.
 */
int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* during firmware restart, do not send the command as the firmware no
	 * longer recognizes the session. instead, only clear the driver BA
	 * session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	/* make sure the expiry callback can't run after we free the data */
	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	/* unpublish first, then free after a grace period so concurrent
	 * RCU readers in the RX path stay safe
	 */
	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}
637 
/* Re-point all BA sessions from @old_sta_mask to @new_sta_mask in the
 * firmware (used when a station's FW link IDs change, e.g. link
 * add/remove for an MLO station), and update the driver's copy.
 *
 * No-op during HW restart since mac80211 will remove the sessions anyway.
 * Returns 0 on success or the first firmware command error.
 */
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		/* skip sessions belonging to other stations */
		if (!(data->sta_mask & old_sta_mask))
			continue;

		/* a session should map to exactly the old mask, not a subset */
		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}
681