// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2024-2025 Intel Corporation
 */
#include "agg.h"
#include "sta.h"
#include "hcmd.h"

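/*
 * RX aggregation (BlockAck session) handling: allocating and freeing BAIDs
 * in the firmware, maintaining the per-RX-queue reorder buffers, and
 * releasing buffered frames to mac80211 in order.
 */
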
static void
iwl_mld_reorder_release_frames(struct iwl_mld *mld, struct ieee80211_sta *sta,
			       struct napi_struct *napi,
			       struct iwl_mld_baid_data *baid_data,
			       struct iwl_mld_reorder_buffer *reorder_buf,
			       u16 nssn)
{
	struct iwl_mld_reorder_buf_entry *entries =
		&baid_data->entries[reorder_buf->queue *
				    baid_data->entries_per_queue];
	u16 ssn = reorder_buf->head_sn;

	while (ieee80211_sn_less(ssn, nssn)) {
		int index = ssn % baid_data->buf_size;
		struct sk_buff_head *skb_list = &entries[index].frames;
		struct sk_buff *skb;

		ssn = ieee80211_sn_inc(ssn);

		/* Empty the list. Will have more than one frame for A-MSDU.
		 * Empty list is valid as well since nssn indicates frames were
		 * received.
		 */
		while ((skb = __skb_dequeue(skb_list))) {
			iwl_mld_pass_packet_to_mac80211(mld, napi, skb,
							reorder_buf->queue,
							sta);
			reorder_buf->num_stored--;
		}
	}
	reorder_buf->head_sn = nssn;
}
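
/*
 * Worked example (editor's illustration): 802.11 sequence numbers are
 * 12-bit, so ieee80211_sn_less()/ieee80211_sn_inc() operate modulo 4096.
 * With head_sn == 4094, nssn == 1 and buf_size == 64, the loop above walks
 * ssn = 4094, 4095, 0, flushing the skb list at index ssn % 64 for each
 * step, and leaves head_sn == 1.
 */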

static void iwl_mld_release_frames_from_notif(struct iwl_mld *mld,
					      struct napi_struct *napi,
					      u8 baid, u16 nssn, int queue)
{
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	u32 sta_id;

	IWL_DEBUG_HT(mld, "Frame release notification for BAID %u, NSSN %d\n",
		     baid, nssn);

	if (WARN_ON_ONCE(baid == IWL_RX_REORDER_DATA_INVALID_BAID ||
			 baid >= ARRAY_SIZE(mld->fw_id_to_ba)))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!ba_data) {
		IWL_DEBUG_HT(mld, "BAID %d not found in map\n", baid);
		goto out_unlock;
	}

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	iwl_mld_reorder_release_frames(mld, link_sta->sta, napi, ba_data,
				       reorder_buf, nssn);
out_unlock:
	rcu_read_unlock();
}
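
/*
 * Note (editor's summary): frame release notifications are handled per RX
 * queue; each queue advances only its own reorder_buf[queue] slice of the
 * BAID data, which is why RCU protection alone suffices here.
 */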

void iwl_mld_handle_frame_release_notif(struct iwl_mld *mld,
					struct napi_struct *napi,
					struct iwl_rx_packet *pkt, int queue)
{
	struct iwl_frame_release *release = (void *)pkt->data;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	iwl_mld_release_frames_from_notif(mld, napi, release->baid,
					  le16_to_cpu(release->nssn),
					  queue);
}
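
/*
 * Note (editor's summary): the firmware sends this notification so that
 * queues which did not receive the last A-MSDU subframe still get an
 * up-to-date NSSN and can release buffered frames (see the A-MSDU comment
 * in iwl_mld_reorder() below).
 */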

void iwl_mld_handle_bar_frame_release_notif(struct iwl_mld *mld,
					    struct napi_struct *napi,
					    struct iwl_rx_packet *pkt,
					    int queue)
{
	struct iwl_bar_frame_release *release = (void *)pkt->data;
	struct iwl_mld_baid_data *baid_data;
	unsigned int baid, nssn, sta_id, tid;
	u32 pkt_len = iwl_rx_packet_payload_len(pkt);

	if (IWL_FW_CHECK(mld, pkt_len < sizeof(*release),
			 "Unexpected frame release notif size %u (expected %zu)\n",
			 pkt_len, sizeof(*release)))
		return;

	baid = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_BAID_MASK);
	nssn = le32_get_bits(release->ba_info,
			     IWL_BAR_FRAME_RELEASE_NSSN_MASK);
	sta_id = le32_get_bits(release->sta_tid,
			       IWL_BAR_FRAME_RELEASE_STA_MASK);
	tid = le32_get_bits(release->sta_tid,
			    IWL_BAR_FRAME_RELEASE_TID_MASK);

	if (IWL_FW_CHECK(mld, baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAR release: invalid BAID (%x)\n", baid))
		return;

	rcu_read_lock();
	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID %d but not allocated\n",
			     baid);
		goto out_unlock;
	}

	if (IWL_FW_CHECK(mld, tid != baid_data->tid ||
			 sta_id > mld->fw->ucode_capa.num_stations ||
			 !(baid_data->sta_mask & BIT(sta_id)),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but BAR release received for sta:%d tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid, sta_id,
			 tid))
		goto out_unlock;

	IWL_DEBUG_DROP(mld, "Received a BAR, expect packet loss: nssn %d\n",
		       nssn);

	iwl_mld_release_frames_from_notif(mld, napi, baid, nssn, queue);
out_unlock:
	rcu_read_unlock();
}

void iwl_mld_del_ba(struct iwl_mld *mld, int queue,
		    struct iwl_mld_delba_data *data)
{
	struct iwl_mld_baid_data *ba_data;
	struct iwl_mld_reorder_buffer *reorder_buf;
	struct ieee80211_link_sta *link_sta;
	u8 baid = data->baid;
	u32 sta_id;

	if (WARN_ONCE(baid >= IWL_MAX_BAID, "invalid BAID: %x\n", baid))
		return;

	rcu_read_lock();

	ba_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (WARN_ON_ONCE(!ba_data))
		goto out_unlock;

	/* pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(mld->fw_id_to_link_sta[sta_id]);
	if (WARN_ON_ONCE(IS_ERR_OR_NULL(link_sta) || !link_sta->sta))
		goto out_unlock;

	reorder_buf = &ba_data->reorder_buf[queue];

	/* release all frames that are in the reorder buffer to the stack */
	iwl_mld_reorder_release_frames(mld, link_sta->sta, NULL,
				       ba_data, reorder_buf,
				       ieee80211_sn_add(reorder_buf->head_sn,
							ba_data->buf_size));
out_unlock:
	rcu_read_unlock();
}
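
/*
 * Note (editor's summary): passing head_sn + buf_size as the NSSN above
 * flushes the entire reorder buffer, since every stored frame has a
 * sequence number in [head_sn, head_sn + buf_size).
 */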

/* Returns whether the MPDU was buffered/dropped, or whether it should be
 * passed to the upper layer.
 */
enum iwl_mld_reorder_result
iwl_mld_reorder(struct iwl_mld *mld, struct napi_struct *napi,
		int queue, struct ieee80211_sta *sta,
		struct sk_buff *skb, struct iwl_rx_mpdu_desc *desc)
{
	struct ieee80211_hdr *hdr = (void *)skb_mac_header(skb);
	struct iwl_mld_baid_data *baid_data;
	struct iwl_mld_reorder_buffer *buffer;
	struct iwl_mld_reorder_buf_entry *entries;
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_link_sta *mld_link_sta;
	u32 reorder = le32_to_cpu(desc->reorder_data);
	bool amsdu, last_subframe, is_old_sn, is_dup;
	u8 tid = ieee80211_get_tid(hdr);
	u8 baid;
	u16 nssn, sn;
	u32 sta_mask = 0;
	int index;
	u8 link_id;

	baid = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_BAID_MASK);

	/* This also covers the case of receiving a Block Ack Request
	 * outside a BA session; we'll pass it to mac80211 and that
	 * then sends a delBA action frame.
	 * This also covers pure monitor mode, in which case we won't
	 * have any BA sessions.
	 */
	if (baid == IWL_RX_REORDER_DATA_INVALID_BAID)
		return IWL_MLD_PASS_SKB;

	/* no sta yet */
	if (WARN_ONCE(!sta,
		      "Got valid BAID without a valid station assigned\n"))
		return IWL_MLD_PASS_SKB;

	/* not a data packet */
	if (!ieee80211_is_data_qos(hdr->frame_control) ||
	    is_multicast_ether_addr(hdr->addr1))
		return IWL_MLD_PASS_SKB;

	if (unlikely(!ieee80211_is_data_present(hdr->frame_control)))
		return IWL_MLD_PASS_SKB;

	baid_data = rcu_dereference(mld->fw_id_to_ba[baid]);
	if (!baid_data) {
		IWL_DEBUG_HT(mld,
			     "Got valid BAID but no baid allocated, bypass re-ordering (BAID=%d reorder=0x%x)\n",
			     baid, reorder);
		return IWL_MLD_PASS_SKB;
	}

	for_each_mld_link_sta(mld_sta, mld_link_sta, link_id)
		sta_mask |= BIT(mld_link_sta->fw_id);

	/* verify the BAID is correctly mapped to the sta and tid */
	if (IWL_FW_CHECK(mld,
			 tid != baid_data->tid ||
			 !(sta_mask & baid_data->sta_mask),
			 "BAID 0x%x is mapped to sta_mask:0x%x tid:%d, but was received for sta_mask:0x%x tid:%d\n",
			 baid, baid_data->sta_mask, baid_data->tid,
			 sta_mask, tid))
		return IWL_MLD_PASS_SKB;

	buffer = &baid_data->reorder_buf[queue];
	entries = &baid_data->entries[queue * baid_data->entries_per_queue];

	is_old_sn = !!(reorder & IWL_RX_MPDU_REORDER_BA_OLD_SN);

	if (!buffer->valid && is_old_sn)
		return IWL_MLD_PASS_SKB;

	buffer->valid = true;

	is_dup = !!(desc->status & cpu_to_le32(IWL_RX_MPDU_STATUS_DUPLICATE));

	/* drop any duplicated or outdated packets */
	if (is_dup || is_old_sn)
		return IWL_MLD_DROP_SKB;

	sn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_SN_MASK);
	nssn = u32_get_bits(reorder, IWL_RX_MPDU_REORDER_NSSN_MASK);
	amsdu = desc->mac_flags2 & IWL_RX_MPDU_MFLG2_AMSDU;
	last_subframe = desc->amsdu_info & IWL_RX_MPDU_AMSDU_LAST_SUBFRAME;

	/* release immediately if allowed by nssn and no stored frames */
	if (!buffer->num_stored && ieee80211_sn_less(sn, nssn)) {
		if (!amsdu || last_subframe)
			buffer->head_sn = nssn;
		return IWL_MLD_PASS_SKB;
	}

	/* release immediately if there are no stored frames, and the sn is
	 * equal to the head.
	 * This can happen due to the reorder timer, where NSSN is behind
	 * head_sn. When we released everything, and we got the next frame in
	 * the sequence, according to the NSSN we can't release immediately,
	 * while technically there is no hole and we can move forward.
	 */
	if (!buffer->num_stored && sn == buffer->head_sn) {
		if (!amsdu || last_subframe)
			buffer->head_sn = ieee80211_sn_inc(buffer->head_sn);
		return IWL_MLD_PASS_SKB;
	}

	/* put in reorder buffer */
	index = sn % baid_data->buf_size;
	__skb_queue_tail(&entries[index].frames, skb);
	buffer->num_stored++;

	/* We cannot trust NSSN for A-MSDU sub-frames that are not the last.
	 * The reason is that NSSN advances on the first sub-frame, and may
	 * cause the reorder buffer to advance before all the sub-frames
	 * arrive.
	 *
	 * Example: the reorder buffer contains SN 0 & 2, and we receive an
	 * A-MSDU with SN 1. NSSN for the first sub-frame will be 3, with the
	 * result of the driver releasing SN 0, 1, 2. When sub-frame 1
	 * arrives, the reorder buffer is already ahead and it will be
	 * dropped.
	 * If the last sub-frame is not on this queue, we will get a frame
	 * release notification with an up-to-date NSSN.
	 * If this is the first frame that is stored in the buffer, the
	 * head_sn may be outdated. Update it based on the last NSSN to make
	 * sure it will be released when the frame release notification
	 * arrives.
	 */
	if (!amsdu || last_subframe)
		iwl_mld_reorder_release_frames(mld, sta, napi, baid_data,
					       buffer, nssn);
	else if (buffer->num_stored == 1)
		buffer->head_sn = nssn;

	return IWL_MLD_BUFFERED_SKB;
}
EXPORT_SYMBOL_IF_IWLWIFI_KUNIT(iwl_mld_reorder);
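
/*
 * Scenario trace (editor's illustration): with head_sn == 10, SN 11
 * arriving first (NSSN still 10) is stored at index 11 % buf_size and
 * IWL_MLD_BUFFERED_SKB is returned. When SN 10 then arrives with NSSN 12,
 * it is queued as well and the release walk passes SN 10 and 11 up to
 * mac80211, leaving head_sn == 12.
 */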

static void iwl_mld_rx_agg_session_expired(struct timer_list *t)
{
	struct iwl_mld_baid_data *data =
		timer_container_of(data, t, session_timer);
	struct iwl_mld_baid_data __rcu **rcu_ptr = data->rcu_ptr;
	struct iwl_mld_baid_data *ba_data;
	struct ieee80211_link_sta *link_sta;
	struct iwl_mld_sta *mld_sta;
	unsigned long timeout;
	unsigned int sta_id;

	rcu_read_lock();

	ba_data = rcu_dereference(*rcu_ptr);
	if (WARN_ON(!ba_data))
		goto unlock;

	if (WARN_ON(!ba_data->timeout))
		goto unlock;

	timeout = ba_data->last_rx_timestamp +
		  TU_TO_JIFFIES(ba_data->timeout * 2);
	if (time_is_after_jiffies(timeout)) {
		mod_timer(&ba_data->session_timer, timeout);
		goto unlock;
	}

	/* timer expired, pick any STA ID to find the pointer */
	sta_id = ffs(ba_data->sta_mask) - 1;
	link_sta = rcu_dereference(ba_data->mld->fw_id_to_link_sta[sta_id]);

	/* sta should be valid unless the following happens:
	 * The firmware asserts which triggers a reconfig flow, but
	 * the reconfig fails before we set the pointer to sta into
	 * the fw_id_to_link_sta pointer table. mac80211 can't stop
	 * A-MPDU and hence the timer continues to run. Then, the
	 * timer expires and sta is NULL.
	 */
	if (IS_ERR_OR_NULL(link_sta) || WARN_ON(!link_sta->sta))
		goto unlock;

	mld_sta = iwl_mld_sta_from_mac80211(link_sta->sta);
	ieee80211_rx_ba_timer_expired(mld_sta->vif, link_sta->sta->addr,
				      ba_data->tid);
unlock:
	rcu_read_unlock();
}
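
/*
 * Note (editor's summary): the session timer is armed for twice the
 * negotiated BA timeout (in TUs). On expiry it re-arms itself if traffic
 * was seen recently enough, and otherwise asks mac80211 to tear the
 * session down via ieee80211_rx_ba_timer_expired().
 */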

static int
iwl_mld_stop_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta, int tid)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_REMOVE),
		.remove.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.remove.tid = cpu_to_le32(tid),
	};
	int ret;

	ret = iwl_mld_send_cmd_pdu(mld,
				   WIDE_ID(DATA_PATH_GROUP,
					   RX_BAID_ALLOCATION_CONFIG_CMD),
				   &cmd);
	if (ret)
		return ret;

	IWL_DEBUG_HT(mld, "RX BA Session stopped in fw\n");

	return ret;
}

static int
iwl_mld_start_ba_in_fw(struct iwl_mld *mld, struct ieee80211_sta *sta,
		       int tid, u16 ssn, u16 buf_size)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_ADD),
		.alloc.sta_id_mask =
			cpu_to_le32(iwl_mld_fw_sta_id_mask(mld, sta)),
		.alloc.tid = tid,
		.alloc.ssn = cpu_to_le16(ssn),
		.alloc.win_size = cpu_to_le16(buf_size),
	};
	struct iwl_host_cmd hcmd = {
		.id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD),
		.flags = CMD_WANT_SKB,
		.len[0] = sizeof(cmd),
		.data[0] = &cmd,
	};
	struct iwl_rx_baid_cfg_resp *resp;
	struct iwl_rx_packet *pkt;
	u32 resp_len;
	int ret, baid;

	BUILD_BUG_ON(sizeof(*resp) != sizeof(baid));

	ret = iwl_mld_send_cmd(mld, &hcmd);
	if (ret)
		return ret;

	pkt = hcmd.resp_pkt;

	resp_len = iwl_rx_packet_payload_len(pkt);
	if (IWL_FW_CHECK(mld, resp_len != sizeof(*resp),
			 "BAID_ALLOC_CMD: unexpected response length %d\n",
			 resp_len)) {
		ret = -EIO;
		goto out;
	}

	IWL_DEBUG_HT(mld, "RX BA Session started in fw\n");

	resp = (void *)pkt->data;
	baid = le32_to_cpu(resp->baid);

	if (IWL_FW_CHECK(mld, baid < 0 || baid >= ARRAY_SIZE(mld->fw_id_to_ba),
			 "BAID_ALLOC_CMD: invalid BAID response %d\n", baid)) {
		ret = -EINVAL;
		goto out;
	}

	ret = baid;
out:
	iwl_free_resp(&hcmd);
	return ret;
}
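
/*
 * Note (editor's summary): CMD_WANT_SKB makes the command return with a
 * response packet, which carries the firmware-chosen BAID; on success the
 * function returns that BAID (>= 0) rather than 0.
 */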

static void iwl_mld_init_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data,
					u16 ssn)
{
	for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		reorder_buf->head_sn = ssn;
		reorder_buf->queue = i;

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_head_init(&entries[j].frames);
	}
}
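
/*
 * Note (editor's summary): the entries array is laid out queue-major, so
 * slot j of RX queue i lives at data->entries[i * entries_per_queue + j];
 * entries_per_queue may exceed buf_size because of the cache-line
 * alignment done in iwl_mld_ampdu_rx_start().
 */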

static void iwl_mld_free_reorder_buffer(struct iwl_mld *mld,
					struct iwl_mld_baid_data *data)
{
	struct iwl_mld_delba_data delba_data = {
		.baid = data->baid,
	};

	iwl_mld_sync_rx_queues(mld, IWL_MLD_RXQ_NOTIF_DEL_BA,
			       &delba_data, sizeof(delba_data));

	for (int i = 0; i < mld->trans->info.num_rxqs; i++) {
		struct iwl_mld_reorder_buffer *reorder_buf =
			&data->reorder_buf[i];
		struct iwl_mld_reorder_buf_entry *entries =
			&data->entries[i * data->entries_per_queue];

		if (likely(!reorder_buf->num_stored))
			continue;

		/* This shouldn't happen in regular DELBA since the RX queues
		 * sync internal DELBA notification should trigger a release
		 * of all frames in the reorder buffer.
		 */
		WARN_ON(1);

		for (int j = 0; j < data->buf_size; j++)
			__skb_queue_purge(&entries[j].frames);
	}
}

int iwl_mld_ampdu_rx_start(struct iwl_mld *mld, struct ieee80211_sta *sta,
			   int tid, u16 ssn, u16 buf_size, u16 timeout)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	struct iwl_mld_baid_data *baid_data = NULL;
	u32 reorder_buf_size = buf_size * sizeof(baid_data->entries[0]);
	int ret, baid;
	u32 sta_mask;

	lockdep_assert_wiphy(mld->wiphy);

	if (mld->num_rx_ba_sessions >= IWL_MAX_BAID) {
		IWL_DEBUG_HT(mld,
			     "Max num of RX BA sessions reached; blocking new session\n");
		return -ENOSPC;
	}

	sta_mask = iwl_mld_fw_sta_id_mask(mld, sta);
	if (WARN_ON(!sta_mask))
		return -EINVAL;

	/* sparse doesn't like the __align() so don't check */
#ifndef __CHECKER__
	/* The division below will be OK if either the cache line size
	 * can be divided by the entry size (ALIGN will round up) or if
	 * the entry size can be divided by the cache line size, in which
	 * case the ALIGN() will do nothing.
	 */
	BUILD_BUG_ON(SMP_CACHE_BYTES % sizeof(baid_data->entries[0]) &&
		     sizeof(baid_data->entries[0]) % SMP_CACHE_BYTES);
#endif

	/* Upward align the reorder buffer size to fill an entire cache
	 * line for each queue, to avoid sharing cache lines between
	 * different queues.
	 */
	reorder_buf_size = ALIGN(reorder_buf_size, SMP_CACHE_BYTES);

	/* Allocate here so if allocation fails we can bail out early
	 * before starting the BA session in the firmware
	 */
	baid_data = kzalloc(sizeof(*baid_data) +
			    mld->trans->info.num_rxqs * reorder_buf_size,
			    GFP_KERNEL);
	if (!baid_data)
		return -ENOMEM;

	/* This division is why we need the above BUILD_BUG_ON(),
	 * if that doesn't hold then this will not be right.
	 */
	baid_data->entries_per_queue =
		reorder_buf_size / sizeof(baid_data->entries[0]);
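
	/* Worked example (editor's illustration, with hypothetical sizes):
	 * if sizeof(entries[0]) == 16 and SMP_CACHE_BYTES == 64, then for
	 * buf_size == 66: 66 * 16 = 1056, ALIGN(1056, 64) = 1088, and
	 * entries_per_queue = 1088 / 16 = 68 >= buf_size.
	 */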

	baid = iwl_mld_start_ba_in_fw(mld, sta, tid, ssn, buf_size);
	if (baid < 0) {
		ret = baid;
		goto out_free;
	}

	mld->num_rx_ba_sessions++;
	mld_sta->tid_to_baid[tid] = baid;

	baid_data->baid = baid;
	baid_data->mld = mld;
	baid_data->tid = tid;
	baid_data->buf_size = buf_size;
	baid_data->sta_mask = sta_mask;
	baid_data->timeout = timeout;
	baid_data->last_rx_timestamp = jiffies;
	baid_data->rcu_ptr = &mld->fw_id_to_ba[baid];

	iwl_mld_init_reorder_buffer(mld, baid_data, ssn);

	timer_setup(&baid_data->session_timer, iwl_mld_rx_agg_session_expired,
		    0);
	if (timeout)
		mod_timer(&baid_data->session_timer,
			  TU_TO_EXP_TIME(timeout * 2));

	IWL_DEBUG_HT(mld, "STA mask=0x%x (tid=%d) is assigned to BAID %d\n",
		     baid_data->sta_mask, tid, baid);

	/* protect the BA data with RCU to cover a case where our
	 * internal RX sync mechanism will timeout (not that it's
	 * supposed to happen) and we will free the session data while
	 * RX is being processed in parallel
	 */
	WARN_ON(rcu_access_pointer(mld->fw_id_to_ba[baid]));
	rcu_assign_pointer(mld->fw_id_to_ba[baid], baid_data);

	return 0;

out_free:
	kfree(baid_data);
	return ret;
}
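
/*
 * Note (editor's assumption): this is typically reached from the driver's
 * mac80211 ampdu_action handler for IEEE80211_AMPDU_RX_START; the ssn,
 * buf_size and timeout parameters come from the negotiated ADDBA exchange.
 */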

int iwl_mld_ampdu_rx_stop(struct iwl_mld *mld, struct ieee80211_sta *sta,
			  int tid)
{
	struct iwl_mld_sta *mld_sta = iwl_mld_sta_from_mac80211(sta);
	int baid = mld_sta->tid_to_baid[tid];
	struct iwl_mld_baid_data *baid_data;
	int ret;

	lockdep_assert_wiphy(mld->wiphy);

	/* During firmware restart, do not send the command since the firmware
	 * no longer recognizes the session. Instead, only clear the driver BA
	 * session data.
	 */
	if (!mld->fw_status.in_hw_restart) {
		ret = iwl_mld_stop_ba_in_fw(mld, sta, tid);
		if (ret)
			return ret;
	}

	if (!WARN_ON(mld->num_rx_ba_sessions == 0))
		mld->num_rx_ba_sessions--;

	baid_data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
	if (WARN_ON(!baid_data))
		return -EINVAL;

	if (timer_pending(&baid_data->session_timer))
		timer_shutdown_sync(&baid_data->session_timer);

	iwl_mld_free_reorder_buffer(mld, baid_data);

	RCU_INIT_POINTER(mld->fw_id_to_ba[baid], NULL);
	kfree_rcu(baid_data, rcu_head);

	IWL_DEBUG_HT(mld, "BAID %d is free\n", baid);

	return 0;
}

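/*
 * Editor's note: re-points existing BA sessions when a station's firmware
 * ID mask changes (presumably across MLO link reconfiguration), so the
 * firmware keeps the sessions alive under the new station mask.
 */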
int iwl_mld_update_sta_baids(struct iwl_mld *mld,
			     u32 old_sta_mask,
			     u32 new_sta_mask)
{
	struct iwl_rx_baid_cfg_cmd cmd = {
		.action = cpu_to_le32(IWL_RX_BAID_ACTION_MODIFY),
		.modify.old_sta_id_mask = cpu_to_le32(old_sta_mask),
		.modify.new_sta_id_mask = cpu_to_le32(new_sta_mask),
	};
	u32 cmd_id = WIDE_ID(DATA_PATH_GROUP, RX_BAID_ALLOCATION_CONFIG_CMD);
	int baid;

	/* mac80211 will remove sessions later, but we ignore all that */
	if (mld->fw_status.in_hw_restart)
		return 0;

	BUILD_BUG_ON(sizeof(struct iwl_rx_baid_cfg_resp) != sizeof(baid));

	for (baid = 0; baid < ARRAY_SIZE(mld->fw_id_to_ba); baid++) {
		struct iwl_mld_baid_data *data;
		int ret;

		data = wiphy_dereference(mld->wiphy, mld->fw_id_to_ba[baid]);
		if (!data)
			continue;

		if (!(data->sta_mask & old_sta_mask))
			continue;

		WARN_ONCE(data->sta_mask != old_sta_mask,
			  "BAID data for %d corrupted - expected 0x%x found 0x%x\n",
			  baid, old_sta_mask, data->sta_mask);

		cmd.modify.tid = cpu_to_le32(data->tid);

		ret = iwl_mld_send_cmd_pdu(mld, cmd_id, &cmd);
		if (ret)
			return ret;
		data->sta_mask = new_sta_mask;
	}

	return 0;
}
678