xref: /linux/drivers/net/wireless/ath/ath12k/dp_rx.c (revision c5fbdf0ba7c1a6ed52dc3650bee73ce00c86cf7f)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/ieee80211.h>
8 #include <linux/kernel.h>
9 #include <linux/skbuff.h>
10 #include <crypto/hash.h>
11 #include "core.h"
12 #include "debug.h"
13 #include "hal_desc.h"
14 #include "hw.h"
15 #include "dp_rx.h"
16 #include "hal_rx.h"
17 #include "dp_tx.h"
18 #include "peer.h"
19 #include "dp_mon.h"
20 #include "debugfs_htt_stats.h"
21 
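/* Note: the value below is in jiffies, not milliseconds; 2 * HZ is a
 * 2 second (2000 ms) window for reassembling fragmented MPDUs.
 */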
22 #define ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
23 
24 static enum hal_encrypt_type ath12k_dp_rx_h_enctype(struct ath12k_base *ab,
25 						    struct hal_rx_desc *desc)
26 {
27 	if (!ab->hal_rx_ops->rx_desc_encrypt_valid(desc))
28 		return HAL_ENCRYPT_TYPE_OPEN;
29 
30 	return ab->hal_rx_ops->rx_desc_get_encrypt_type(desc);
31 }
32 
33 u8 ath12k_dp_rx_h_decap_type(struct ath12k_base *ab,
34 			     struct hal_rx_desc *desc)
35 {
36 	return ab->hal_rx_ops->rx_desc_get_decap_type(desc);
37 }
38 
39 static u8 ath12k_dp_rx_h_mesh_ctl_present(struct ath12k_base *ab,
40 					  struct hal_rx_desc *desc)
41 {
42 	return ab->hal_rx_ops->rx_desc_get_mesh_ctl(desc);
43 }
44 
45 static bool ath12k_dp_rx_h_seq_ctrl_valid(struct ath12k_base *ab,
46 					  struct hal_rx_desc *desc)
47 {
48 	return ab->hal_rx_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
49 }
50 
51 static bool ath12k_dp_rx_h_fc_valid(struct ath12k_base *ab,
52 				    struct hal_rx_desc *desc)
53 {
54 	return ab->hal_rx_ops->rx_desc_get_mpdu_fc_valid(desc);
55 }
56 
57 static bool ath12k_dp_rx_h_more_frags(struct ath12k_base *ab,
58 				      struct sk_buff *skb)
59 {
60 	struct ieee80211_hdr *hdr;
61 
62 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
63 	return ieee80211_has_morefrags(hdr->frame_control);
64 }
65 
66 static u16 ath12k_dp_rx_h_frag_no(struct ath12k_base *ab,
67 				  struct sk_buff *skb)
68 {
69 	struct ieee80211_hdr *hdr;
70 
71 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hal.hal_desc_sz);
72 	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
73 }
74 
75 static u16 ath12k_dp_rx_h_seq_no(struct ath12k_base *ab,
76 				 struct hal_rx_desc *desc)
77 {
78 	return ab->hal_rx_ops->rx_desc_get_mpdu_start_seq_no(desc);
79 }
80 
81 static bool ath12k_dp_rx_h_msdu_done(struct ath12k_base *ab,
82 				     struct hal_rx_desc *desc)
83 {
84 	return ab->hal_rx_ops->dp_rx_h_msdu_done(desc);
85 }
86 
87 static bool ath12k_dp_rx_h_l4_cksum_fail(struct ath12k_base *ab,
88 					 struct hal_rx_desc *desc)
89 {
90 	return ab->hal_rx_ops->dp_rx_h_l4_cksum_fail(desc);
91 }
92 
93 static bool ath12k_dp_rx_h_ip_cksum_fail(struct ath12k_base *ab,
94 					 struct hal_rx_desc *desc)
95 {
96 	return ab->hal_rx_ops->dp_rx_h_ip_cksum_fail(desc);
97 }
98 
99 static bool ath12k_dp_rx_h_is_decrypted(struct ath12k_base *ab,
100 					struct hal_rx_desc *desc)
101 {
102 	return ab->hal_rx_ops->dp_rx_h_is_decrypted(desc);
103 }
104 
105 u32 ath12k_dp_rx_h_mpdu_err(struct ath12k_base *ab,
106 			    struct hal_rx_desc *desc)
107 {
108 	return ab->hal_rx_ops->dp_rx_h_mpdu_err(desc);
109 }
110 
111 static u16 ath12k_dp_rx_h_msdu_len(struct ath12k_base *ab,
112 				   struct hal_rx_desc *desc)
113 {
114 	return ab->hal_rx_ops->rx_desc_get_msdu_len(desc);
115 }
116 
117 static u8 ath12k_dp_rx_h_sgi(struct ath12k_base *ab,
118 			     struct hal_rx_desc *desc)
119 {
120 	return ab->hal_rx_ops->rx_desc_get_msdu_sgi(desc);
121 }
122 
123 static u8 ath12k_dp_rx_h_rate_mcs(struct ath12k_base *ab,
124 				  struct hal_rx_desc *desc)
125 {
126 	return ab->hal_rx_ops->rx_desc_get_msdu_rate_mcs(desc);
127 }
128 
129 static u8 ath12k_dp_rx_h_rx_bw(struct ath12k_base *ab,
130 			       struct hal_rx_desc *desc)
131 {
132 	return ab->hal_rx_ops->rx_desc_get_msdu_rx_bw(desc);
133 }
134 
135 static u32 ath12k_dp_rx_h_freq(struct ath12k_base *ab,
136 			       struct hal_rx_desc *desc)
137 {
138 	return ab->hal_rx_ops->rx_desc_get_msdu_freq(desc);
139 }
140 
141 static u8 ath12k_dp_rx_h_pkt_type(struct ath12k_base *ab,
142 				  struct hal_rx_desc *desc)
143 {
144 	return ab->hal_rx_ops->rx_desc_get_msdu_pkt_type(desc);
145 }
146 
147 static u8 ath12k_dp_rx_h_nss(struct ath12k_base *ab,
148 			     struct hal_rx_desc *desc)
149 {
150 	return hweight8(ab->hal_rx_ops->rx_desc_get_msdu_nss(desc));
151 }
152 
153 static u8 ath12k_dp_rx_h_tid(struct ath12k_base *ab,
154 			     struct hal_rx_desc *desc)
155 {
156 	return ab->hal_rx_ops->rx_desc_get_mpdu_tid(desc);
157 }
158 
159 static u16 ath12k_dp_rx_h_peer_id(struct ath12k_base *ab,
160 				  struct hal_rx_desc *desc)
161 {
162 	return ab->hal_rx_ops->rx_desc_get_mpdu_peer_id(desc);
163 }
164 
165 u8 ath12k_dp_rx_h_l3pad(struct ath12k_base *ab,
166 			struct hal_rx_desc *desc)
167 {
168 	return ab->hal_rx_ops->rx_desc_get_l3_pad_bytes(desc);
169 }
170 
171 static bool ath12k_dp_rx_h_first_msdu(struct ath12k_base *ab,
172 				      struct hal_rx_desc *desc)
173 {
174 	return ab->hal_rx_ops->rx_desc_get_first_msdu(desc);
175 }
176 
177 static bool ath12k_dp_rx_h_last_msdu(struct ath12k_base *ab,
178 				     struct hal_rx_desc *desc)
179 {
180 	return ab->hal_rx_ops->rx_desc_get_last_msdu(desc);
181 }
182 
183 static void ath12k_dp_rx_desc_end_tlv_copy(struct ath12k_base *ab,
184 					   struct hal_rx_desc *fdesc,
185 					   struct hal_rx_desc *ldesc)
186 {
187 	ab->hal_rx_ops->rx_desc_copy_end_tlv(fdesc, ldesc);
188 }
189 
190 static void ath12k_dp_rxdesc_set_msdu_len(struct ath12k_base *ab,
191 					  struct hal_rx_desc *desc,
192 					  u16 len)
193 {
194 	ab->hal_rx_ops->rx_desc_set_msdu_len(desc, len);
195 }
196 
197 u32 ath12k_dp_rxdesc_get_ppduid(struct ath12k_base *ab,
198 				struct hal_rx_desc *rx_desc)
199 {
200 	return ab->hal_rx_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
201 }
202 
203 bool ath12k_dp_rxdesc_mpdu_valid(struct ath12k_base *ab,
204 				 struct hal_rx_desc *rx_desc)
205 {
206 	u32 tlv_tag;
207 
208 	tlv_tag = ab->hal_rx_ops->rx_desc_get_mpdu_start_tag(rx_desc);
209 
210 	return tlv_tag == HAL_RX_MPDU_START;
211 }
212 
213 static bool ath12k_dp_rx_h_is_da_mcbc(struct ath12k_base *ab,
214 				      struct hal_rx_desc *desc)
215 {
216 	return (ath12k_dp_rx_h_first_msdu(ab, desc) &&
217 		ab->hal_rx_ops->rx_desc_is_da_mcbc(desc));
218 }
219 
220 static bool ath12k_dp_rxdesc_mac_addr2_valid(struct ath12k_base *ab,
221 					     struct hal_rx_desc *desc)
222 {
223 	return ab->hal_rx_ops->rx_desc_mac_addr2_valid(desc);
224 }
225 
226 static u8 *ath12k_dp_rxdesc_get_mpdu_start_addr2(struct ath12k_base *ab,
227 						 struct hal_rx_desc *desc)
228 {
229 	return ab->hal_rx_ops->rx_desc_mpdu_start_addr2(desc);
230 }
231 
232 static void ath12k_dp_rx_desc_get_dot11_hdr(struct ath12k_base *ab,
233 					    struct hal_rx_desc *desc,
234 					    struct ieee80211_hdr *hdr)
235 {
236 	ab->hal_rx_ops->rx_desc_get_dot11_hdr(desc, hdr);
237 }
238 
239 static void ath12k_dp_rx_desc_get_crypto_header(struct ath12k_base *ab,
240 						struct hal_rx_desc *desc,
241 						u8 *crypto_hdr,
242 						enum hal_encrypt_type enctype)
243 {
244 	ab->hal_rx_ops->rx_desc_get_crypto_header(desc, crypto_hdr, enctype);
245 }
246 
247 static inline u8 ath12k_dp_rx_get_msdu_src_link(struct ath12k_base *ab,
248 						struct hal_rx_desc *desc)
249 {
250 	return ab->hal_rx_ops->rx_desc_get_msdu_src_link_id(desc);
251 }
252 
253 static void ath12k_dp_clean_up_skb_list(struct sk_buff_head *skb_list)
254 {
255 	struct sk_buff *skb;
256 
257 	while ((skb = __skb_dequeue(skb_list)))
258 		dev_kfree_skb_any(skb);
259 }
260 
261 static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
262 				       struct list_head *head,
263 				       size_t count)
264 {
265 	struct list_head *cur;
266 	struct ath12k_rx_desc_info *rx_desc;
267 	size_t nodes = 0;
268 
269 	if (!count) {
270 		INIT_LIST_HEAD(list);
271 		goto out;
272 	}
273 
274 	list_for_each(cur, head) {
275 		if (!count)
276 			break;
277 
278 		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
279 		rx_desc->in_use = true;
280 
281 		count--;
282 		nodes++;
283 	}
284 
285 	list_cut_before(list, head, cur);
286 out:
287 	return nodes;
288 }
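
/* A worked sketch of the cut above, assuming a donor list D0->D1->D2->D3
 * and count = 2: the loop marks D0 and D1 in_use and stops with cur at
 * D2, so list_cut_before() moves D0..D1 onto 'list' while the donor
 * keeps D2..D3. If count is larger than the donor list, the loop runs
 * to the head and the entire donor list is cut.
 */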
289 
290 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
291 				      struct list_head *used_list)
292 {
293 	struct ath12k_rx_desc_info *rx_desc, *safe;
294 
295 	/* Reset the use flag */
296 	list_for_each_entry_safe(rx_desc, safe, used_list, list)
297 		rx_desc->in_use = false;
298 
299 	spin_lock_bh(&dp->rx_desc_lock);
300 	list_splice_tail(used_list, &dp->rx_desc_free_list);
301 	spin_unlock_bh(&dp->rx_desc_lock);
302 }
303 
304 /* Returns number of Rx buffers replenished */
305 int ath12k_dp_rx_bufs_replenish(struct ath12k_base *ab,
306 				struct dp_rxdma_ring *rx_ring,
307 				struct list_head *used_list,
308 				int req_entries)
309 {
310 	struct ath12k_buffer_addr *desc;
311 	struct hal_srng *srng;
312 	struct sk_buff *skb;
313 	int num_free;
314 	int num_remain;
315 	u32 cookie;
316 	dma_addr_t paddr;
317 	struct ath12k_dp *dp = &ab->dp;
318 	struct ath12k_rx_desc_info *rx_desc;
319 	enum hal_rx_buf_return_buf_manager mgr = ab->hw_params->hal_params->rx_buf_rbm;
320 
321 	req_entries = min(req_entries, rx_ring->bufs_max);
322 
323 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
324 
325 	spin_lock_bh(&srng->lock);
326 
327 	ath12k_hal_srng_access_begin(ab, srng);
328 
329 	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
330 	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
331 		req_entries = num_free;
332 
333 	req_entries = min(num_free, req_entries);
334 	num_remain = req_entries;
335 
336 	if (!num_remain)
337 		goto out;
338 
339 	/* Get the descriptor from free list */
340 	if (list_empty(used_list)) {
341 		spin_lock_bh(&dp->rx_desc_lock);
342 		req_entries = ath12k_dp_list_cut_nodes(used_list,
343 						       &dp->rx_desc_free_list,
344 						       num_remain);
345 		spin_unlock_bh(&dp->rx_desc_lock);
346 		num_remain = req_entries;
347 	}
348 
349 	while (num_remain > 0) {
350 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
351 				    DP_RX_BUFFER_ALIGN_SIZE);
352 		if (!skb)
353 			break;
354 
355 		if (!IS_ALIGNED((unsigned long)skb->data,
356 				DP_RX_BUFFER_ALIGN_SIZE)) {
357 			skb_pull(skb,
358 				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
359 				 skb->data);
360 		}
361 
362 		paddr = dma_map_single(ab->dev, skb->data,
363 				       skb->len + skb_tailroom(skb),
364 				       DMA_FROM_DEVICE);
365 		if (dma_mapping_error(ab->dev, paddr))
366 			goto fail_free_skb;
367 
368 		rx_desc = list_first_entry_or_null(used_list,
369 						   struct ath12k_rx_desc_info,
370 						   list);
371 		if (!rx_desc)
372 			goto fail_dma_unmap;
373 
374 		rx_desc->skb = skb;
375 		cookie = rx_desc->cookie;
376 
377 		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
378 		if (!desc)
379 			goto fail_dma_unmap;
380 
381 		list_del(&rx_desc->list);
382 		ATH12K_SKB_RXCB(skb)->paddr = paddr;
383 
384 		num_remain--;
385 
386 		ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
387 	}
388 
389 	goto out;
390 
391 fail_dma_unmap:
392 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
393 			 DMA_FROM_DEVICE);
394 fail_free_skb:
395 	dev_kfree_skb_any(skb);
396 out:
397 	ath12k_hal_srng_access_end(ab, srng);
398 
399 	if (!list_empty(used_list))
400 		ath12k_dp_rx_enqueue_free(dp, used_list);
401 
402 	spin_unlock_bh(&srng->lock);
403 
404 	return req_entries - num_remain;
405 }
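
/* A minimal caller sketch (mirroring ath12k_dp_rxdma_ring_buf_setup()
 * below): an empty local list makes the function pull descriptors from
 * dp->rx_desc_free_list, and req_entries == 0 means "top up only when
 * more than 3/4 of the ring is free":
 *
 *	LIST_HEAD(list);
 *
 *	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
 */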
406 
407 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
408 					     struct dp_rxdma_mon_ring *rx_ring)
409 {
410 	struct sk_buff *skb;
411 	int buf_id;
412 
413 	spin_lock_bh(&rx_ring->idr_lock);
414 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
415 		idr_remove(&rx_ring->bufs_idr, buf_id);
416 		/* TODO: Understand where the internal driver does this dma_unmap
417 		 * of rxdma_buffer.
418 		 */
419 		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
420 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
421 		dev_kfree_skb_any(skb);
422 	}
423 
424 	idr_destroy(&rx_ring->bufs_idr);
425 	spin_unlock_bh(&rx_ring->idr_lock);
426 
427 	return 0;
428 }
429 
430 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
431 {
432 	struct ath12k_dp *dp = &ab->dp;
433 	int i;
434 
435 	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
436 
437 	if (ab->hw_params->rxdma1_enable)
438 		return 0;
439 
440 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
441 		ath12k_dp_rxdma_mon_buf_ring_free(ab,
442 						  &dp->rx_mon_status_refill_ring[i]);
443 
444 	return 0;
445 }
446 
447 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
448 					      struct dp_rxdma_mon_ring *rx_ring,
449 					      u32 ringtype)
450 {
451 	int num_entries;
452 
453 	num_entries = rx_ring->refill_buf_ring.size /
454 		ath12k_hal_srng_get_entrysize(ab, ringtype);
455 
456 	rx_ring->bufs_max = num_entries;
457 
458 	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
459 		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
460 						    num_entries);
461 	else
462 		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
463 
464 	return 0;
465 }
466 
467 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
468 					  struct dp_rxdma_ring *rx_ring)
469 {
470 	LIST_HEAD(list);
471 
472 	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
473 			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
474 
475 	ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
476 
477 	return 0;
478 }
479 
480 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
481 {
482 	struct ath12k_dp *dp = &ab->dp;
483 	struct dp_rxdma_mon_ring *mon_ring;
484 	int ret, i;
485 
486 	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
487 	if (ret) {
488 		ath12k_warn(ab,
489 			    "failed to setup HAL_RXDMA_BUF\n");
490 		return ret;
491 	}
492 
493 	if (ab->hw_params->rxdma1_enable) {
494 		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
495 							 &dp->rxdma_mon_buf_ring,
496 							 HAL_RXDMA_MONITOR_BUF);
497 		if (ret)
498 			ath12k_warn(ab,
499 				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
500 		return ret;
501 	}
502 
503 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
504 		mon_ring = &dp->rx_mon_status_refill_ring[i];
505 		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
506 							 HAL_RXDMA_MONITOR_STATUS);
507 		if (ret) {
508 			ath12k_warn(ab,
509 				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
510 			return ret;
511 		}
512 	}
513 
514 	return 0;
515 }
516 
517 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
518 {
519 	struct ath12k_pdev_dp *dp = &ar->dp;
520 	struct ath12k_base *ab = ar->ab;
521 	int i;
522 
523 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
524 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
525 }
526 
527 void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
528 {
529 	struct ath12k_dp *dp = &ab->dp;
530 	int i;
531 
532 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
533 		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
534 }
535 
536 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
537 {
538 	struct ath12k_dp *dp = &ab->dp;
539 	int ret;
540 	int i;
541 
542 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
543 		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
544 					   HAL_REO_DST, i, 0,
545 					   DP_REO_DST_RING_SIZE);
546 		if (ret) {
547 			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
548 			goto err_reo_cleanup;
549 		}
550 	}
551 
552 	return 0;
553 
554 err_reo_cleanup:
555 	ath12k_dp_rx_pdev_reo_cleanup(ab);
556 
557 	return ret;
558 }
559 
560 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
561 {
562 	struct ath12k_pdev_dp *dp = &ar->dp;
563 	struct ath12k_base *ab = ar->ab;
564 	int i;
565 	int ret;
566 	u32 mac_id = dp->mac_id;
567 
568 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
569 		ret = ath12k_dp_srng_setup(ar->ab,
570 					   &dp->rxdma_mon_dst_ring[i],
571 					   HAL_RXDMA_MONITOR_DST,
572 					   0, mac_id + i,
573 					   DP_RXDMA_MONITOR_DST_RING_SIZE);
574 		if (ret) {
575 			ath12k_warn(ar->ab,
576 				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
577 			return ret;
578 		}
579 	}
580 
581 	return 0;
582 }
583 
584 void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
585 {
586 	struct ath12k_dp *dp = &ab->dp;
587 	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
588 	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
589 
590 	spin_lock_bh(&dp->reo_cmd_lock);
591 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
592 		list_del(&cmd->list);
593 		dma_unmap_single(ab->dev, cmd->data.qbuf.paddr_aligned,
594 				 cmd->data.qbuf.size, DMA_BIDIRECTIONAL);
595 		kfree(cmd->data.qbuf.vaddr);
596 		kfree(cmd);
597 	}
598 
599 	list_for_each_entry_safe(cmd_cache, tmp_cache,
600 				 &dp->reo_cmd_cache_flush_list, list) {
601 		list_del(&cmd_cache->list);
602 		dp->reo_cmd_cache_flush_count--;
603 		dma_unmap_single(ab->dev, cmd_cache->data.qbuf.paddr_aligned,
604 				 cmd_cache->data.qbuf.size, DMA_BIDIRECTIONAL);
605 		kfree(cmd_cache->data.qbuf.vaddr);
606 		kfree(cmd_cache);
607 	}
608 	spin_unlock_bh(&dp->reo_cmd_lock);
609 }
610 
611 static void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
612 				   enum hal_reo_cmd_status status)
613 {
614 	struct ath12k_dp_rx_tid *rx_tid = ctx;
615 
616 	if (status != HAL_REO_CMD_SUCCESS)
617 		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
618 			    rx_tid->tid, status);
619 
620 	dma_unmap_single(dp->ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
621 			 DMA_BIDIRECTIONAL);
622 	kfree(rx_tid->qbuf.vaddr);
623 	rx_tid->qbuf.vaddr = NULL;
624 }
625 
626 static int ath12k_dp_reo_cmd_send(struct ath12k_base *ab, struct ath12k_dp_rx_tid *rx_tid,
627 				  enum hal_reo_cmd_type type,
628 				  struct ath12k_hal_reo_cmd *cmd,
629 				  void (*cb)(struct ath12k_dp *dp, void *ctx,
630 					     enum hal_reo_cmd_status status))
631 {
632 	struct ath12k_dp *dp = &ab->dp;
633 	struct ath12k_dp_rx_reo_cmd *dp_cmd;
634 	struct hal_srng *cmd_ring;
635 	int cmd_num;
636 
637 	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
638 	cmd_num = ath12k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);
639 
640 	/* cmd_num should start from 1; on failure, return the error code */
641 	if (cmd_num < 0)
642 		return cmd_num;
643 
644 	/* REO cmd ring descriptors have cmd_num starting from 1 */
645 	if (cmd_num == 0)
646 		return -EINVAL;
647 
648 	if (!cb)
649 		return 0;
650 
651 	/* Can this be optimized so that we keep the pending command list only
652 	 * for the tid delete command, to free up the resource on the command
653 	 * status indication?
654 	 */
655 	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
656 
657 	if (!dp_cmd)
658 		return -ENOMEM;
659 
660 	memcpy(&dp_cmd->data, rx_tid, sizeof(*rx_tid));
661 	dp_cmd->cmd_num = cmd_num;
662 	dp_cmd->handler = cb;
663 
664 	spin_lock_bh(&dp->reo_cmd_lock);
665 	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
666 	spin_unlock_bh(&dp->reo_cmd_lock);
667 
668 	return 0;
669 }
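
/* Commands sent with a callback are tracked on dp->reo_cmd_list until
 * their status arrives; a NULL cb makes the command fire-and-forget.
 * For example, a cache flush that needs no completion handling (as in
 * ath12k_dp_reo_cache_flush() below) is issued as:
 *
 *	ret = ath12k_dp_reo_cmd_send(ab, rx_tid, HAL_REO_CMD_FLUSH_CACHE,
 *				     &cmd, NULL);
 */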
670 
671 static void ath12k_dp_reo_cache_flush(struct ath12k_base *ab,
672 				      struct ath12k_dp_rx_tid *rx_tid)
673 {
674 	struct ath12k_hal_reo_cmd cmd = {0};
675 	unsigned long tot_desc_sz, desc_sz;
676 	int ret;
677 
678 	tot_desc_sz = rx_tid->qbuf.size;
679 	desc_sz = ath12k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
680 
681 	while (tot_desc_sz > desc_sz) {
682 		tot_desc_sz -= desc_sz;
683 		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned + tot_desc_sz);
684 		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
685 		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
686 					     HAL_REO_CMD_FLUSH_CACHE, &cmd,
687 					     NULL);
688 		if (ret)
689 			ath12k_warn(ab,
690 				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
691 				    rx_tid->tid, ret);
692 	}
693 
694 	memset(&cmd, 0, sizeof(cmd));
695 	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
696 	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
697 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
698 	ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
699 				     HAL_REO_CMD_FLUSH_CACHE,
700 				     &cmd, ath12k_dp_reo_cmd_free);
701 	if (ret) {
702 		ath12k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
703 			   rx_tid->tid, ret);
704 		dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
705 				 DMA_BIDIRECTIONAL);
706 		kfree(rx_tid->qbuf.vaddr);
707 		rx_tid->qbuf.vaddr = NULL;
708 	}
709 }
710 
711 static void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
712 				      enum hal_reo_cmd_status status)
713 {
714 	struct ath12k_base *ab = dp->ab;
715 	struct ath12k_dp_rx_tid *rx_tid = ctx;
716 	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
717 
718 	if (status == HAL_REO_CMD_DRAIN) {
719 		goto free_desc;
720 	} else if (status != HAL_REO_CMD_SUCCESS) {
721 		/* Shouldn't happen! Cleanup in case of other failure? */
722 		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
723 			    rx_tid->tid, status);
724 		return;
725 	}
726 
727 	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
728 	if (!elem)
729 		goto free_desc;
730 
731 	elem->ts = jiffies;
732 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
733 
734 	spin_lock_bh(&dp->reo_cmd_lock);
735 	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
736 	dp->reo_cmd_cache_flush_count++;
737 
738 	/* Flush and invalidate aged REO desc from HW cache */
739 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
740 				 list) {
741 		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
742 		    time_after(jiffies, elem->ts +
743 			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
744 			list_del(&elem->list);
745 			dp->reo_cmd_cache_flush_count--;
746 
747 			/* Unlock the reo_cmd_lock before using ath12k_dp_reo_cmd_send()
748 			 * within ath12k_dp_reo_cache_flush. The reo_cmd_cache_flush_list
749 			 * is used in only two contexts: in this function, called from
750 			 * napi, and in ath12k_dp_free during core destroy. Before
751 			 * dp_free, the irqs are disabled and synchronized, so there
752 			 * wouldn't be any race against add or delete to this list.
753 			 * Hence the unlock-lock is safe here.
754 			 */
755 			spin_unlock_bh(&dp->reo_cmd_lock);
756 
757 			ath12k_dp_reo_cache_flush(ab, &elem->data);
758 			kfree(elem);
759 			spin_lock_bh(&dp->reo_cmd_lock);
760 		}
761 	}
762 	spin_unlock_bh(&dp->reo_cmd_lock);
763 
764 	return;
765 free_desc:
766 	dma_unmap_single(ab->dev, rx_tid->qbuf.paddr_aligned, rx_tid->qbuf.size,
767 			 DMA_BIDIRECTIONAL);
768 	kfree(rx_tid->qbuf.vaddr);
769 	rx_tid->qbuf.vaddr = NULL;
770 }
771 
772 static void ath12k_peer_rx_tid_qref_setup(struct ath12k_base *ab, u16 peer_id, u16 tid,
773 					  dma_addr_t paddr)
774 {
775 	struct ath12k_reo_queue_ref *qref;
776 	struct ath12k_dp *dp = &ab->dp;
777 	bool ml_peer = false;
778 
779 	if (!ab->hw_params->reoq_lut_support)
780 		return;
781 
782 	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
783 		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
784 		ml_peer = true;
785 	}
786 
787 	if (ml_peer)
788 		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
789 				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
790 	else
791 		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
792 				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
793 
794 	qref->info0 = u32_encode_bits(lower_32_bits(paddr),
795 				      BUFFER_ADDR_INFO0_ADDR);
796 	qref->info1 = u32_encode_bits(upper_32_bits(paddr),
797 				      BUFFER_ADDR_INFO1_ADDR) |
798 		      u32_encode_bits(tid, DP_REO_QREF_NUM);
799 	ath12k_hal_reo_shared_qaddr_cache_clear(ab);
800 }
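
/* The LUT is laid out as IEEE80211_NUM_TIDS + 1 (i.e. 17) qref slots
 * per peer: TIDs 0-15 plus the non-QoS TID. For example, peer_id 2 and
 * tid 3 land in slot 2 * 17 + 3 = 37 of the (ML) REO queue LUT.
 */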
801 
802 static void ath12k_peer_rx_tid_qref_reset(struct ath12k_base *ab, u16 peer_id, u16 tid)
803 {
804 	struct ath12k_reo_queue_ref *qref;
805 	struct ath12k_dp *dp = &ab->dp;
806 	bool ml_peer = false;
807 
808 	if (!ab->hw_params->reoq_lut_support)
809 		return;
810 
811 	if (peer_id & ATH12K_PEER_ML_ID_VALID) {
812 		peer_id &= ~ATH12K_PEER_ML_ID_VALID;
813 		ml_peer = true;
814 	}
815 
816 	if (ml_peer)
817 		qref = (struct ath12k_reo_queue_ref *)dp->ml_reoq_lut.vaddr +
818 				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
819 	else
820 		qref = (struct ath12k_reo_queue_ref *)dp->reoq_lut.vaddr +
821 				(peer_id * (IEEE80211_NUM_TIDS + 1) + tid);
822 
823 	qref->info0 = u32_encode_bits(0, BUFFER_ADDR_INFO0_ADDR);
824 	qref->info1 = u32_encode_bits(0, BUFFER_ADDR_INFO1_ADDR) |
825 		      u32_encode_bits(tid, DP_REO_QREF_NUM);
826 }
827 
828 void ath12k_dp_rx_peer_tid_delete(struct ath12k *ar,
829 				  struct ath12k_peer *peer, u8 tid)
830 {
831 	struct ath12k_hal_reo_cmd cmd = {0};
832 	struct ath12k_dp_rx_tid *rx_tid = &peer->rx_tid[tid];
833 	int ret;
834 
835 	if (!rx_tid->active)
836 		return;
837 
838 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
839 	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
840 	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
841 	cmd.upd0 = HAL_REO_CMD_UPD0_VLD;
842 	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
843 				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
844 				     ath12k_dp_rx_tid_del_func);
845 	if (ret) {
846 		ath12k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
847 			   tid, ret);
848 		dma_unmap_single(ar->ab->dev, rx_tid->qbuf.paddr_aligned,
849 				 rx_tid->qbuf.size, DMA_BIDIRECTIONAL);
850 		kfree(rx_tid->qbuf.vaddr);
851 		rx_tid->qbuf.vaddr = NULL;
852 	}
853 
854 	if (peer->mlo)
855 		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->ml_id, tid);
856 	else
857 		ath12k_peer_rx_tid_qref_reset(ar->ab, peer->peer_id, tid);
858 
859 	rx_tid->active = false;
860 }
861 
862 int ath12k_dp_rx_link_desc_return(struct ath12k_base *ab,
863 				  struct ath12k_buffer_addr *buf_addr_info,
864 				  enum hal_wbm_rel_bm_act action)
865 {
866 	struct hal_wbm_release_ring *desc;
867 	struct ath12k_dp *dp = &ab->dp;
868 	struct hal_srng *srng;
869 	int ret = 0;
870 
871 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
872 
873 	spin_lock_bh(&srng->lock);
874 
875 	ath12k_hal_srng_access_begin(ab, srng);
876 
877 	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
878 	if (!desc) {
879 		ret = -ENOBUFS;
880 		goto exit;
881 	}
882 
883 	ath12k_hal_rx_msdu_link_desc_set(ab, desc, buf_addr_info, action);
884 
885 exit:
886 	ath12k_hal_srng_access_end(ab, srng);
887 
888 	spin_unlock_bh(&srng->lock);
889 
890 	return ret;
891 }
892 
893 static void ath12k_dp_rx_frags_cleanup(struct ath12k_dp_rx_tid *rx_tid,
894 				       bool rel_link_desc)
895 {
896 	struct ath12k_buffer_addr *buf_addr_info;
897 	struct ath12k_base *ab = rx_tid->ab;
898 
899 	lockdep_assert_held(&ab->base_lock);
900 
901 	if (rx_tid->dst_ring_desc) {
902 		if (rel_link_desc) {
903 			buf_addr_info = &rx_tid->dst_ring_desc->buf_addr_info;
904 			ath12k_dp_rx_link_desc_return(ab, buf_addr_info,
905 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
906 		}
907 		kfree(rx_tid->dst_ring_desc);
908 		rx_tid->dst_ring_desc = NULL;
909 	}
910 
911 	rx_tid->cur_sn = 0;
912 	rx_tid->last_frag_no = 0;
913 	rx_tid->rx_frag_bitmap = 0;
914 	__skb_queue_purge(&rx_tid->rx_frags);
915 }
916 
917 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_peer *peer)
918 {
919 	struct ath12k_dp_rx_tid *rx_tid;
920 	int i;
921 
922 	lockdep_assert_held(&ar->ab->base_lock);
923 
924 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
925 		rx_tid = &peer->rx_tid[i];
926 
927 		ath12k_dp_rx_peer_tid_delete(ar, peer, i);
928 		ath12k_dp_rx_frags_cleanup(rx_tid, true);
929 
930 		spin_unlock_bh(&ar->ab->base_lock);
931 		timer_delete_sync(&rx_tid->frag_timer);
932 		spin_lock_bh(&ar->ab->base_lock);
933 	}
934 }
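
/* base_lock is dropped around timer_delete_sync() above because the
 * fragment timer handler itself acquires base_lock (an assumption from
 * the locking pattern here); deleting the timer synchronously while
 * holding the lock could otherwise deadlock against a running handler.
 */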
935 
936 static int ath12k_peer_rx_tid_reo_update(struct ath12k *ar,
937 					 struct ath12k_peer *peer,
938 					 struct ath12k_dp_rx_tid *rx_tid,
939 					 u32 ba_win_sz, u16 ssn,
940 					 bool update_ssn)
941 {
942 	struct ath12k_hal_reo_cmd cmd = {0};
943 	int ret;
944 
945 	cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
946 	cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
947 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
948 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
949 	cmd.ba_window_size = ba_win_sz;
950 
951 	if (update_ssn) {
952 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
953 		cmd.upd2 = u32_encode_bits(ssn, HAL_REO_CMD_UPD2_SSN);
954 	}
955 
956 	ret = ath12k_dp_reo_cmd_send(ar->ab, rx_tid,
957 				     HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
958 				     NULL);
959 	if (ret) {
960 		ath12k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
961 			    rx_tid->tid, ret);
962 		return ret;
963 	}
964 
965 	rx_tid->ba_win_sz = ba_win_sz;
966 
967 	return 0;
968 }
969 
970 static int ath12k_dp_rx_assign_reoq(struct ath12k_base *ab,
971 				    struct ath12k_sta *ahsta,
972 				    struct ath12k_dp_rx_tid *rx_tid,
973 				    u16 ssn, enum hal_pn_type pn_type)
974 {
975 	u32 ba_win_sz = rx_tid->ba_win_sz;
976 	struct ath12k_reoq_buf *buf;
977 	void *vaddr, *vaddr_aligned;
978 	dma_addr_t paddr_aligned;
979 	u8 tid = rx_tid->tid;
980 	u32 hw_desc_sz;
981 	int ret;
982 
983 	buf = &ahsta->reoq_bufs[tid];
984 	if (!buf->vaddr) {
985 		/* TODO: Optimize the memory allocation for qos tid based on
986 		 * the actual BA window size in REO tid update path.
987 		 */
988 		if (tid == HAL_DESC_REO_NON_QOS_TID)
989 			hw_desc_sz = ath12k_hal_reo_qdesc_size(ba_win_sz, tid);
990 		else
991 			hw_desc_sz = ath12k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
992 
993 		vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
994 		if (!vaddr)
995 			return -ENOMEM;
996 
997 		vaddr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
998 
999 		ath12k_hal_reo_qdesc_setup(vaddr_aligned, tid, ba_win_sz,
1000 					   ssn, pn_type);
1001 
1002 		paddr_aligned = dma_map_single(ab->dev, vaddr_aligned, hw_desc_sz,
1003 					       DMA_BIDIRECTIONAL);
1004 		ret = dma_mapping_error(ab->dev, paddr_aligned);
1005 		if (ret) {
1006 			kfree(vaddr);
1007 			return ret;
1008 		}
1009 
1010 		buf->vaddr = vaddr;
1011 		buf->paddr_aligned = paddr_aligned;
1012 		buf->size = hw_desc_sz;
1013 	}
1014 
1015 	rx_tid->qbuf = *buf;
1016 	rx_tid->active = true;
1017 
1018 	return 0;
1019 }
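
/* The kzalloc() above over-allocates by HAL_LINK_DESC_ALIGN - 1 bytes
 * so that PTR_ALIGN() is guaranteed to find an aligned region of
 * hw_desc_sz bytes inside the buffer: e.g. with a 128 byte alignment
 * (illustrative value), a buffer of hw_desc_sz + 127 bytes always
 * contains a 128-byte-aligned span of hw_desc_sz bytes, wherever
 * kzalloc() happens to place it.
 */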
1020 
1021 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
1022 				u8 tid, u32 ba_win_sz, u16 ssn,
1023 				enum hal_pn_type pn_type)
1024 {
1025 	struct ath12k_base *ab = ar->ab;
1026 	struct ath12k_dp *dp = &ab->dp;
1027 	struct ath12k_peer *peer;
1028 	struct ath12k_sta *ahsta;
1029 	struct ath12k_dp_rx_tid *rx_tid;
1030 	dma_addr_t paddr_aligned;
1031 	int ret;
1032 
1033 	spin_lock_bh(&ab->base_lock);
1034 
1035 	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
1036 	if (!peer) {
1037 		spin_unlock_bh(&ab->base_lock);
1038 		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
1039 		return -ENOENT;
1040 	}
1041 
1042 	if (ab->hw_params->dp_primary_link_only &&
1043 	    !peer->primary_link) {
1044 		spin_unlock_bh(&ab->base_lock);
1045 		return 0;
1046 	}
1047 
1048 	if (ab->hw_params->reoq_lut_support &&
1049 	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
1050 		spin_unlock_bh(&ab->base_lock);
1051 		ath12k_warn(ab, "reo qref table is not setup\n");
1052 		return -EINVAL;
1053 	}
1054 
1055 	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
1056 		ath12k_warn(ab, "peer id %d or tid %d out of range, cannot setup reoq\n",
1057 			    peer->peer_id, tid);
1058 		spin_unlock_bh(&ab->base_lock);
1059 		return -EINVAL;
1060 	}
1061 
1062 	rx_tid = &peer->rx_tid[tid];
1063 	paddr_aligned = rx_tid->qbuf.paddr_aligned;
1064 	/* Update the tid queue if it is already setup */
1065 	if (rx_tid->active) {
1066 		ret = ath12k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1067 						    ba_win_sz, ssn, true);
1068 		spin_unlock_bh(&ab->base_lock);
1069 		if (ret) {
1070 			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
1071 			return ret;
1072 		}
1073 
1074 		if (!ab->hw_params->reoq_lut_support) {
1075 			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1076 								     peer_mac,
1077 								     paddr_aligned, tid,
1078 								     1, ba_win_sz);
1079 			if (ret) {
1080 				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
1081 					    tid, ret);
1082 				return ret;
1083 			}
1084 		}
1085 
1086 		return 0;
1087 	}
1088 
1089 	rx_tid->tid = tid;
1090 
1091 	rx_tid->ba_win_sz = ba_win_sz;
1092 
1093 	ahsta = ath12k_sta_to_ahsta(peer->sta);
1094 	ret = ath12k_dp_rx_assign_reoq(ab, ahsta, rx_tid, ssn, pn_type);
1095 	if (ret) {
1096 		spin_unlock_bh(&ab->base_lock);
1097 		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
1098 		return ret;
1099 	}
1100 
1101 	if (ab->hw_params->reoq_lut_support) {
1102 		/* Update the REO queue LUT at the corresponding peer id
1103 		 * and tid with qaddr.
1104 		 */
1105 		if (peer->mlo)
1106 			ath12k_peer_rx_tid_qref_setup(ab, peer->ml_id, tid,
1107 						      paddr_aligned);
1108 		else
1109 			ath12k_peer_rx_tid_qref_setup(ab, peer->peer_id, tid,
1110 						      paddr_aligned);
1111 
1112 		spin_unlock_bh(&ab->base_lock);
1113 	} else {
1114 		spin_unlock_bh(&ab->base_lock);
1115 		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1116 							     paddr_aligned, tid, 1,
1117 							     ba_win_sz);
1118 	}
1119 
1120 	return ret;
1121 }
1122 
1123 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
1124 			     struct ieee80211_ampdu_params *params,
1125 			     u8 link_id)
1126 {
1127 	struct ath12k_base *ab = ar->ab;
1128 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
1129 	struct ath12k_link_sta *arsta;
1130 	int vdev_id;
1131 	int ret;
1132 
1133 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1134 
1135 	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
1136 				  ahsta->link[link_id]);
1137 	if (!arsta)
1138 		return -ENOLINK;
1139 
1140 	vdev_id = arsta->arvif->vdev_id;
1141 
1142 	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
1143 					  params->tid, params->buf_size,
1144 					  params->ssn, arsta->ahsta->pn_type);
1145 	if (ret)
1146 		ath12k_warn(ab, "failed to setup rx tid: %d\n", ret);
1147 
1148 	return ret;
1149 }
1150 
1151 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
1152 			    struct ieee80211_ampdu_params *params,
1153 			    u8 link_id)
1154 {
1155 	struct ath12k_base *ab = ar->ab;
1156 	struct ath12k_peer *peer;
1157 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
1158 	struct ath12k_link_sta *arsta;
1159 	int vdev_id;
1160 	bool active;
1161 	int ret;
1162 
1163 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
1164 
1165 	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
1166 				  ahsta->link[link_id]);
1167 	if (!arsta)
1168 		return -ENOLINK;
1169 
1170 	vdev_id = arsta->arvif->vdev_id;
1171 
1172 	spin_lock_bh(&ab->base_lock);
1173 
1174 	peer = ath12k_peer_find(ab, vdev_id, arsta->addr);
1175 	if (!peer) {
1176 		spin_unlock_bh(&ab->base_lock);
1177 		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1178 		return -ENOENT;
1179 	}
1180 
1181 	active = peer->rx_tid[params->tid].active;
1182 
1183 	if (!active) {
1184 		spin_unlock_bh(&ab->base_lock);
1185 		return 0;
1186 	}
1187 
1188 	ret = ath12k_peer_rx_tid_reo_update(ar, peer, &peer->rx_tid[params->tid], 1, 0, false);
1189 	spin_unlock_bh(&ab->base_lock);
1190 	if (ret) {
1191 		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1192 			    params->tid, ret);
1193 		return ret;
1194 	}
1195 
1196 	return ret;
1197 }
1198 
1199 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
1200 				       const u8 *peer_addr,
1201 				       enum set_key_cmd key_cmd,
1202 				       struct ieee80211_key_conf *key)
1203 {
1204 	struct ath12k *ar = arvif->ar;
1205 	struct ath12k_base *ab = ar->ab;
1206 	struct ath12k_hal_reo_cmd cmd = {0};
1207 	struct ath12k_peer *peer;
1208 	struct ath12k_dp_rx_tid *rx_tid;
1209 	u8 tid;
1210 	int ret = 0;
1211 
1212 	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1213 	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1214 	 * for now.
1215 	 */
1216 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1217 		return 0;
1218 
1219 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
1220 	cmd.upd0 = HAL_REO_CMD_UPD0_PN |
1221 		    HAL_REO_CMD_UPD0_PN_SIZE |
1222 		    HAL_REO_CMD_UPD0_PN_VALID |
1223 		    HAL_REO_CMD_UPD0_PN_CHECK |
1224 		    HAL_REO_CMD_UPD0_SVLD;
1225 
1226 	switch (key->cipher) {
1227 	case WLAN_CIPHER_SUITE_TKIP:
1228 	case WLAN_CIPHER_SUITE_CCMP:
1229 	case WLAN_CIPHER_SUITE_CCMP_256:
1230 	case WLAN_CIPHER_SUITE_GCMP:
1231 	case WLAN_CIPHER_SUITE_GCMP_256:
1232 		if (key_cmd == SET_KEY) {
1233 			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1234 			cmd.pn_size = 48;
1235 		}
1236 		break;
1237 	default:
1238 		break;
1239 	}
1240 
1241 	spin_lock_bh(&ab->base_lock);
1242 
1243 	peer = ath12k_peer_find(ab, arvif->vdev_id, peer_addr);
1244 	if (!peer) {
1245 		spin_unlock_bh(&ab->base_lock);
1246 		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
1247 			    peer_addr);
1248 		return -ENOENT;
1249 	}
1250 
1251 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1252 		rx_tid = &peer->rx_tid[tid];
1253 		if (!rx_tid->active)
1254 			continue;
1255 		cmd.addr_lo = lower_32_bits(rx_tid->qbuf.paddr_aligned);
1256 		cmd.addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
1257 		ret = ath12k_dp_reo_cmd_send(ab, rx_tid,
1258 					     HAL_REO_CMD_UPDATE_RX_QUEUE,
1259 					     &cmd, NULL);
1260 		if (ret) {
1261 			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
1262 				    tid, peer_addr, ret);
1263 			break;
1264 		}
1265 	}
1266 
1267 	spin_unlock_bh(&ab->base_lock);
1268 
1269 	return ret;
1270 }
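
/* The 48-bit pn_size above matches the packet number width of the
 * ciphers handled in the switch: CCMP/GCMP carry a 48-bit PN (PN0-PN5)
 * and TKIP a 48-bit TSC, so a single size covers all of them.
 */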
1271 
1272 static int ath12k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1273 				      u16 peer_id)
1274 {
1275 	int i;
1276 
1277 	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1278 		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1279 			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1280 				return i;
1281 		} else {
1282 			return i;
1283 		}
1284 	}
1285 
1286 	return -EINVAL;
1287 }
1288 
1289 static int ath12k_htt_tlv_ppdu_stats_parse(struct ath12k_base *ab,
1290 					   u16 tag, u16 len, const void *ptr,
1291 					   void *data)
1292 {
1293 	const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *ba_status;
1294 	const struct htt_ppdu_stats_usr_cmpltn_cmn *cmplt_cmn;
1295 	const struct htt_ppdu_stats_user_rate *user_rate;
1296 	struct htt_ppdu_stats_info *ppdu_info;
1297 	struct htt_ppdu_user_stats *user_stats;
1298 	int cur_user;
1299 	u16 peer_id;
1300 
1301 	ppdu_info = data;
1302 
1303 	switch (tag) {
1304 	case HTT_PPDU_STATS_TAG_COMMON:
1305 		if (len < sizeof(struct htt_ppdu_stats_common)) {
1306 			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1307 				    len, tag);
1308 			return -EINVAL;
1309 		}
1310 		memcpy(&ppdu_info->ppdu_stats.common, ptr,
1311 		       sizeof(struct htt_ppdu_stats_common));
1312 		break;
1313 	case HTT_PPDU_STATS_TAG_USR_RATE:
1314 		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1315 			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1316 				    len, tag);
1317 			return -EINVAL;
1318 		}
1319 		user_rate = ptr;
1320 		peer_id = le16_to_cpu(user_rate->sw_peer_id);
1321 		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1322 						      peer_id);
1323 		if (cur_user < 0)
1324 			return -EINVAL;
1325 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1326 		user_stats->peer_id = peer_id;
1327 		user_stats->is_valid_peer_id = true;
1328 		memcpy(&user_stats->rate, ptr,
1329 		       sizeof(struct htt_ppdu_stats_user_rate));
1330 		user_stats->tlv_flags |= BIT(tag);
1331 		break;
1332 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1333 		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1334 			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1335 				    len, tag);
1336 			return -EINVAL;
1337 		}
1338 
1339 		cmplt_cmn = ptr;
1340 		peer_id = le16_to_cpu(cmplt_cmn->sw_peer_id);
1341 		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1342 						      peer_id);
1343 		if (cur_user < 0)
1344 			return -EINVAL;
1345 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1346 		user_stats->peer_id = peer_id;
1347 		user_stats->is_valid_peer_id = true;
1348 		memcpy(&user_stats->cmpltn_cmn, ptr,
1349 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1350 		user_stats->tlv_flags |= BIT(tag);
1351 		break;
1352 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1353 		if (len <
1354 		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1355 			ath12k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1356 				    len, tag);
1357 			return -EINVAL;
1358 		}
1359 
1360 		ba_status = ptr;
1361 		peer_id = le16_to_cpu(ba_status->sw_peer_id);
1362 		cur_user = ath12k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1363 						      peer_id);
1364 		if (cur_user < 0)
1365 			return -EINVAL;
1366 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1367 		user_stats->peer_id = peer_id;
1368 		user_stats->is_valid_peer_id = true;
1369 		memcpy(&user_stats->ack_ba, ptr,
1370 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1371 		user_stats->tlv_flags |= BIT(tag);
1372 		break;
1373 	}
1374 	return 0;
1375 }
1376 
1377 int ath12k_dp_htt_tlv_iter(struct ath12k_base *ab, const void *ptr, size_t len,
1378 			   int (*iter)(struct ath12k_base *ar, u16 tag, u16 len,
1379 				       const void *ptr, void *data),
1380 			   void *data)
1381 {
1382 	const struct htt_tlv *tlv;
1383 	const void *begin = ptr;
1384 	u16 tlv_tag, tlv_len;
1385 	int ret = -EINVAL;
1386 
1387 	while (len > 0) {
1388 		if (len < sizeof(*tlv)) {
1389 			ath12k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1390 				   ptr - begin, len, sizeof(*tlv));
1391 			return -EINVAL;
1392 		}
1393 		tlv = (struct htt_tlv *)ptr;
1394 		tlv_tag = le32_get_bits(tlv->header, HTT_TLV_TAG);
1395 		tlv_len = le32_get_bits(tlv->header, HTT_TLV_LEN);
1396 		ptr += sizeof(*tlv);
1397 		len -= sizeof(*tlv);
1398 
1399 		if (tlv_len > len) {
1400 			ath12k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1401 				   tlv_tag, ptr - begin, len, tlv_len);
1402 			return -EINVAL;
1403 		}
1404 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1405 		if (ret == -ENOMEM)
1406 			return ret;
1407 
1408 		ptr += tlv_len;
1409 		len -= tlv_len;
1410 	}
1411 	return 0;
1412 }
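
/* Each TLV is a 4-byte header with tag and payload length packed into
 * bit fields, immediately followed by the payload, so the walk above
 * advances by sizeof(*tlv) + tlv_len per step:
 *
 *	| header (tag, len) | payload (len bytes) | header | payload | ...
 *
 * Note that only -ENOMEM from the iterator aborts the walk; any other
 * iterator result moves on to the next TLV.
 */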
1413 
1414 static void
1415 ath12k_update_per_peer_tx_stats(struct ath12k *ar,
1416 				struct htt_ppdu_stats *ppdu_stats, u8 user)
1417 {
1418 	struct ath12k_base *ab = ar->ab;
1419 	struct ath12k_peer *peer;
1420 	struct ieee80211_sta *sta;
1421 	struct ath12k_sta *ahsta;
1422 	struct ath12k_link_sta *arsta;
1423 	struct htt_ppdu_stats_user_rate *user_rate;
1424 	struct ath12k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1425 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1426 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1427 	int ret;
1428 	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1429 	u32 v, succ_bytes = 0;
1430 	u16 tones, rate = 0, succ_pkts = 0;
1431 	u32 tx_duration = 0;
1432 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1433 	bool is_ampdu = false;
1434 
1435 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1436 		return;
1437 
1438 	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1439 		is_ampdu =
1440 			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1441 
1442 	if (usr_stats->tlv_flags &
1443 	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1444 		succ_bytes = le32_to_cpu(usr_stats->ack_ba.success_bytes);
1445 		succ_pkts = le32_get_bits(usr_stats->ack_ba.info,
1446 					  HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M);
1447 		tid = le32_get_bits(usr_stats->ack_ba.info,
1448 				    HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM);
1449 	}
1450 
1451 	if (common->fes_duration_us)
1452 		tx_duration = le32_to_cpu(common->fes_duration_us);
1453 
1454 	user_rate = &usr_stats->rate;
1455 	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1456 	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1457 	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1458 	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1459 	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1460 	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1461 
1462 	/* Note: If the host configured fixed rates, or in some other special
1463 	 * cases, broadcast/management frames are sent at different rates.
1464 	 * Should firmware rate control be skipped for these?
1465 	 */
1466 
1467 	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH12K_HE_MCS_MAX) {
1468 		ath12k_warn(ab, "Invalid HE mcs %d peer stats\n", mcs);
1469 		return;
1470 	}
1471 
1472 	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH12K_VHT_MCS_MAX) {
1473 		ath12k_warn(ab, "Invalid VHT mcs %d peer stats\n", mcs);
1474 		return;
1475 	}
1476 
1477 	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH12K_HT_MCS_MAX || nss < 1)) {
1478 		ath12k_warn(ab, "Invalid HT mcs %d nss %d peer stats\n",
1479 			    mcs, nss);
1480 		return;
1481 	}
1482 
1483 	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1484 		ret = ath12k_mac_hw_ratecode_to_legacy_rate(mcs,
1485 							    flags,
1486 							    &rate_idx,
1487 							    &rate);
1488 		if (ret < 0)
1489 			return;
1490 	}
1491 
1492 	rcu_read_lock();
1493 	spin_lock_bh(&ab->base_lock);
1494 	peer = ath12k_peer_find_by_id(ab, usr_stats->peer_id);
1495 
1496 	if (!peer || !peer->sta) {
1497 		spin_unlock_bh(&ab->base_lock);
1498 		rcu_read_unlock();
1499 		return;
1500 	}
1501 
1502 	sta = peer->sta;
1503 	ahsta = ath12k_sta_to_ahsta(sta);
1504 	arsta = &ahsta->deflink;
1505 
1506 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1507 
1508 	switch (flags) {
1509 	case WMI_RATE_PREAMBLE_OFDM:
1510 		arsta->txrate.legacy = rate;
1511 		break;
1512 	case WMI_RATE_PREAMBLE_CCK:
1513 		arsta->txrate.legacy = rate;
1514 		break;
1515 	case WMI_RATE_PREAMBLE_HT:
1516 		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1517 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1518 		if (sgi)
1519 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1520 		break;
1521 	case WMI_RATE_PREAMBLE_VHT:
1522 		arsta->txrate.mcs = mcs;
1523 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1524 		if (sgi)
1525 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1526 		break;
1527 	case WMI_RATE_PREAMBLE_HE:
1528 		arsta->txrate.mcs = mcs;
1529 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1530 		arsta->txrate.he_dcm = dcm;
1531 		arsta->txrate.he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1532 		tones = le16_to_cpu(user_rate->ru_end) -
1533 			le16_to_cpu(user_rate->ru_start) + 1;
1534 		v = ath12k_he_ru_tones_to_nl80211_he_ru_alloc(tones);
1535 		arsta->txrate.he_ru_alloc = v;
1536 		break;
1537 	}
1538 
1539 	arsta->txrate.nss = nss;
1540 	arsta->txrate.bw = ath12k_mac_bw_to_mac80211_bw(bw);
1541 	arsta->tx_duration += tx_duration;
1542 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1543 
1544 	/* PPDU stats reported for mgmt packets don't have valid tx bytes.
1545 	 * So skip peer stats update for mgmt packets.
1546 	 */
1547 	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1548 		memset(peer_stats, 0, sizeof(*peer_stats));
1549 		peer_stats->succ_pkts = succ_pkts;
1550 		peer_stats->succ_bytes = succ_bytes;
1551 		peer_stats->is_ampdu = is_ampdu;
1552 		peer_stats->duration = tx_duration;
1553 		peer_stats->ba_fails =
1554 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1555 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1556 	}
1557 
1558 	spin_unlock_bh(&ab->base_lock);
1559 	rcu_read_unlock();
1560 }
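
/* For HT the spatial streams are folded into the reported MCS index as
 * mcs + 8 * (nss - 1): e.g. mcs 5 at nss 2 is reported as HT MCS 13.
 * VHT and HE keep mcs and nss separate, as filled in above.
 */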
1561 
1562 static void ath12k_htt_update_ppdu_stats(struct ath12k *ar,
1563 					 struct htt_ppdu_stats *ppdu_stats)
1564 {
1565 	u8 user;
1566 
1567 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1568 		ath12k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1569 }
1570 
1571 static
1572 struct htt_ppdu_stats_info *ath12k_dp_htt_get_ppdu_desc(struct ath12k *ar,
1573 							u32 ppdu_id)
1574 {
1575 	struct htt_ppdu_stats_info *ppdu_info;
1576 
1577 	lockdep_assert_held(&ar->data_lock);
1578 	if (!list_empty(&ar->ppdu_stats_info)) {
1579 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1580 			if (ppdu_info->ppdu_id == ppdu_id)
1581 				return ppdu_info;
1582 		}
1583 
1584 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1585 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1586 						     typeof(*ppdu_info), list);
1587 			list_del(&ppdu_info->list);
1588 			ar->ppdu_stat_list_depth--;
1589 			ath12k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1590 			kfree(ppdu_info);
1591 		}
1592 	}
1593 
1594 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1595 	if (!ppdu_info)
1596 		return NULL;
1597 
1598 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1599 	ar->ppdu_stat_list_depth++;
1600 
1601 	return ppdu_info;
1602 }
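
/* The ppdu_stats_info list is kept bounded: once its depth exceeds
 * HTT_PPDU_DESC_MAX_DEPTH, the oldest descriptor is flushed into the
 * per-peer tx stats and freed before a new one is allocated, so PPDUs
 * whose remaining TLVs never arrive cannot accumulate indefinitely.
 */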
1603 
1604 static void ath12k_copy_to_delay_stats(struct ath12k_peer *peer,
1605 				       struct htt_ppdu_user_stats *usr_stats)
1606 {
1607 	peer->ppdu_stats_delayba.sw_peer_id = le16_to_cpu(usr_stats->rate.sw_peer_id);
1608 	peer->ppdu_stats_delayba.info0 = le32_to_cpu(usr_stats->rate.info0);
1609 	peer->ppdu_stats_delayba.ru_end = le16_to_cpu(usr_stats->rate.ru_end);
1610 	peer->ppdu_stats_delayba.ru_start = le16_to_cpu(usr_stats->rate.ru_start);
1611 	peer->ppdu_stats_delayba.info1 = le32_to_cpu(usr_stats->rate.info1);
1612 	peer->ppdu_stats_delayba.rate_flags = le32_to_cpu(usr_stats->rate.rate_flags);
1613 	peer->ppdu_stats_delayba.resp_rate_flags =
1614 		le32_to_cpu(usr_stats->rate.resp_rate_flags);
1615 
1616 	peer->delayba_flag = true;
1617 }
1618 
1619 static void ath12k_copy_to_bar(struct ath12k_peer *peer,
1620 			       struct htt_ppdu_user_stats *usr_stats)
1621 {
1622 	usr_stats->rate.sw_peer_id = cpu_to_le16(peer->ppdu_stats_delayba.sw_peer_id);
1623 	usr_stats->rate.info0 = cpu_to_le32(peer->ppdu_stats_delayba.info0);
1624 	usr_stats->rate.ru_end = cpu_to_le16(peer->ppdu_stats_delayba.ru_end);
1625 	usr_stats->rate.ru_start = cpu_to_le16(peer->ppdu_stats_delayba.ru_start);
1626 	usr_stats->rate.info1 = cpu_to_le32(peer->ppdu_stats_delayba.info1);
1627 	usr_stats->rate.rate_flags = cpu_to_le32(peer->ppdu_stats_delayba.rate_flags);
1628 	usr_stats->rate.resp_rate_flags =
1629 		cpu_to_le32(peer->ppdu_stats_delayba.resp_rate_flags);
1630 
1631 	peer->delayba_flag = false;
1632 }
1633 
1634 static int ath12k_htt_pull_ppdu_stats(struct ath12k_base *ab,
1635 				      struct sk_buff *skb)
1636 {
1637 	struct ath12k_htt_ppdu_stats_msg *msg;
1638 	struct htt_ppdu_stats_info *ppdu_info;
1639 	struct ath12k_peer *peer = NULL;
1640 	struct htt_ppdu_user_stats *usr_stats = NULL;
1641 	u32 peer_id = 0;
1642 	struct ath12k *ar;
1643 	int ret, i;
1644 	u8 pdev_id;
1645 	u32 ppdu_id, len;
1646 
1647 	msg = (struct ath12k_htt_ppdu_stats_msg *)skb->data;
1648 	len = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE);
1649 	if (len > (skb->len - struct_size(msg, data, 0))) {
1650 		ath12k_warn(ab,
1651 			    "HTT PPDU STATS event has unexpected payload size %u, should be smaller than %u\n",
1652 			    len, skb->len);
1653 		return -EINVAL;
1654 	}
1655 
1656 	pdev_id = le32_get_bits(msg->info, HTT_T2H_PPDU_STATS_INFO_PDEV_ID);
1657 	ppdu_id = le32_to_cpu(msg->ppdu_id);
1658 
1659 	rcu_read_lock();
1660 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1661 	if (!ar) {
1662 		ret = -EINVAL;
1663 		goto exit;
1664 	}
1665 
1666 	spin_lock_bh(&ar->data_lock);
1667 	ppdu_info = ath12k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1668 	if (!ppdu_info) {
1669 		spin_unlock_bh(&ar->data_lock);
1670 		ret = -EINVAL;
1671 		goto exit;
1672 	}
1673 
1674 	ppdu_info->ppdu_id = ppdu_id;
1675 	ret = ath12k_dp_htt_tlv_iter(ab, msg->data, len,
1676 				     ath12k_htt_tlv_ppdu_stats_parse,
1677 				     (void *)ppdu_info);
1678 	if (ret) {
1679 		spin_unlock_bh(&ar->data_lock);
1680 		ath12k_warn(ab, "Failed to parse tlv %d\n", ret);
1681 		goto exit;
1682 	}
1683 
1684 	if (ppdu_info->ppdu_stats.common.num_users >= HTT_PPDU_STATS_MAX_USERS) {
1685 		spin_unlock_bh(&ar->data_lock);
1686 		ath12k_warn(ab,
1687 			    "HTT PPDU STATS event has unexpected num_users %u, should be smaller than %u\n",
1688 			    ppdu_info->ppdu_stats.common.num_users,
1689 			    HTT_PPDU_STATS_MAX_USERS);
1690 		ret = -EINVAL;
1691 		goto exit;
1692 	}
1693 
1694 	/* back up data rate tlv for all peers */
1695 	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_DATA &&
1696 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON)) &&
1697 	    ppdu_info->delay_ba) {
1698 		for (i = 0; i < ppdu_info->ppdu_stats.common.num_users; i++) {
1699 			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1700 			spin_lock_bh(&ab->base_lock);
1701 			peer = ath12k_peer_find_by_id(ab, peer_id);
1702 			if (!peer) {
1703 				spin_unlock_bh(&ab->base_lock);
1704 				continue;
1705 			}
1706 
1707 			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1708 			if (usr_stats->delay_ba)
1709 				ath12k_copy_to_delay_stats(peer, usr_stats);
1710 			spin_unlock_bh(&ab->base_lock);
1711 		}
1712 	}
1713 
1714 	/* restore all peers' data rate tlv to mu-bar tlv */
1715 	if (ppdu_info->frame_type == HTT_STATS_PPDU_FTYPE_BAR &&
1716 	    (ppdu_info->tlv_bitmap & (1 << HTT_PPDU_STATS_TAG_USR_COMMON))) {
1717 		for (i = 0; i < ppdu_info->bar_num_users; i++) {
1718 			peer_id = ppdu_info->ppdu_stats.user_stats[i].peer_id;
1719 			spin_lock_bh(&ab->base_lock);
1720 			peer = ath12k_peer_find_by_id(ab, peer_id);
1721 			if (!peer) {
1722 				spin_unlock_bh(&ab->base_lock);
1723 				continue;
1724 			}
1725 
1726 			usr_stats = &ppdu_info->ppdu_stats.user_stats[i];
1727 			if (peer->delayba_flag)
1728 				ath12k_copy_to_bar(peer, usr_stats);
1729 			spin_unlock_bh(&ab->base_lock);
1730 		}
1731 	}
1732 
1733 	spin_unlock_bh(&ar->data_lock);
1734 
1735 exit:
1736 	rcu_read_unlock();
1737 
1738 	return ret;
1739 }
1740 
1741 static void ath12k_htt_mlo_offset_event_handler(struct ath12k_base *ab,
1742 						struct sk_buff *skb)
1743 {
1744 	struct ath12k_htt_mlo_offset_msg *msg;
1745 	struct ath12k_pdev *pdev;
1746 	struct ath12k *ar;
1747 	u8 pdev_id;
1748 
1749 	msg = (struct ath12k_htt_mlo_offset_msg *)skb->data;
1750 	pdev_id = u32_get_bits(__le32_to_cpu(msg->info),
1751 			       HTT_T2H_MLO_OFFSET_INFO_PDEV_ID);
1752 
1753 	rcu_read_lock();
1754 	ar = ath12k_mac_get_ar_by_pdev_id(ab, pdev_id);
1755 	if (!ar) {
1756 		/* It is possible that the ar is not yet active (started).
1757 		 * The above function will only look for the active pdev
1758 		 * and hence %NULL return is possible. Just silently
1759 		 * discard this message
1760 		 */
1761 		goto exit;
1762 	}
1763 
1764 	spin_lock_bh(&ar->data_lock);
1765 	pdev = ar->pdev;
1766 
1767 	pdev->timestamp.info = __le32_to_cpu(msg->info);
1768 	pdev->timestamp.sync_timestamp_lo_us = __le32_to_cpu(msg->sync_timestamp_lo_us);
1769 	pdev->timestamp.sync_timestamp_hi_us = __le32_to_cpu(msg->sync_timestamp_hi_us);
1770 	pdev->timestamp.mlo_offset_lo = __le32_to_cpu(msg->mlo_offset_lo);
1771 	pdev->timestamp.mlo_offset_hi = __le32_to_cpu(msg->mlo_offset_hi);
1772 	pdev->timestamp.mlo_offset_clks = __le32_to_cpu(msg->mlo_offset_clks);
1773 	pdev->timestamp.mlo_comp_clks = __le32_to_cpu(msg->mlo_comp_clks);
1774 	pdev->timestamp.mlo_comp_timer = __le32_to_cpu(msg->mlo_comp_timer);
1775 
1776 	spin_unlock_bh(&ar->data_lock);
1777 exit:
1778 	rcu_read_unlock();
1779 }
1780 
1781 void ath12k_dp_htt_htc_t2h_msg_handler(struct ath12k_base *ab,
1782 				       struct sk_buff *skb)
1783 {
1784 	struct ath12k_dp *dp = &ab->dp;
1785 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1786 	enum htt_t2h_msg_type type;
1787 	u16 peer_id;
1788 	u8 vdev_id;
1789 	u8 mac_addr[ETH_ALEN];
1790 	u16 peer_mac_h16;
1791 	u16 ast_hash = 0;
1792 	u16 hw_peer_id;
1793 
1794 	type = le32_get_bits(resp->version_msg.version, HTT_T2H_MSG_TYPE);
1795 
1796 	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt rx msg type: 0x%x\n", type);
1797 
1798 	switch (type) {
1799 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1800 		dp->htt_tgt_ver_major = le32_get_bits(resp->version_msg.version,
1801 						      HTT_T2H_VERSION_CONF_MAJOR);
1802 		dp->htt_tgt_ver_minor = le32_get_bits(resp->version_msg.version,
1803 						      HTT_T2H_VERSION_CONF_MINOR);
1804 		complete(&dp->htt_tgt_version_received);
1805 		break;
1806 	/* TODO: remove unused peer map versions after testing */
1807 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1808 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1809 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1810 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1811 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1812 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1813 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1814 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1815 				       peer_mac_h16, mac_addr);
1816 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1817 		break;
1818 	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1819 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1820 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1821 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1822 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1823 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1824 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1825 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1826 				       peer_mac_h16, mac_addr);
1827 		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1828 					 HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL);
1829 		hw_peer_id = le32_get_bits(resp->peer_map_ev.info1,
1830 					   HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID);
1831 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1832 				      hw_peer_id);
1833 		break;
1834 	case HTT_T2H_MSG_TYPE_PEER_MAP3:
1835 		vdev_id = le32_get_bits(resp->peer_map_ev.info,
1836 					HTT_T2H_PEER_MAP_INFO_VDEV_ID);
1837 		peer_id = le32_get_bits(resp->peer_map_ev.info,
1838 					HTT_T2H_PEER_MAP_INFO_PEER_ID);
1839 		peer_mac_h16 = le32_get_bits(resp->peer_map_ev.info1,
1840 					     HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16);
1841 		ath12k_dp_get_mac_addr(le32_to_cpu(resp->peer_map_ev.mac_addr_l32),
1842 				       peer_mac_h16, mac_addr);
1843 		ast_hash = le32_get_bits(resp->peer_map_ev.info2,
1844 					 HTT_T2H_PEER_MAP3_INFO2_AST_HASH_VAL);
1845 		hw_peer_id = le32_get_bits(resp->peer_map_ev.info2,
1846 					   HTT_T2H_PEER_MAP3_INFO2_HW_PEER_ID);
1847 		ath12k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1848 				      hw_peer_id);
1849 		break;
1850 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1851 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1852 		peer_id = le32_get_bits(resp->peer_unmap_ev.info,
1853 					HTT_T2H_PEER_UNMAP_INFO_PEER_ID);
1854 		ath12k_peer_unmap_event(ab, peer_id);
1855 		break;
1856 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1857 		ath12k_htt_pull_ppdu_stats(ab, skb);
1858 		break;
1859 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1860 		ath12k_debugfs_htt_ext_stats_handler(ab, skb);
1861 		break;
1862 	case HTT_T2H_MSG_TYPE_MLO_TIMESTAMP_OFFSET_IND:
1863 		ath12k_htt_mlo_offset_event_handler(ab, skb);
1864 		break;
1865 	default:
1866 		ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt event %d not handled\n",
1867 			   type);
1868 		break;
1869 	}
1870 
1871 	dev_kfree_skb_any(skb);
1872 }
1873 
1874 static int ath12k_dp_rx_msdu_coalesce(struct ath12k *ar,
1875 				      struct sk_buff_head *msdu_list,
1876 				      struct sk_buff *first, struct sk_buff *last,
1877 				      u8 l3pad_bytes, int msdu_len)
1878 {
1879 	struct ath12k_base *ab = ar->ab;
1880 	struct sk_buff *skb;
1881 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1882 	int buf_first_hdr_len, buf_first_len;
1883 	struct hal_rx_desc *ldesc;
1884 	int space_extra, rem_len, buf_len;
1885 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
1886 	bool is_continuation;
1887 
1888 	/* As the msdu is spread across multiple rx buffers,
1889 	 * find the offset to the start of msdu for computing
1890 	 * the length of the msdu in the first buffer.
1891 	 */
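	/* First buffer layout: [ hal rx desc | l3 pad | msdu payload ... ] */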
1892 	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1893 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1894 
1895 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1896 		skb_put(first, buf_first_hdr_len + msdu_len);
1897 		skb_pull(first, buf_first_hdr_len);
1898 		return 0;
1899 	}
1900 
1901 	ldesc = (struct hal_rx_desc *)last->data;
1902 	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, ldesc);
1903 	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, ldesc);
1904 
1905 	/* The MSDU spans multiple buffers because its length exceeds
1906 	 * DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE, so assume the data in the
1907 	 * first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1908 	 */
1909 	skb_put(first, DP_RX_BUFFER_SIZE);
1910 	skb_pull(first, buf_first_hdr_len);
1911 
1912 	/* When an MSDU is spread over multiple buffers, the MSDU_END
1913 	 * tlvs are valid only in the last buffer. Copy those tlvs.
1914 	 */
1915 	ath12k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1916 
1917 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1918 	if (space_extra > 0 &&
1919 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1920 		/* Free up all buffers of the MSDU */
1921 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1922 			rxcb = ATH12K_SKB_RXCB(skb);
1923 			if (!rxcb->is_continuation) {
1924 				dev_kfree_skb_any(skb);
1925 				break;
1926 			}
1927 			dev_kfree_skb_any(skb);
1928 		}
1929 		return -ENOMEM;
1930 	}
1931 
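	/* Append the payload of each remaining buffer to the first skb;
	 * only the final (non-continuation) buffer carries just the
	 * remaining rem_len bytes.
	 */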
1932 	rem_len = msdu_len - buf_first_len;
1933 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1934 		rxcb = ATH12K_SKB_RXCB(skb);
1935 		is_continuation = rxcb->is_continuation;
1936 		if (is_continuation)
1937 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1938 		else
1939 			buf_len = rem_len;
1940 
1941 		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1942 			WARN_ON_ONCE(1);
1943 			dev_kfree_skb_any(skb);
1944 			return -EINVAL;
1945 		}
1946 
1947 		skb_put(skb, buf_len + hal_rx_desc_sz);
1948 		skb_pull(skb, hal_rx_desc_sz);
1949 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1950 					  buf_len);
1951 		dev_kfree_skb_any(skb);
1952 
1953 		rem_len -= buf_len;
1954 		if (!is_continuation)
1955 			break;
1956 	}
1957 
1958 	return 0;
1959 }
1960 
1961 static struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1962 						      struct sk_buff *first)
1963 {
1964 	struct sk_buff *skb;
1965 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
1966 
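	/* The last buffer of an MSDU is the first one in the list without
	 * the continuation flag set.
	 */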
1967 	if (!rxcb->is_continuation)
1968 		return first;
1969 
1970 	skb_queue_walk(msdu_list, skb) {
1971 		rxcb = ATH12K_SKB_RXCB(skb);
1972 		if (!rxcb->is_continuation)
1973 			return skb;
1974 	}
1975 
1976 	return NULL;
1977 }
1978 
1979 static void ath12k_dp_rx_h_csum_offload(struct sk_buff *msdu,
1980 					struct ath12k_dp_rx_info *rx_info)
1981 {
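	/* Report CHECKSUM_UNNECESSARY only when both the IP and L4 HW
	 * checksum checks passed; otherwise leave verification to the stack.
	 */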
1982 	msdu->ip_summed = (rx_info->ip_csum_fail || rx_info->l4_csum_fail) ?
1983 			   CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1984 }
1985 
1986 int ath12k_dp_rx_crypto_mic_len(struct ath12k *ar, enum hal_encrypt_type enctype)
1987 {
1988 	switch (enctype) {
1989 	case HAL_ENCRYPT_TYPE_OPEN:
1990 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1991 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1992 		return 0;
1993 	case HAL_ENCRYPT_TYPE_CCMP_128:
1994 		return IEEE80211_CCMP_MIC_LEN;
1995 	case HAL_ENCRYPT_TYPE_CCMP_256:
1996 		return IEEE80211_CCMP_256_MIC_LEN;
1997 	case HAL_ENCRYPT_TYPE_GCMP_128:
1998 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1999 		return IEEE80211_GCMP_MIC_LEN;
2000 	case HAL_ENCRYPT_TYPE_WEP_40:
2001 	case HAL_ENCRYPT_TYPE_WEP_104:
2002 	case HAL_ENCRYPT_TYPE_WEP_128:
2003 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2004 	case HAL_ENCRYPT_TYPE_WAPI:
2005 		break;
2006 	}
2007 
2008 	ath12k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
2009 	return 0;
2010 }
2011 
2012 static int ath12k_dp_rx_crypto_param_len(struct ath12k *ar,
2013 					 enum hal_encrypt_type enctype)
2014 {
2015 	switch (enctype) {
2016 	case HAL_ENCRYPT_TYPE_OPEN:
2017 		return 0;
2018 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
2019 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
2020 		return IEEE80211_TKIP_IV_LEN;
2021 	case HAL_ENCRYPT_TYPE_CCMP_128:
2022 		return IEEE80211_CCMP_HDR_LEN;
2023 	case HAL_ENCRYPT_TYPE_CCMP_256:
2024 		return IEEE80211_CCMP_256_HDR_LEN;
2025 	case HAL_ENCRYPT_TYPE_GCMP_128:
2026 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
2027 		return IEEE80211_GCMP_HDR_LEN;
2028 	case HAL_ENCRYPT_TYPE_WEP_40:
2029 	case HAL_ENCRYPT_TYPE_WEP_104:
2030 	case HAL_ENCRYPT_TYPE_WEP_128:
2031 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2032 	case HAL_ENCRYPT_TYPE_WAPI:
2033 		break;
2034 	}
2035 
2036 	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
2037 	return 0;
2038 }
2039 
2040 static int ath12k_dp_rx_crypto_icv_len(struct ath12k *ar,
2041 				       enum hal_encrypt_type enctype)
2042 {
2043 	switch (enctype) {
2044 	case HAL_ENCRYPT_TYPE_OPEN:
2045 	case HAL_ENCRYPT_TYPE_CCMP_128:
2046 	case HAL_ENCRYPT_TYPE_CCMP_256:
2047 	case HAL_ENCRYPT_TYPE_GCMP_128:
2048 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
2049 		return 0;
2050 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
2051 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
2052 		return IEEE80211_TKIP_ICV_LEN;
2053 	case HAL_ENCRYPT_TYPE_WEP_40:
2054 	case HAL_ENCRYPT_TYPE_WEP_104:
2055 	case HAL_ENCRYPT_TYPE_WEP_128:
2056 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
2057 	case HAL_ENCRYPT_TYPE_WAPI:
2058 		break;
2059 	}
2060 
2061 	ath12k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
2062 	return 0;
2063 }
2064 
2065 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k *ar,
2066 					 struct sk_buff *msdu,
2067 					 enum hal_encrypt_type enctype,
2068 					 struct ieee80211_rx_status *status)
2069 {
2070 	struct ath12k_base *ab = ar->ab;
2071 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2072 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
2073 	struct ieee80211_hdr *hdr;
2074 	size_t hdr_len;
2075 	u8 *crypto_hdr;
2076 	u16 qos_ctl;
2077 
2078 	/* pull decapped header */
2079 	hdr = (struct ieee80211_hdr *)msdu->data;
2080 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2081 	skb_pull(msdu, hdr_len);
2082 
2083 	/*  Rebuild qos header */
2084 	/* Rebuild qos header */
2085 
2086 	/* Reset the order bit as the HT_Control header is stripped */
2087 	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2088 
2089 	qos_ctl = rxcb->tid;
2090 
2091 	if (ath12k_dp_rx_h_mesh_ctl_present(ab, rxcb->rx_desc))
2092 		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2093 
2094 	/* TODO: Add other QoS ctl fields when required */
2095 
2096 	/* copy decap header before overwriting for reuse below */
2097 	memcpy(decap_hdr, hdr, hdr_len);
2098 
2099 	/* Rebuild crypto header for mac80211 use */
2100 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2101 		crypto_hdr = skb_push(msdu, ath12k_dp_rx_crypto_param_len(ar, enctype));
2102 		ath12k_dp_rx_desc_get_crypto_header(ar->ab,
2103 						    rxcb->rx_desc, crypto_hdr,
2104 						    enctype);
2105 	}
2106 
2107 	memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN),
2108 	       &qos_ctl,
2109 	       IEEE80211_QOS_CTL_LEN);
2110 	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2111 }
2112 
2113 static void ath12k_dp_rx_h_undecap_raw(struct ath12k *ar, struct sk_buff *msdu,
2114 				       enum hal_encrypt_type enctype,
2115 				       struct ieee80211_rx_status *status,
2116 				       bool decrypted)
2117 {
2118 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2119 	struct ieee80211_hdr *hdr;
2120 	size_t hdr_len;
2121 	size_t crypto_len;
2122 
2123 	if (!rxcb->is_first_msdu ||
2124 	    !rxcb->is_last_msdu) {
2125 		WARN_ON_ONCE(1);
2126 		return;
2127 	}
2128 
2129 	skb_trim(msdu, msdu->len - FCS_LEN);
2130 
2131 	if (!decrypted)
2132 		return;
2133 
2134 	hdr = (void *)msdu->data;
2135 
2136 	/* Tail */
2137 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2138 		skb_trim(msdu, msdu->len -
2139 			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2140 
2141 		skb_trim(msdu, msdu->len -
2142 			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2143 	} else {
2144 		/* MIC */
2145 		if (status->flag & RX_FLAG_MIC_STRIPPED)
2146 			skb_trim(msdu, msdu->len -
2147 				 ath12k_dp_rx_crypto_mic_len(ar, enctype));
2148 
2149 		/* ICV */
2150 		if (status->flag & RX_FLAG_ICV_STRIPPED)
2151 			skb_trim(msdu, msdu->len -
2152 				 ath12k_dp_rx_crypto_icv_len(ar, enctype));
2153 	}
2154 
2155 	/* MMIC */
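	/* Michael MIC is 8 bytes, which matches IEEE80211_CCMP_MIC_LEN */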
2156 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2157 	    !ieee80211_has_morefrags(hdr->frame_control) &&
2158 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2159 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2160 
2161 	/* Head */
2162 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2163 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2164 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2165 
2166 		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
2167 		skb_pull(msdu, crypto_len);
2168 	}
2169 }
2170 
2171 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k *ar,
2172 					      struct sk_buff *msdu,
2173 					      struct ath12k_skb_rxcb *rxcb,
2174 					      struct ieee80211_rx_status *status,
2175 					      enum hal_encrypt_type enctype)
2176 {
2177 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2178 	struct ath12k_base *ab = ar->ab;
2179 	size_t hdr_len, crypto_len;
2180 	struct ieee80211_hdr hdr;
2181 	__le16 qos_ctl;
2182 	u8 *crypto_hdr, mesh_ctrl;
2183 
2184 	ath12k_dp_rx_desc_get_dot11_hdr(ab, rx_desc, &hdr);
2185 	hdr_len = ieee80211_hdrlen(hdr.frame_control);
2186 	mesh_ctrl = ath12k_dp_rx_h_mesh_ctl_present(ab, rx_desc);
2187 
2188 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2189 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
2190 		crypto_hdr = skb_push(msdu, crypto_len);
2191 		ath12k_dp_rx_desc_get_crypto_header(ab, rx_desc, crypto_hdr, enctype);
2192 	}
2193 
2194 	skb_push(msdu, hdr_len);
2195 	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
2196 
2197 	if (rxcb->is_mcbc)
2198 		status->flag &= ~RX_FLAG_PN_VALIDATED;
2199 
2200 	/* Add QOS header */
2201 	if (ieee80211_is_data_qos(hdr.frame_control)) {
2202 		struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
2203 
2204 		qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
2205 		if (mesh_ctrl)
2206 			qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
2207 
2208 		memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
2209 	}
2210 }
2211 
2212 static void ath12k_dp_rx_h_undecap_eth(struct ath12k *ar,
2213 				       struct sk_buff *msdu,
2214 				       enum hal_encrypt_type enctype,
2215 				       struct ieee80211_rx_status *status)
2216 {
2217 	struct ieee80211_hdr *hdr;
2218 	struct ethhdr *eth;
2219 	u8 da[ETH_ALEN];
2220 	u8 sa[ETH_ALEN];
2221 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2222 	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
2223 
2224 	eth = (struct ethhdr *)msdu->data;
2225 	ether_addr_copy(da, eth->h_dest);
2226 	ether_addr_copy(sa, eth->h_source);
2227 	rfc.snap_type = eth->h_proto;
2228 	skb_pull(msdu, sizeof(*eth));
2229 	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
2230 	       sizeof(rfc));
2231 	ath12k_get_dot11_hdr_from_rx_desc(ar, msdu, rxcb, status, enctype);
2232 
2233 	/* The original 802.11 header has a different DA and, in
2234 	 * the 4addr case, it may also have a different SA.
2235 	 */
2236 	hdr = (struct ieee80211_hdr *)msdu->data;
2237 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2238 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2239 }
2240 
2241 static void ath12k_dp_rx_h_undecap(struct ath12k *ar, struct sk_buff *msdu,
2242 				   struct hal_rx_desc *rx_desc,
2243 				   enum hal_encrypt_type enctype,
2244 				   struct ieee80211_rx_status *status,
2245 				   bool decrypted)
2246 {
2247 	struct ath12k_base *ab = ar->ab;
2248 	u8 decap;
2249 	struct ethhdr *ehdr;
2250 
2251 	decap = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2252 
2253 	switch (decap) {
2254 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2255 		ath12k_dp_rx_h_undecap_nwifi(ar, msdu, enctype, status);
2256 		break;
2257 	case DP_RX_DECAP_TYPE_RAW:
2258 		ath12k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2259 					   decrypted);
2260 		break;
2261 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2262 		ehdr = (struct ethhdr *)msdu->data;
2263 
2264 		/* mac80211 allows fast path only for authorized STA */
2265 		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2266 			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
2267 			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2268 			break;
2269 		}
2270 
2271 		/* PN for mcast packets will be validated in mac80211;
2272 		 * remove eth header and add 802.11 header.
2273 		 */
2274 		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2275 			ath12k_dp_rx_h_undecap_eth(ar, msdu, enctype, status);
2276 		break;
2277 	case DP_RX_DECAP_TYPE_8023:
2278 		/* TODO: Handle undecap for these formats */
2279 		break;
2280 	}
2281 }
2282 
2283 struct ath12k_peer *
2284 ath12k_dp_rx_h_find_peer(struct ath12k_base *ab, struct sk_buff *msdu,
2285 			 struct ath12k_dp_rx_info *rx_info)
2286 {
2287 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2288 	struct ath12k_peer *peer = NULL;
2289 
2290 	lockdep_assert_held(&ab->base_lock);
2291 
2292 	if (rxcb->peer_id)
2293 		peer = ath12k_peer_find_by_id(ab, rxcb->peer_id);
2294 
2295 	if (peer)
2296 		return peer;
2297 
2298 	if (rx_info->addr2_present)
2299 		peer = ath12k_peer_find_by_addr(ab, rx_info->addr2);
2300 
2301 	return peer;
2302 }
2303 
2304 static void ath12k_dp_rx_h_mpdu(struct ath12k *ar,
2305 				struct sk_buff *msdu,
2306 				struct hal_rx_desc *rx_desc,
2307 				struct ath12k_dp_rx_info *rx_info)
2308 {
2309 	struct ath12k_base *ab = ar->ab;
2310 	struct ath12k_skb_rxcb *rxcb;
2311 	enum hal_encrypt_type enctype;
2312 	bool is_decrypted = false;
2313 	struct ieee80211_hdr *hdr;
2314 	struct ath12k_peer *peer;
2315 	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2316 	u32 err_bitmap;
2317 
2318 	/* PN for multicast packets will be checked in mac80211 */
2319 	rxcb = ATH12K_SKB_RXCB(msdu);
2320 	rxcb->is_mcbc = rx_info->is_mcbc;
2321 
2322 	if (rxcb->is_mcbc)
2323 		rxcb->peer_id = rx_info->peer_id;
2324 
2325 	spin_lock_bh(&ar->ab->base_lock);
2326 	peer = ath12k_dp_rx_h_find_peer(ar->ab, msdu, rx_info);
2327 	if (peer) {
2328 		/* Clear the mcbc bit for ucast_ra_only peers (e.g. an AP
2329 		 * peer), since a STA sends even such packets as unicast.
2330 		 */
2331 		rxcb->is_mcbc = rxcb->is_mcbc && !peer->ucast_ra_only;
2332 
2333 		if (rxcb->is_mcbc)
2334 			enctype = peer->sec_type_grp;
2335 		else
2336 			enctype = peer->sec_type;
2337 	} else {
2338 		enctype = HAL_ENCRYPT_TYPE_OPEN;
2339 	}
2340 	spin_unlock_bh(&ar->ab->base_lock);
2341 
2342 	err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
2343 	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2344 		is_decrypted = ath12k_dp_rx_h_is_decrypted(ab, rx_desc);
2345 
2346 	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2347 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2348 			     RX_FLAG_MMIC_ERROR |
2349 			     RX_FLAG_DECRYPTED |
2350 			     RX_FLAG_IV_STRIPPED |
2351 			     RX_FLAG_MMIC_STRIPPED);
2352 
2353 	if (err_bitmap & HAL_RX_MPDU_ERR_FCS)
2354 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2355 	if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC)
2356 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2357 
2358 	if (is_decrypted) {
2359 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2360 
2361 		if (rx_info->is_mcbc)
2362 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2363 					RX_FLAG_ICV_STRIPPED;
2364 		else
2365 			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2366 					   RX_FLAG_PN_VALIDATED;
2367 	}
2368 
2369 	ath12k_dp_rx_h_csum_offload(msdu, rx_info);
2370 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
2371 			       enctype, rx_status, is_decrypted);
2372 
2373 	if (!is_decrypted || rx_info->is_mcbc)
2374 		return;
2375 
2376 	if (rx_info->decap_type != DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2377 		hdr = (void *)msdu->data;
2378 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2379 	}
2380 }
2381 
2382 static void ath12k_dp_rx_h_rate(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
2383 {
2384 	struct ieee80211_supported_band *sband;
2385 	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2386 	enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
2387 	u8 bw = rx_info->bw, sgi = rx_info->sgi;
2388 	u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
2389 	bool is_cck;
2390 
2391 	switch (pkt_type) {
2392 	case RX_MSDU_START_PKT_TYPE_11A:
2393 	case RX_MSDU_START_PKT_TYPE_11B:
2394 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2395 		sband = &ar->mac.sbands[rx_status->band];
2396 		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
2397 								is_cck);
2398 		break;
2399 	case RX_MSDU_START_PKT_TYPE_11N:
2400 		rx_status->encoding = RX_ENC_HT;
2401 		if (rate_mcs > ATH12K_HT_MCS_MAX) {
2402 			ath12k_warn(ar->ab,
2403 				    "Received invalid mcs %d in HT mode\n",
2404 				    rate_mcs);
2405 			break;
2406 		}
2407 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2408 		if (sgi)
2409 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2410 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2411 		break;
2412 	case RX_MSDU_START_PKT_TYPE_11AC:
2413 		rx_status->encoding = RX_ENC_VHT;
2414 		rx_status->rate_idx = rate_mcs;
2415 		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
2416 			ath12k_warn(ar->ab,
2417 				    "Received invalid mcs %d in VHT mode\n",
2418 				    rate_mcs);
2419 			break;
2420 		}
2421 		rx_status->nss = nss;
2422 		if (sgi)
2423 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2424 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2425 		break;
2426 	case RX_MSDU_START_PKT_TYPE_11AX:
2427 		rx_status->rate_idx = rate_mcs;
2428 		if (rate_mcs > ATH12K_HE_MCS_MAX) {
2429 			ath12k_warn(ar->ab,
2430 				    "Received invalid mcs %d in HE mode\n",
2431 				    rate_mcs);
2432 			break;
2433 		}
2434 		rx_status->encoding = RX_ENC_HE;
2435 		rx_status->nss = nss;
2436 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
2437 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2438 		break;
2439 	case RX_MSDU_START_PKT_TYPE_11BE:
2440 		rx_status->rate_idx = rate_mcs;
2441 
2442 		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
2443 			ath12k_warn(ar->ab,
2444 				    "Received invalid mcs %d in EHT mode\n",
2445 				    rate_mcs);
2446 			break;
2447 		}
2448 
2449 		rx_status->encoding = RX_ENC_EHT;
2450 		rx_status->nss = nss;
2451 		rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
2452 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
2453 		break;
2454 	default:
2455 		break;
2456 	}
2457 }
2458 
2459 void ath12k_dp_rx_h_fetch_info(struct ath12k_base *ab, struct hal_rx_desc *rx_desc,
2460 			       struct ath12k_dp_rx_info *rx_info)
2461 {
2462 	rx_info->ip_csum_fail = ath12k_dp_rx_h_ip_cksum_fail(ab, rx_desc);
2463 	rx_info->l4_csum_fail = ath12k_dp_rx_h_l4_cksum_fail(ab, rx_desc);
2464 	rx_info->is_mcbc = ath12k_dp_rx_h_is_da_mcbc(ab, rx_desc);
2465 	rx_info->decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2466 	rx_info->pkt_type = ath12k_dp_rx_h_pkt_type(ab, rx_desc);
2467 	rx_info->sgi = ath12k_dp_rx_h_sgi(ab, rx_desc);
2468 	rx_info->rate_mcs = ath12k_dp_rx_h_rate_mcs(ab, rx_desc);
2469 	rx_info->bw = ath12k_dp_rx_h_rx_bw(ab, rx_desc);
2470 	rx_info->nss = ath12k_dp_rx_h_nss(ab, rx_desc);
2471 	rx_info->tid = ath12k_dp_rx_h_tid(ab, rx_desc);
2472 	rx_info->peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
2473 	rx_info->phy_meta_data = ath12k_dp_rx_h_freq(ab, rx_desc);
2474 
2475 	if (ath12k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)) {
2476 		ether_addr_copy(rx_info->addr2,
2477 				ath12k_dp_rxdesc_get_mpdu_start_addr2(ab, rx_desc));
2478 		rx_info->addr2_present = true;
2479 	}
2480 
2481 	ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "rx_desc: ",
2482 			rx_desc, sizeof(*rx_desc));
2483 }
2484 
2485 void ath12k_dp_rx_h_ppdu(struct ath12k *ar, struct ath12k_dp_rx_info *rx_info)
2486 {
2487 	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
2488 	u8 channel_num;
2489 	u32 center_freq, meta_data;
2490 	struct ieee80211_channel *channel;
2491 
2492 	rx_status->freq = 0;
2493 	rx_status->rate_idx = 0;
2494 	rx_status->nss = 0;
2495 	rx_status->encoding = RX_ENC_LEGACY;
2496 	rx_status->bw = RATE_INFO_BW_20;
2497 	rx_status->enc_flags = 0;
2498 
2499 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2500 
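	/* phy_meta_data packs the channel number in the lower 16 bits and
	 * the center frequency (MHz) in the upper 16 bits.
	 */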
2501 	meta_data = rx_info->phy_meta_data;
2502 	channel_num = meta_data;
2503 	center_freq = meta_data >> 16;
2504 
2505 	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
2506 	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
2507 		rx_status->band = NL80211_BAND_6GHZ;
2508 		rx_status->freq = center_freq;
2509 	} else if (channel_num >= 1 && channel_num <= 14) {
2510 		rx_status->band = NL80211_BAND_2GHZ;
2511 	} else if (channel_num >= 36 && channel_num <= 173) {
2512 		rx_status->band = NL80211_BAND_5GHZ;
2513 	} else {
2514 		spin_lock_bh(&ar->data_lock);
2515 		channel = ar->rx_channel;
2516 		if (channel) {
2517 			rx_status->band = channel->band;
2518 			channel_num =
2519 				ieee80211_frequency_to_channel(channel->center_freq);
2520 		}
2521 		spin_unlock_bh(&ar->data_lock);
2522 	}
2523 
2524 	if (rx_status->band != NL80211_BAND_6GHZ)
2525 		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2526 								 rx_status->band);
2527 
2528 	ath12k_dp_rx_h_rate(ar, rx_info);
2529 }
2530 
2531 static void ath12k_dp_rx_deliver_msdu(struct ath12k *ar, struct napi_struct *napi,
2532 				      struct sk_buff *msdu,
2533 				      struct ath12k_dp_rx_info *rx_info)
2534 {
2535 	struct ath12k_base *ab = ar->ab;
2536 	struct ieee80211_rx_status *rx_status;
2537 	struct ieee80211_sta *pubsta;
2538 	struct ath12k_peer *peer;
2539 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
2540 	struct ieee80211_rx_status *status = rx_info->rx_status;
2541 	u8 decap = rx_info->decap_type;
2542 	bool is_mcbc = rxcb->is_mcbc;
2543 	bool is_eapol = rxcb->is_eapol;
2544 
2545 	spin_lock_bh(&ab->base_lock);
2546 	peer = ath12k_dp_rx_h_find_peer(ab, msdu, rx_info);
2547 
2548 	pubsta = peer ? peer->sta : NULL;
2549 
2550 	if (pubsta && pubsta->valid_links) {
2551 		status->link_valid = 1;
2552 		status->link_id = peer->link_id;
2553 	}
2554 
2555 	spin_unlock_bh(&ab->base_lock);
2556 
2557 	ath12k_dbg(ab, ATH12K_DBG_DATA,
2558 		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2559 		   msdu,
2560 		   msdu->len,
2561 		   peer ? peer->addr : NULL,
2562 		   rxcb->tid,
2563 		   is_mcbc ? "mcast" : "ucast",
2564 		   ath12k_dp_rx_h_seq_no(ab, rxcb->rx_desc),
2565 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2566 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2567 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2568 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2569 		   (status->encoding == RX_ENC_EHT) ? "eht" : "",
2570 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2571 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2572 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2573 		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
2574 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2575 		   status->rate_idx,
2576 		   status->nss,
2577 		   status->freq,
2578 		   status->band, status->flag,
2579 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2580 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2581 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2582 
2583 	ath12k_dbg_dump(ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
2584 			msdu->data, msdu->len);
2585 
2586 	rx_status = IEEE80211_SKB_RXCB(msdu);
2587 	*rx_status = *status;
2588 
2589 	/* TODO: trace rx packet */
2590 
2591 	/* The PN for multicast packets is not validated in HW,
2592 	 * so skip the 802.3 rx path.
2593 	 * Also, fast_rx expects the STA to be authorized, hence
2594 	 * eapol packets are sent in the slow path.
2595 	 */
2596 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2597 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2598 		rx_status->flag |= RX_FLAG_8023;
2599 
2600 	ieee80211_rx_napi(ath12k_ar_to_hw(ar), pubsta, msdu, napi);
2601 }
2602 
2603 static bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_base *ab,
2604 						   struct hal_rx_desc *rx_desc,
2605 						   struct sk_buff *msdu)
2606 {
2607 	struct ieee80211_hdr *hdr;
2608 	u8 decap_type;
2609 	u32 hdr_len;
2610 
2611 	decap_type = ath12k_dp_rx_h_decap_type(ab, rx_desc);
2612 	if (decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
2613 		return true;
2614 
2615 	hdr = (struct ieee80211_hdr *)msdu->data;
2616 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2617 
2618 	if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
2619 		return true;
2620 
2621 	ab->device_stats.invalid_rbm++;
2622 	WARN_ON_ONCE(1);
2623 	return false;
2624 }
2625 
2626 static int ath12k_dp_rx_process_msdu(struct ath12k *ar,
2627 				     struct sk_buff *msdu,
2628 				     struct sk_buff_head *msdu_list,
2629 				     struct ath12k_dp_rx_info *rx_info)
2630 {
2631 	struct ath12k_base *ab = ar->ab;
2632 	struct hal_rx_desc *rx_desc, *lrx_desc;
2633 	struct ath12k_skb_rxcb *rxcb;
2634 	struct sk_buff *last_buf;
2635 	u8 l3_pad_bytes;
2636 	u16 msdu_len;
2637 	int ret;
2638 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
2639 
2640 	last_buf = ath12k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2641 	if (!last_buf) {
2642 		ath12k_warn(ab,
2643 			    "No valid Rx buffer to access MSDU_END tlv\n");
2644 		ret = -EIO;
2645 		goto free_out;
2646 	}
2647 
2648 	rx_desc = (struct hal_rx_desc *)msdu->data;
2649 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2650 	if (!ath12k_dp_rx_h_msdu_done(ab, lrx_desc)) {
2651 		ath12k_warn(ab, "msdu_done bit in msdu_end is not set\n");
2652 		ret = -EIO;
2653 		goto free_out;
2654 	}
2655 
2656 	rxcb = ATH12K_SKB_RXCB(msdu);
2657 	rxcb->rx_desc = rx_desc;
2658 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, lrx_desc);
2659 	l3_pad_bytes = ath12k_dp_rx_h_l3pad(ab, lrx_desc);
2660 
2661 	if (rxcb->is_frag) {
2662 		skb_pull(msdu, hal_rx_desc_sz);
2663 	} else if (!rxcb->is_continuation) {
2664 		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2665 			ret = -EINVAL;
2666 			ath12k_warn(ab, "invalid msdu len %u\n", msdu_len);
2667 			ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
2668 					sizeof(*rx_desc));
2669 			goto free_out;
2670 		}
2671 		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2672 		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2673 	} else {
2674 		ret = ath12k_dp_rx_msdu_coalesce(ar, msdu_list,
2675 						 msdu, last_buf,
2676 						 l3_pad_bytes, msdu_len);
2677 		if (ret) {
2678 			ath12k_warn(ab,
2679 				    "failed to coalesce msdu rx buffer: %d\n", ret);
2680 			goto free_out;
2681 		}
2682 	}
2683 
2684 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu))) {
2685 		ret = -EINVAL;
2686 		goto free_out;
2687 	}
2688 
2689 	ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
2690 	ath12k_dp_rx_h_ppdu(ar, rx_info);
2691 	ath12k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_info);
2692 
2693 	rx_info->rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2694 
2695 	return 0;
2696 
2697 free_out:
2698 	return ret;
2699 }
2700 
2701 static void ath12k_dp_rx_process_received_packets(struct ath12k_base *ab,
2702 						  struct napi_struct *napi,
2703 						  struct sk_buff_head *msdu_list,
2704 						  int ring_id)
2705 {
2706 	struct ath12k_hw_group *ag = ab->ag;
2707 	struct ieee80211_rx_status rx_status = {0};
2708 	struct ath12k_skb_rxcb *rxcb;
2709 	struct sk_buff *msdu;
2710 	struct ath12k *ar;
2711 	struct ath12k_hw_link *hw_links = ag->hw_links;
2712 	struct ath12k_base *partner_ab;
2713 	struct ath12k_dp_rx_info rx_info;
2714 	u8 hw_link_id, pdev_id;
2715 	int ret;
2716 
2717 	if (skb_queue_empty(msdu_list))
2718 		return;
2719 
2720 	rx_info.addr2_present = false;
2721 	rx_info.rx_status = &rx_status;
2722 
2723 	rcu_read_lock();
2724 
2725 	while ((msdu = __skb_dequeue(msdu_list))) {
2726 		rxcb = ATH12K_SKB_RXCB(msdu);
2727 		hw_link_id = rxcb->hw_link_id;
2728 		partner_ab = ath12k_ag_to_ab(ag,
2729 					     hw_links[hw_link_id].device_id);
2730 		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
2731 						      hw_links[hw_link_id].pdev_idx);
2732 		ar = partner_ab->pdevs[pdev_id].ar;
2733 		if (!rcu_dereference(partner_ab->pdevs_active[pdev_id])) {
2734 			dev_kfree_skb_any(msdu);
2735 			continue;
2736 		}
2737 
2738 		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
2739 			dev_kfree_skb_any(msdu);
2740 			continue;
2741 		}
2742 
2743 		ret = ath12k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_info);
2744 		if (ret) {
2745 			ath12k_dbg(ab, ATH12K_DBG_DATA,
2746 				   "Unable to process msdu %d\n", ret);
2747 			dev_kfree_skb_any(msdu);
2748 			continue;
2749 		}
2750 
2751 		ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
2752 	}
2753 
2754 	rcu_read_unlock();
2755 }
2756 
2757 static u16 ath12k_dp_rx_get_peer_id(struct ath12k_base *ab,
2758 				    enum ath12k_peer_metadata_version ver,
2759 				    __le32 peer_metadata)
2760 {
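	/* The default case is placed first so that unknown metadata
	 * versions fall through to the v0 peer id layout.
	 */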
2761 	switch (ver) {
2762 	default:
2763 		ath12k_warn(ab, "Unknown peer metadata version: %d\n", ver);
2764 		fallthrough;
2765 	case ATH12K_PEER_METADATA_V0:
2766 		return le32_get_bits(peer_metadata,
2767 				     RX_MPDU_DESC_META_DATA_V0_PEER_ID);
2768 	case ATH12K_PEER_METADATA_V1:
2769 		return le32_get_bits(peer_metadata,
2770 				     RX_MPDU_DESC_META_DATA_V1_PEER_ID);
2771 	case ATH12K_PEER_METADATA_V1A:
2772 		return le32_get_bits(peer_metadata,
2773 				     RX_MPDU_DESC_META_DATA_V1A_PEER_ID);
2774 	case ATH12K_PEER_METADATA_V1B:
2775 		return le32_get_bits(peer_metadata,
2776 				     RX_MPDU_DESC_META_DATA_V1B_PEER_ID);
2777 	}
2778 }
2779 
2780 int ath12k_dp_rx_process(struct ath12k_base *ab, int ring_id,
2781 			 struct napi_struct *napi, int budget)
2782 {
2783 	struct ath12k_hw_group *ag = ab->ag;
2784 	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
2785 	struct ath12k_hw_link *hw_links = ag->hw_links;
2786 	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
2787 	struct ath12k_rx_desc_info *desc_info;
2788 	struct ath12k_dp *dp = &ab->dp;
2789 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
2790 	struct hal_reo_dest_ring *desc;
2791 	struct ath12k_base *partner_ab;
2792 	struct sk_buff_head msdu_list;
2793 	struct ath12k_skb_rxcb *rxcb;
2794 	int total_msdu_reaped = 0;
2795 	u8 hw_link_id, device_id;
2796 	struct hal_srng *srng;
2797 	struct sk_buff *msdu;
2798 	bool done = false;
2799 	u64 desc_va;
2800 
2801 	__skb_queue_head_init(&msdu_list);
2802 
2803 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
2804 		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
2805 
2806 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2807 
2808 	spin_lock_bh(&srng->lock);
2809 
2810 try_again:
2811 	ath12k_hal_srng_access_begin(ab, srng);
2812 
2813 	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
2814 		struct rx_mpdu_desc *mpdu_info;
2815 		struct rx_msdu_desc *msdu_info;
2816 		enum hal_reo_dest_ring_push_reason push_reason;
2817 		u32 cookie;
2818 
2819 		cookie = le32_get_bits(desc->buf_addr_info.info1,
2820 				       BUFFER_ADDR_INFO1_SW_COOKIE);
2821 
2822 		hw_link_id = le32_get_bits(desc->info0,
2823 					   HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
2824 
2825 		desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
2826 			   le32_to_cpu(desc->buf_va_lo));
2827 		desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
2828 
2829 		device_id = hw_links[hw_link_id].device_id;
2830 		partner_ab = ath12k_ag_to_ab(ag, device_id);
2831 		if (unlikely(!partner_ab)) {
2832 			if (desc_info->skb) {
2833 				dev_kfree_skb_any(desc_info->skb);
2834 				desc_info->skb = NULL;
2835 			}
2836 
2837 			continue;
2838 		}
2839 
2840 		/* fall back to manual desc retrieval via the cookie */
2841 		if (!desc_info) {
2842 			desc_info = ath12k_dp_get_rx_desc(partner_ab, cookie);
2843 			if (!desc_info) {
2844 				ath12k_warn(partner_ab, "Invalid cookie in manual descriptor retrieval: 0x%x\n",
2845 					    cookie);
2846 				continue;
2847 			}
2848 		}
2849 
2850 		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
2851 			ath12k_warn(ab, "Check HW CC implementation\n");
2852 
2853 		msdu = desc_info->skb;
2854 		desc_info->skb = NULL;
2855 
2856 		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
2857 
2858 		rxcb = ATH12K_SKB_RXCB(msdu);
2859 		dma_unmap_single(partner_ab->dev, rxcb->paddr,
2860 				 msdu->len + skb_tailroom(msdu),
2861 				 DMA_FROM_DEVICE);
2862 
2863 		num_buffs_reaped[device_id]++;
2864 		ab->device_stats.reo_rx[ring_id][ab->device_id]++;
2865 
2866 		push_reason = le32_get_bits(desc->info0,
2867 					    HAL_REO_DEST_RING_INFO0_PUSH_REASON);
2868 		if (push_reason !=
2869 		    HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION) {
2870 			dev_kfree_skb_any(msdu);
2871 			ab->device_stats.hal_reo_error[ring_id]++;
2872 			continue;
2873 		}
2874 
2875 		msdu_info = &desc->rx_msdu_info;
2876 		mpdu_info = &desc->rx_mpdu_info;
2877 
2878 		rxcb->is_first_msdu = !!(le32_to_cpu(msdu_info->info0) &
2879 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2880 		rxcb->is_last_msdu = !!(le32_to_cpu(msdu_info->info0) &
2881 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2882 		rxcb->is_continuation = !!(le32_to_cpu(msdu_info->info0) &
2883 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2884 		rxcb->hw_link_id = hw_link_id;
2885 		rxcb->peer_id = ath12k_dp_rx_get_peer_id(ab, dp->peer_metadata_ver,
2886 							 mpdu_info->peer_meta_data);
2887 		rxcb->tid = le32_get_bits(mpdu_info->info0,
2888 					  RX_MPDU_DESC_INFO0_TID);
2889 
2890 		__skb_queue_tail(&msdu_list, msdu);
2891 
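		/* A buffer without the continuation bit completes an MSDU;
		 * only at such a boundary may the ring walk stop, so a
		 * multi-buffer MSDU is never left partially reaped.
		 */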
2892 		if (!rxcb->is_continuation) {
2893 			total_msdu_reaped++;
2894 			done = true;
2895 		} else {
2896 			done = false;
2897 		}
2898 
2899 		if (total_msdu_reaped >= budget)
2900 			break;
2901 	}
2902 
2903 	/* Hw might have updated the head pointer after we cached it.
2904 	 * In this case, even though there are entries in the ring we'll
2905 	 * get rx_desc NULL. Give the read another try with updated cached
2906 	 * head pointer so that we can reap complete MPDU in the current
2907 	 * rx processing.
2908 	 */
2909 	if (!done && ath12k_hal_srng_dst_num_free(ab, srng, true)) {
2910 		ath12k_hal_srng_access_end(ab, srng);
2911 		goto try_again;
2912 	}
2913 
2914 	ath12k_hal_srng_access_end(ab, srng);
2915 
2916 	spin_unlock_bh(&srng->lock);
2917 
2918 	if (!total_msdu_reaped)
2919 		goto exit;
2920 
2921 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
2922 		if (!num_buffs_reaped[device_id])
2923 			continue;
2924 
2925 		partner_ab = ath12k_ag_to_ab(ag, device_id);
2926 		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
2927 
2928 		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
2929 					    &rx_desc_used_list[device_id],
2930 					    num_buffs_reaped[device_id]);
2931 	}
2932 
2933 	ath12k_dp_rx_process_received_packets(ab, napi, &msdu_list,
2934 					      ring_id);
2935 
2936 exit:
2937 	return total_msdu_reaped;
2938 }
2939 
2940 static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
2941 {
2942 	struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
2943 							     frag_timer);
2944 
2945 	spin_lock_bh(&rx_tid->ab->base_lock);
2946 	if (rx_tid->last_frag_no &&
2947 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
2948 		spin_unlock_bh(&rx_tid->ab->base_lock);
2949 		return;
2950 	}
2951 	ath12k_dp_rx_frags_cleanup(rx_tid, true);
2952 	spin_unlock_bh(&rx_tid->ab->base_lock);
2953 }
2954 
2955 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
2956 {
2957 	struct ath12k_base *ab = ar->ab;
2958 	struct crypto_shash *tfm;
2959 	struct ath12k_peer *peer;
2960 	struct ath12k_dp_rx_tid *rx_tid;
2961 	int i;
2962 
2963 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
2964 	if (IS_ERR(tfm))
2965 		return PTR_ERR(tfm);
2966 
2967 	spin_lock_bh(&ab->base_lock);
2968 
2969 	peer = ath12k_peer_find(ab, vdev_id, peer_mac);
2970 	if (!peer) {
2971 		spin_unlock_bh(&ab->base_lock);
2972 		crypto_free_shash(tfm);
2973 		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
2974 		return -ENOENT;
2975 	}
2976 
2977 	if (!peer->primary_link) {
2978 		spin_unlock_bh(&ab->base_lock);
2979 		crypto_free_shash(tfm);
2980 		return 0;
2981 	}
2982 
2983 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
2984 		rx_tid = &peer->rx_tid[i];
2985 		rx_tid->ab = ab;
2986 		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
2987 		skb_queue_head_init(&rx_tid->rx_frags);
2988 	}
2989 
2990 	peer->tfm_mmic = tfm;
2991 	peer->dp_setup_done = true;
2992 	spin_unlock_bh(&ab->base_lock);
2993 
2994 	return 0;
2995 }
2996 
2997 static int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
2998 				      struct ieee80211_hdr *hdr, u8 *data,
2999 				      size_t data_len, u8 *mic)
3000 {
3001 	SHASH_DESC_ON_STACK(desc, tfm);
3002 	u8 mic_hdr[16] = {0};
3003 	u8 tid = 0;
3004 	int ret;
3005 
3006 	if (!tfm)
3007 		return -EINVAL;
3008 
3009 	desc->tfm = tfm;
3010 
3011 	ret = crypto_shash_setkey(tfm, key, 8);
3012 	if (ret)
3013 		goto out;
3014 
3015 	ret = crypto_shash_init(desc);
3016 	if (ret)
3017 		goto out;
3018 
3019 	/* TKIP MIC header */
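	/* Michael pseudo-header: DA (6) | SA (6) | priority (1) | 3 zero bytes */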
3020 	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3021 	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3022 	if (ieee80211_is_data_qos(hdr->frame_control))
3023 		tid = ieee80211_get_tid(hdr);
3024 	mic_hdr[12] = tid;
3025 
3026 	ret = crypto_shash_update(desc, mic_hdr, 16);
3027 	if (ret)
3028 		goto out;
3029 	ret = crypto_shash_update(desc, data, data_len);
3030 	if (ret)
3031 		goto out;
3032 	ret = crypto_shash_final(desc, mic);
3033 out:
3034 	shash_desc_zero(desc);
3035 	return ret;
3036 }
3037 
3038 static int ath12k_dp_rx_h_verify_tkip_mic(struct ath12k *ar, struct ath12k_peer *peer,
3039 					  struct sk_buff *msdu)
3040 {
3041 	struct ath12k_base *ab = ar->ab;
3042 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3043 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3044 	struct ieee80211_key_conf *key_conf;
3045 	struct ieee80211_hdr *hdr;
3046 	struct ath12k_dp_rx_info rx_info;
3047 	u8 mic[IEEE80211_CCMP_MIC_LEN];
3048 	int head_len, tail_len, ret;
3049 	size_t data_len;
3050 	u32 hdr_len, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3051 	u8 *key, *data;
3052 	u8 key_idx;
3053 
3054 	if (ath12k_dp_rx_h_enctype(ab, rx_desc) != HAL_ENCRYPT_TYPE_TKIP_MIC)
3055 		return 0;
3056 
3057 	rx_info.addr2_present = false;
3058 	rx_info.rx_status = rxs;
3059 
3060 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3061 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
3062 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3063 	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3064 
3065 	if (!is_multicast_ether_addr(hdr->addr1))
3066 		key_idx = peer->ucast_keyidx;
3067 	else
3068 		key_idx = peer->mcast_keyidx;
3069 
3070 	key_conf = peer->keys[key_idx];
3071 
3072 	data = msdu->data + head_len;
3073 	data_len = msdu->len - head_len - tail_len;
3074 	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3075 
3076 	ret = ath12k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3077 	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3078 		goto mic_fail;
3079 
3080 	return 0;
3081 
3082 mic_fail:
3083 	ATH12K_SKB_RXCB(msdu)->is_first_msdu = true;
3084 	ATH12K_SKB_RXCB(msdu)->is_last_msdu = true;
3085 
3086 	ath12k_dp_rx_h_fetch_info(ab, rx_desc, &rx_info);
3087 
3088 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3089 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3090 	skb_pull(msdu, hal_rx_desc_sz);
3091 
3092 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, rx_desc, msdu)))
3093 		return -EINVAL;
3094 
3095 	ath12k_dp_rx_h_ppdu(ar, &rx_info);
3096 	ath12k_dp_rx_h_undecap(ar, msdu, rx_desc,
3097 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3098 	ieee80211_rx(ath12k_ar_to_hw(ar), msdu);
3099 	return -EINVAL;
3100 }
3101 
3102 static void ath12k_dp_rx_h_undecap_frag(struct ath12k *ar, struct sk_buff *msdu,
3103 					enum hal_encrypt_type enctype, u32 flags)
3104 {
3105 	struct ieee80211_hdr *hdr;
3106 	size_t hdr_len;
3107 	size_t crypto_len;
3108 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3109 
3110 	if (!flags)
3111 		return;
3112 
3113 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3114 
3115 	if (flags & RX_FLAG_MIC_STRIPPED)
3116 		skb_trim(msdu, msdu->len -
3117 			 ath12k_dp_rx_crypto_mic_len(ar, enctype));
3118 
3119 	if (flags & RX_FLAG_ICV_STRIPPED)
3120 		skb_trim(msdu, msdu->len -
3121 			 ath12k_dp_rx_crypto_icv_len(ar, enctype));
3122 
3123 	if (flags & RX_FLAG_IV_STRIPPED) {
3124 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
3125 		crypto_len = ath12k_dp_rx_crypto_param_len(ar, enctype);
3126 
3127 		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
3128 			msdu->data + hal_rx_desc_sz, hdr_len);
3129 		skb_pull(msdu, crypto_len);
3130 	}
3131 }
3132 
3133 static int ath12k_dp_rx_h_defrag(struct ath12k *ar,
3134 				 struct ath12k_peer *peer,
3135 				 struct ath12k_dp_rx_tid *rx_tid,
3136 				 struct sk_buff **defrag_skb)
3137 {
3138 	struct ath12k_base *ab = ar->ab;
3139 	struct hal_rx_desc *rx_desc;
3140 	struct sk_buff *skb, *first_frag, *last_frag;
3141 	struct ieee80211_hdr *hdr;
3142 	enum hal_encrypt_type enctype;
3143 	bool is_decrypted = false;
3144 	int msdu_len = 0;
3145 	int extra_space;
3146 	u32 flags, hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3147 
3148 	first_frag = skb_peek(&rx_tid->rx_frags);
3149 	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3150 
3151 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3152 		flags = 0;
3153 		rx_desc = (struct hal_rx_desc *)skb->data;
3154 		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3155 
3156 		enctype = ath12k_dp_rx_h_enctype(ab, rx_desc);
3157 		if (enctype != HAL_ENCRYPT_TYPE_OPEN)
3158 			is_decrypted = ath12k_dp_rx_h_is_decrypted(ab,
3159 								   rx_desc);
3160 
3161 		if (is_decrypted) {
3162 			if (skb != first_frag)
3163 				flags |= RX_FLAG_IV_STRIPPED;
3164 			if (skb != last_frag)
3165 				flags |= RX_FLAG_ICV_STRIPPED |
3166 					 RX_FLAG_MIC_STRIPPED;
3167 		}
3168 
3169 		/* RX fragments are always raw packets */
3170 		if (skb != last_frag)
3171 			skb_trim(skb, skb->len - FCS_LEN);
3172 		ath12k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3173 
3174 		if (skb != first_frag)
3175 			skb_pull(skb, hal_rx_desc_sz +
3176 				      ieee80211_hdrlen(hdr->frame_control));
3177 		msdu_len += skb->len;
3178 	}
3179 
3180 	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3181 	if (extra_space > 0 &&
3182 	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3183 		return -ENOMEM;
3184 
3185 	__skb_unlink(first_frag, &rx_tid->rx_frags);
3186 	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3187 		skb_put_data(first_frag, skb->data, skb->len);
3188 		dev_kfree_skb_any(skb);
3189 	}
3190 
3191 	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3192 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3193 	ATH12K_SKB_RXCB(first_frag)->is_frag = 1;
3194 
3195 	if (ath12k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3196 		first_frag = NULL;
3197 
3198 	*defrag_skb = first_frag;
3199 	return 0;
3200 }
3201 
3202 static int ath12k_dp_rx_h_defrag_reo_reinject(struct ath12k *ar,
3203 					      struct ath12k_dp_rx_tid *rx_tid,
3204 					      struct sk_buff *defrag_skb)
3205 {
3206 	struct ath12k_base *ab = ar->ab;
3207 	struct ath12k_dp *dp = &ab->dp;
3208 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3209 	struct hal_reo_entrance_ring *reo_ent_ring;
3210 	struct hal_reo_dest_ring *reo_dest_ring;
3211 	struct dp_link_desc_bank *link_desc_banks;
3212 	struct hal_rx_msdu_link *msdu_link;
3213 	struct hal_rx_msdu_details *msdu0;
3214 	struct hal_srng *srng;
3215 	dma_addr_t link_paddr, buf_paddr;
3216 	u32 desc_bank, msdu_info, msdu_ext_info, mpdu_info;
3217 	u32 cookie, hal_rx_desc_sz, dest_ring_info0, queue_addr_hi;
3218 	int ret;
3219 	struct ath12k_rx_desc_info *desc_info;
3220 	enum hal_rx_buf_return_buf_manager idle_link_rbm = dp->idle_link_rbm;
3221 	u8 dst_ind;
3222 
3223 	hal_rx_desc_sz = ab->hal.hal_desc_sz;
3224 	link_desc_banks = dp->link_desc_banks;
3225 	reo_dest_ring = rx_tid->dst_ring_desc;
3226 
3227 	ath12k_hal_rx_reo_ent_paddr_get(ab, &reo_dest_ring->buf_addr_info,
3228 					&link_paddr, &cookie);
3229 	desc_bank = u32_get_bits(cookie, DP_LINK_DESC_BANK_MASK);
3230 
3231 	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3232 			(link_paddr - link_desc_banks[desc_bank].paddr));
3233 	msdu0 = &msdu_link->msdu_link[0];
3234 	msdu_ext_info = le32_to_cpu(msdu0->rx_msdu_ext_info.info0);
3235 	dst_ind = u32_get_bits(msdu_ext_info, RX_MSDU_EXT_DESC_INFO0_REO_DEST_IND);
3236 
3237 	memset(msdu0, 0, sizeof(*msdu0));
3238 
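	/* The reinjected defragmented frame is presented as one complete
	 * MSDU: first and last in its MPDU, with no continuation.
	 */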
3239 	msdu_info = u32_encode_bits(1, RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU) |
3240 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU) |
3241 		    u32_encode_bits(0, RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) |
3242 		    u32_encode_bits(defrag_skb->len - hal_rx_desc_sz,
3243 				    RX_MSDU_DESC_INFO0_MSDU_LENGTH) |
3244 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_SA) |
3245 		    u32_encode_bits(1, RX_MSDU_DESC_INFO0_VALID_DA);
3246 	msdu0->rx_msdu_info.info0 = cpu_to_le32(msdu_info);
3247 	msdu0->rx_msdu_ext_info.info0 = cpu_to_le32(msdu_ext_info);
3248 
3249 	/* change msdu len in hal rx desc */
3250 	ath12k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3251 
3252 	buf_paddr = dma_map_single(ab->dev, defrag_skb->data,
3253 				   defrag_skb->len + skb_tailroom(defrag_skb),
3254 				   DMA_TO_DEVICE);
3255 	if (dma_mapping_error(ab->dev, buf_paddr))
3256 		return -ENOMEM;
3257 
3258 	spin_lock_bh(&dp->rx_desc_lock);
3259 	desc_info = list_first_entry_or_null(&dp->rx_desc_free_list,
3260 					     struct ath12k_rx_desc_info,
3261 					     list);
3262 	if (!desc_info) {
3263 		spin_unlock_bh(&dp->rx_desc_lock);
3264 		ath12k_warn(ab, "failed to find rx desc for reinject\n");
3265 		ret = -ENOMEM;
3266 		goto err_unmap_dma;
3267 	}
3268 
3269 	desc_info->skb = defrag_skb;
3270 	desc_info->in_use = true;
3271 
3272 	list_del(&desc_info->list);
3273 	spin_unlock_bh(&dp->rx_desc_lock);
3274 
3275 	ATH12K_SKB_RXCB(defrag_skb)->paddr = buf_paddr;
3276 
3277 	ath12k_hal_rx_buf_addr_info_set(&msdu0->buf_addr_info, buf_paddr,
3278 					desc_info->cookie,
3279 					HAL_RX_BUF_RBM_SW3_BM);
3280 
3281 	/* Fill mpdu details into reo entrance ring */
3282 	srng = &ab->hal.srng_list[dp->reo_reinject_ring.ring_id];
3283 
3284 	spin_lock_bh(&srng->lock);
3285 	ath12k_hal_srng_access_begin(ab, srng);
3286 
3287 	reo_ent_ring = ath12k_hal_srng_src_get_next_entry(ab, srng);
3288 	if (!reo_ent_ring) {
3289 		ath12k_hal_srng_access_end(ab, srng);
3290 		spin_unlock_bh(&srng->lock);
3291 		ret = -ENOSPC;
3292 		goto err_free_desc;
3293 	}
3294 	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3295 
3296 	ath12k_hal_rx_buf_addr_info_set(&reo_ent_ring->buf_addr_info, link_paddr,
3297 					cookie,
3298 					idle_link_rbm);
3299 
3300 	mpdu_info = u32_encode_bits(1, RX_MPDU_DESC_INFO0_MSDU_COUNT) |
3301 		    u32_encode_bits(0, RX_MPDU_DESC_INFO0_FRAG_FLAG) |
3302 		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_RAW_MPDU) |
3303 		    u32_encode_bits(1, RX_MPDU_DESC_INFO0_VALID_PN) |
3304 		    u32_encode_bits(rx_tid->tid, RX_MPDU_DESC_INFO0_TID);
3305 
3306 	reo_ent_ring->rx_mpdu_info.info0 = cpu_to_le32(mpdu_info);
3307 	reo_ent_ring->rx_mpdu_info.peer_meta_data =
3308 		reo_dest_ring->rx_mpdu_info.peer_meta_data;
3309 
3310 	if (ab->hw_params->reoq_lut_support) {
3311 		reo_ent_ring->queue_addr_lo = reo_dest_ring->rx_mpdu_info.peer_meta_data;
3312 		queue_addr_hi = 0;
3313 	} else {
3314 		reo_ent_ring->queue_addr_lo =
3315 				cpu_to_le32(lower_32_bits(rx_tid->qbuf.paddr_aligned));
3316 		queue_addr_hi = upper_32_bits(rx_tid->qbuf.paddr_aligned);
3317 	}
3318 
3319 	reo_ent_ring->info0 = le32_encode_bits(queue_addr_hi,
3320 					       HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI) |
3321 			      le32_encode_bits(dst_ind,
3322 					       HAL_REO_ENTR_RING_INFO0_DEST_IND);
3323 
3324 	reo_ent_ring->info1 = le32_encode_bits(rx_tid->cur_sn,
3325 					       HAL_REO_ENTR_RING_INFO1_MPDU_SEQ_NUM);
3326 	dest_ring_info0 = le32_get_bits(reo_dest_ring->info0,
3327 					HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3328 	reo_ent_ring->info2 =
3329 		cpu_to_le32(u32_get_bits(dest_ring_info0,
3330 					 HAL_REO_ENTR_RING_INFO2_SRC_LINK_ID));
3331 
3332 	ath12k_hal_srng_access_end(ab, srng);
3333 	spin_unlock_bh(&srng->lock);
3334 
3335 	return 0;
3336 
3337 err_free_desc:
3338 	spin_lock_bh(&dp->rx_desc_lock);
3339 	desc_info->in_use = false;
3340 	desc_info->skb = NULL;
3341 	list_add_tail(&desc_info->list, &dp->rx_desc_free_list);
3342 	spin_unlock_bh(&dp->rx_desc_lock);
3343 err_unmap_dma:
3344 	dma_unmap_single(ab->dev, buf_paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3345 			 DMA_TO_DEVICE);
3346 	return ret;
3347 }
3348 
3349 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_base *ab,
3350 				    struct sk_buff *a, struct sk_buff *b)
3351 {
3352 	int frag1, frag2;
3353 
3354 	frag1 = ath12k_dp_rx_h_frag_no(ab, a);
3355 	frag2 = ath12k_dp_rx_h_frag_no(ab, b);
3356 
3357 	return frag1 - frag2;
3358 }
3359 
3360 static void ath12k_dp_rx_h_sort_frags(struct ath12k_base *ab,
3361 				      struct sk_buff_head *frag_list,
3362 				      struct sk_buff *cur_frag)
3363 {
3364 	struct sk_buff *skb;
3365 	int cmp;
3366 
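	/* frag_list is kept sorted in ascending fragment number order;
	 * insert cur_frag before the first entry whose fragment number
	 * is not lower than cur_frag's.
	 */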
3367 	skb_queue_walk(frag_list, skb) {
3368 		cmp = ath12k_dp_rx_h_cmp_frags(ab, skb, cur_frag);
3369 		if (cmp < 0)
3370 			continue;
3371 		__skb_queue_before(frag_list, skb, cur_frag);
3372 		return;
3373 	}
3374 	__skb_queue_tail(frag_list, cur_frag);
3375 }
3376 
3377 static u64 ath12k_dp_rx_h_get_pn(struct ath12k *ar, struct sk_buff *skb)
3378 {
3379 	struct ieee80211_hdr *hdr;
3380 	u64 pn = 0;
3381 	u8 *ehdr;
3382 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3383 
3384 	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3385 	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3386 
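	/* CCMP/GCMP IV layout: PN0, PN1, rsvd, key id, PN2, PN3, PN4, PN5;
	 * bytes 2 and 3 are therefore skipped when rebuilding the 48-bit PN.
	 */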
3387 	pn = ehdr[0];
3388 	pn |= (u64)ehdr[1] << 8;
3389 	pn |= (u64)ehdr[4] << 16;
3390 	pn |= (u64)ehdr[5] << 24;
3391 	pn |= (u64)ehdr[6] << 32;
3392 	pn |= (u64)ehdr[7] << 40;
3393 
3394 	return pn;
3395 }
3396 
3397 static bool
3398 ath12k_dp_rx_h_defrag_validate_incr_pn(struct ath12k *ar, struct ath12k_dp_rx_tid *rx_tid)
3399 {
3400 	struct ath12k_base *ab = ar->ab;
3401 	enum hal_encrypt_type encrypt_type;
3402 	struct sk_buff *first_frag, *skb;
3403 	struct hal_rx_desc *desc;
3404 	u64 last_pn;
3405 	u64 cur_pn;
3406 
3407 	first_frag = skb_peek(&rx_tid->rx_frags);
3408 	desc = (struct hal_rx_desc *)first_frag->data;
3409 
3410 	encrypt_type = ath12k_dp_rx_h_enctype(ab, desc);
3411 	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3412 	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3413 	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3414 	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3415 		return true;
3416 
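	/* Every fragment of a CCMP/GCMP protected MPDU must carry a PN
	 * exactly one greater than that of the preceding fragment.
	 */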
3417 	last_pn = ath12k_dp_rx_h_get_pn(ar, first_frag);
3418 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3419 		if (skb == first_frag)
3420 			continue;
3421 
3422 		cur_pn = ath12k_dp_rx_h_get_pn(ar, skb);
3423 		if (cur_pn != last_pn + 1)
3424 			return false;
3425 		last_pn = cur_pn;
3426 	}
3427 	return true;
3428 }
3429 
3430 static int ath12k_dp_rx_frag_h_mpdu(struct ath12k *ar,
3431 				    struct sk_buff *msdu,
3432 				    struct hal_reo_dest_ring *ring_desc)
3433 {
3434 	struct ath12k_base *ab = ar->ab;
3435 	struct hal_rx_desc *rx_desc;
3436 	struct ath12k_peer *peer;
3437 	struct ath12k_dp_rx_tid *rx_tid;
3438 	struct sk_buff *defrag_skb = NULL;
3439 	u32 peer_id;
3440 	u16 seqno, frag_no;
3441 	u8 tid;
3442 	int ret = 0;
3443 	bool more_frags;
3444 
3445 	rx_desc = (struct hal_rx_desc *)msdu->data;
3446 	peer_id = ath12k_dp_rx_h_peer_id(ab, rx_desc);
3447 	tid = ath12k_dp_rx_h_tid(ab, rx_desc);
3448 	seqno = ath12k_dp_rx_h_seq_no(ab, rx_desc);
3449 	frag_no = ath12k_dp_rx_h_frag_no(ab, msdu);
3450 	more_frags = ath12k_dp_rx_h_more_frags(ab, msdu);
3451 
3452 	if (!ath12k_dp_rx_h_seq_ctrl_valid(ab, rx_desc) ||
3453 	    !ath12k_dp_rx_h_fc_valid(ab, rx_desc) ||
3454 	    tid > IEEE80211_NUM_TIDS)
3455 		return -EINVAL;
3456 
3457 	/* Received an unfragmented packet in the reo
3458 	 * exception ring; this shouldn't happen, as
3459 	 * these packets typically come from the
3460 	 * reo2sw srngs.
3461 	 */
3462 	if (WARN_ON_ONCE(!frag_no && !more_frags))
3463 		return -EINVAL;
3464 
3465 	spin_lock_bh(&ab->base_lock);
3466 	peer = ath12k_peer_find_by_id(ab, peer_id);
3467 	if (!peer) {
3468 		ath12k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3469 			    peer_id);
3470 		ret = -ENOENT;
3471 		goto out_unlock;
3472 	}
3473 
3474 	if (!peer->dp_setup_done) {
3475 		ath12k_warn(ab, "The peer %pM [%d] has uninitialized datapath\n",
3476 			    peer->addr, peer_id);
3477 		ret = -ENOENT;
3478 		goto out_unlock;
3479 	}
3480 
3481 	rx_tid = &peer->rx_tid[tid];
3482 
3483 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3484 	    skb_queue_empty(&rx_tid->rx_frags)) {
3485 		/* Flush stored fragments and start a new sequence */
3486 		ath12k_dp_rx_frags_cleanup(rx_tid, true);
3487 		rx_tid->cur_sn = seqno;
3488 	}
3489 
3490 	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3491 		/* Fragment already present */
3492 		ret = -EINVAL;
3493 		goto out_unlock;
3494 	}
3495 
3496 	if (!rx_tid->rx_frag_bitmap || frag_no > __fls(rx_tid->rx_frag_bitmap))
3497 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3498 	else
3499 		ath12k_dp_rx_h_sort_frags(ab, &rx_tid->rx_frags, msdu);
3500 
3501 	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3502 	if (!more_frags)
3503 		rx_tid->last_frag_no = frag_no;
3504 
3505 	if (frag_no == 0) {
3506 		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3507 						sizeof(*rx_tid->dst_ring_desc),
3508 						GFP_ATOMIC);
3509 		if (!rx_tid->dst_ring_desc) {
3510 			ret = -ENOMEM;
3511 			goto out_unlock;
3512 		}
3513 	} else {
3514 		ath12k_dp_rx_link_desc_return(ab, &ring_desc->buf_addr_info,
3515 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3516 	}
3517 
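	/* Reassembly may start only once fragments 0..last_frag_no are all
	 * present, i.e. the bitmap is fully populated. E.g. with
	 * last_frag_no = 3, GENMASK(3, 0) = 0xf, so all of fragments 0-3
	 * must have been queued; until then, (re)arm the timeout below.
	 */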
3518 	if (!rx_tid->last_frag_no ||
3519 	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3520 		mod_timer(&rx_tid->frag_timer, jiffies +
3521 					       ATH12K_DP_RX_FRAGMENT_TIMEOUT_MS);
3522 		goto out_unlock;
3523 	}
3524 
3525 	spin_unlock_bh(&ab->base_lock);
3526 	timer_delete_sync(&rx_tid->frag_timer);
3527 	spin_lock_bh(&ab->base_lock);
3528 
3529 	peer = ath12k_peer_find_by_id(ab, peer_id);
3530 	if (!peer)
3531 		goto err_frags_cleanup;
3532 
3533 	if (!ath12k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3534 		goto err_frags_cleanup;
3535 
3536 	if (ath12k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3537 		goto err_frags_cleanup;
3538 
3539 	if (!defrag_skb)
3540 		goto err_frags_cleanup;
3541 
3542 	if (ath12k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3543 		goto err_frags_cleanup;
3544 
3545 	ath12k_dp_rx_frags_cleanup(rx_tid, false);
3546 	goto out_unlock;
3547 
3548 err_frags_cleanup:
3549 	dev_kfree_skb_any(defrag_skb);
3550 	ath12k_dp_rx_frags_cleanup(rx_tid, true);
3551 out_unlock:
3552 	spin_unlock_bh(&ab->base_lock);
3553 	return ret;
3554 }
3555 
3556 static int
3557 ath12k_dp_process_rx_err_buf(struct ath12k *ar, struct hal_reo_dest_ring *desc,
3558 			     struct list_head *used_list,
3559 			     bool drop, u32 cookie)
3560 {
3561 	struct ath12k_base *ab = ar->ab;
3562 	struct sk_buff *msdu;
3563 	struct ath12k_skb_rxcb *rxcb;
3564 	struct hal_rx_desc *rx_desc;
3565 	u16 msdu_len;
3566 	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
3567 	struct ath12k_rx_desc_info *desc_info;
3568 	u64 desc_va;
3569 
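	/* The descriptor virtual address arrives split across two 32-bit
	 * fields of the REO ring entry; e.g. buf_va_hi = 0x1 and
	 * buf_va_lo = 0x2000 recombine to the 64-bit value 0x100002000.
	 */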
3570 	desc_va = ((u64)le32_to_cpu(desc->buf_va_hi) << 32 |
3571 		   le32_to_cpu(desc->buf_va_lo));
3572 	desc_info = (struct ath12k_rx_desc_info *)((unsigned long)desc_va);
3573 
3574 	/* retry manual desc retrieval */
3575 	if (!desc_info) {
3576 		desc_info = ath12k_dp_get_rx_desc(ab, cookie);
3577 		if (!desc_info) {
3578 			ath12k_warn(ab, "Invalid cookie in DP rx error descriptor retrieval: 0x%x\n",
3579 				    cookie);
3580 			return -EINVAL;
3581 		}
3582 	}
3583 
3584 	if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
3585 		ath12k_warn(ab, "RX exception, check HW CC implementation");
3586 
3587 	msdu = desc_info->skb;
3588 	desc_info->skb = NULL;
3589 
3590 	list_add_tail(&desc_info->list, used_list);
3591 
3592 	rxcb = ATH12K_SKB_RXCB(msdu);
3593 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3594 			 msdu->len + skb_tailroom(msdu),
3595 			 DMA_FROM_DEVICE);
3596 
3597 	if (drop) {
3598 		dev_kfree_skb_any(msdu);
3599 		return 0;
3600 	}
3601 
3602 	rcu_read_lock();
3603 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3604 		dev_kfree_skb_any(msdu);
3605 		goto exit;
3606 	}
3607 
3608 	if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
3609 		dev_kfree_skb_any(msdu);
3610 		goto exit;
3611 	}
3612 
3613 	rx_desc = (struct hal_rx_desc *)msdu->data;
3614 	msdu_len = ath12k_dp_rx_h_msdu_len(ar->ab, rx_desc);
3615 	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3616 		ath12k_warn(ar->ab, "invalid msdu len %u\n", msdu_len);
3617 		ath12k_dbg_dump(ar->ab, ATH12K_DBG_DATA, NULL, "", rx_desc,
3618 				sizeof(*rx_desc));
3619 		dev_kfree_skb_any(msdu);
3620 		goto exit;
3621 	}
3622 
3623 	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3624 
3625 	if (ath12k_dp_rx_frag_h_mpdu(ar, msdu, desc)) {
3626 		dev_kfree_skb_any(msdu);
3627 		ath12k_dp_rx_link_desc_return(ar->ab, &desc->buf_addr_info,
3628 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3629 	}
3630 exit:
3631 	rcu_read_unlock();
3632 	return 0;
3633 }
3634 
3635 int ath12k_dp_rx_process_err(struct ath12k_base *ab, struct napi_struct *napi,
3636 			     int budget)
3637 {
3638 	struct ath12k_hw_group *ag = ab->ag;
3639 	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
3640 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3641 	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
3642 	struct dp_link_desc_bank *link_desc_banks;
3643 	enum hal_rx_buf_return_buf_manager rbm;
3644 	struct hal_rx_msdu_link *link_desc_va;
3645 	int tot_n_bufs_reaped, quota, ret, i;
3646 	struct hal_reo_dest_ring *reo_desc;
3647 	struct dp_rxdma_ring *rx_ring;
3648 	struct dp_srng *reo_except;
3649 	struct ath12k_hw_link *hw_links = ag->hw_links;
3650 	struct ath12k_base *partner_ab;
3651 	u8 hw_link_id, device_id;
3652 	u32 desc_bank, num_msdus;
3653 	struct hal_srng *srng;
3654 	struct ath12k *ar;
3655 	dma_addr_t paddr;
3656 	bool is_frag;
3657 	bool drop;
3658 	int pdev_id;
3659 
3660 	tot_n_bufs_reaped = 0;
3661 	quota = budget;
3662 
3663 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
3664 		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
3665 
3666 	reo_except = &ab->dp.reo_except_ring;
3667 
3668 	srng = &ab->hal.srng_list[reo_except->ring_id];
3669 
3670 	spin_lock_bh(&srng->lock);
3671 
3672 	ath12k_hal_srng_access_begin(ab, srng);
3673 
3674 	while (budget &&
3675 	       (reo_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
3676 		drop = false;
3677 		ab->device_stats.err_ring_pkts++;
3678 
3679 		ret = ath12k_hal_desc_reo_parse_err(ab, reo_desc, &paddr,
3680 						    &desc_bank);
3681 		if (ret) {
3682 			ath12k_warn(ab, "failed to parse error reo desc %d\n",
3683 				    ret);
3684 			continue;
3685 		}
3686 
3687 		hw_link_id = le32_get_bits(reo_desc->info0,
3688 					   HAL_REO_DEST_RING_INFO0_SRC_LINK_ID);
3689 		device_id = hw_links[hw_link_id].device_id;
3690 		partner_ab = ath12k_ag_to_ab(ag, device_id);
3691 
3692 		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
3693 						      hw_links[hw_link_id].pdev_idx);
3694 		ar = partner_ab->pdevs[pdev_id].ar;
3695 
3696 		link_desc_banks = partner_ab->dp.link_desc_banks;
3697 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3698 			       (paddr - link_desc_banks[desc_bank].paddr);
3699 		ath12k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3700 						 &rbm);
3701 		if (rbm != partner_ab->dp.idle_link_rbm &&
3702 		    rbm != HAL_RX_BUF_RBM_SW3_BM &&
3703 		    rbm != partner_ab->hw_params->hal_params->rx_buf_rbm) {
3704 			ab->device_stats.invalid_rbm++;
3705 			ath12k_warn(ab, "invalid return buffer manager %d\n", rbm);
3706 			ath12k_dp_rx_link_desc_return(partner_ab,
3707 						      &reo_desc->buf_addr_info,
3708 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3709 			continue;
3710 		}
3711 
3712 		is_frag = !!(le32_to_cpu(reo_desc->rx_mpdu_info.info0) &
3713 			     RX_MPDU_DESC_INFO0_FRAG_FLAG);
3714 
3715 		/* Process only rx fragments with one msdu per link desc below,
3716 		 * and drop msdus indicated due to error reasons.
3717 		 * Dynamic fragmentation is not supported for multi-link
3718 		 * clients, so drop the partner device buffers.
3719 		 */
3720 		if (!is_frag || num_msdus > 1 ||
3721 		    partner_ab->device_id != ab->device_id) {
3722 			drop = true;
3723 
3724 			/* Return the link desc back to wbm idle list */
3725 			ath12k_dp_rx_link_desc_return(partner_ab,
3726 						      &reo_desc->buf_addr_info,
3727 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3728 		}
3729 
3730 		for (i = 0; i < num_msdus; i++) {
3731 			if (!ath12k_dp_process_rx_err_buf(ar, reo_desc,
3732 							  &rx_desc_used_list[device_id],
3733 							  drop,
3734 							  msdu_cookies[i])) {
3735 				num_buffs_reaped[device_id]++;
3736 				tot_n_bufs_reaped++;
3737 			}
3738 		}
3739 
3740 		if (tot_n_bufs_reaped >= quota) {
3741 			tot_n_bufs_reaped = quota;
3742 			goto exit;
3743 		}
3744 
3745 		budget = quota - tot_n_bufs_reaped;
3746 	}
3747 
3748 exit:
3749 	ath12k_hal_srng_access_end(ab, srng);
3750 
3751 	spin_unlock_bh(&srng->lock);
3752 
3753 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
3754 		if (!num_buffs_reaped[device_id])
3755 			continue;
3756 
3757 		partner_ab = ath12k_ag_to_ab(ag, device_id);
3758 		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
3759 
3760 		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
3761 					    &rx_desc_used_list[device_id],
3762 					    num_buffs_reaped[device_id]);
3763 	}
3764 
3765 	return tot_n_bufs_reaped;
3766 }
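
/*
 * Budget sketch for the reap loop above: with budget = 32 (so quota = 32)
 * and a link desc yielding two fragment msdus, tot_n_bufs_reaped grows by
 * 2 and the next iteration runs with budget = quota - tot_n_bufs_reaped =
 * 30; once the quota is met the loop exits and at most `quota` reaped
 * buffers are reported to the caller.
 */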
3767 
3768 static void ath12k_dp_rx_null_q_desc_sg_drop(struct ath12k *ar,
3769 					     int msdu_len,
3770 					     struct sk_buff_head *msdu_list)
3771 {
3772 	struct sk_buff *skb, *tmp;
3773 	struct ath12k_skb_rxcb *rxcb;
3774 	int n_buffs;
3775 
3776 	n_buffs = DIV_ROUND_UP(msdu_len,
3777 			       (DP_RX_BUFFER_SIZE - ar->ab->hal.hal_desc_sz));
3778 
3779 	skb_queue_walk_safe(msdu_list, skb, tmp) {
3780 		rxcb = ATH12K_SKB_RXCB(skb);
3781 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3782 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3783 			if (!n_buffs)
3784 				break;
3785 			__skb_unlink(skb, msdu_list);
3786 			dev_kfree_skb_any(skb);
3787 			n_buffs--;
3788 		}
3789 	}
3790 }
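
/*
 * Worked example for the n_buffs computation above, with illustrative
 * (not chip-accurate) sizes DP_RX_BUFFER_SIZE = 2048 and hal_desc_sz = 128:
 * each buffer then carries 1920 payload bytes, so a remaining msdu_len of
 * 5000 spans DIV_ROUND_UP(5000, 1920) = 3 buffers, and the walk unlinks
 * and frees exactly that many scatter buffers from the msdu list.
 */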
3791 
3792 static int ath12k_dp_rx_h_null_q_desc(struct ath12k *ar, struct sk_buff *msdu,
3793 				      struct ath12k_dp_rx_info *rx_info,
3794 				      struct sk_buff_head *msdu_list)
3795 {
3796 	struct ath12k_base *ab = ar->ab;
3797 	u16 msdu_len;
3798 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3799 	u8 l3pad_bytes;
3800 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3801 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3802 
3803 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3804 
3805 	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3806 		/* First buffer will be freed by the caller, so deduct its length */
3807 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3808 		ath12k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3809 		return -EINVAL;
3810 	}
3811 
3812 	/* Even after cleaning up the sg buffers in the msdu list with the above
3813 	 * check, any msdu received with the continuation flag must be dropped as
3814 	 * invalid. This protects against a stray error frame with the flag set.
3815 	 */
3816 	if (rxcb->is_continuation)
3817 		return -EINVAL;
3818 
3819 	if (!ath12k_dp_rx_h_msdu_done(ab, desc)) {
3820 		ath12k_warn(ar->ab,
3821 			    "msdu_done bit not set in null_q_des processing\n");
3822 		__skb_queue_purge(msdu_list);
3823 		return -EIO;
3824 	}
3825 
3826 	/* Handle NULL queue descriptor violations arising out of a missing
3827 	 * REO queue for a given peer or TID. This typically happens if a
3828 	 * packet is received on a QoS-enabled TID before the ADDBA
3829 	 * negotiation for that TID, i.e. before the TID queue is set up.
3830 	 * It may also happen for MC/BC frames if they are not routed to the
3831 	 * non-QoS TID queue, in the absence of any other default TID queue.
3832 	 * This error can show up in either a REO destination or WBM release ring.
3833 	 */
3834 
3835 	if (rxcb->is_frag) {
3836 		skb_pull(msdu, hal_rx_desc_sz);
3837 	} else {
3838 		l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3839 
3840 		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3841 			return -EINVAL;
3842 
3843 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3844 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3845 	}
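
	/* Buffer layout after the skb_put()/skb_pull() pair in the
	 * non-fragment path above, with illustrative sizes
	 * (hal_rx_desc_sz = 128, l3pad_bytes = 2):
	 *
	 *	[ rx desc: 128 ][ pad: 2 ][ msdu payload: msdu_len ]
	 *	                          ^ skb->data
	 *
	 * i.e. descriptor and padding are stripped so only the payload
	 * remains between skb->data and the skb tail.
	 */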
3846 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
3847 		return -EINVAL;
3848 
3849 	ath12k_dp_rx_h_fetch_info(ab, desc, rx_info);
3850 	ath12k_dp_rx_h_ppdu(ar, rx_info);
3851 	ath12k_dp_rx_h_mpdu(ar, msdu, desc, rx_info);
3852 
3853 	rxcb->tid = rx_info->tid;
3854 
3855 	/* Note that the caller retains access to the msdu and completes rx
3856 	 * with mac80211; there is no need to clean up amsdu_list here.
3857 	 */
3858 
3859 	return 0;
3860 }
3861 
3862 static bool ath12k_dp_rx_h_reo_err(struct ath12k *ar, struct sk_buff *msdu,
3863 				   struct ath12k_dp_rx_info *rx_info,
3864 				   struct sk_buff_head *msdu_list)
3865 {
3866 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3867 	bool drop = false;
3868 
3869 	ar->ab->device_stats.reo_error[rxcb->err_code]++;
3870 
3871 	switch (rxcb->err_code) {
3872 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3873 		if (ath12k_dp_rx_h_null_q_desc(ar, msdu, rx_info, msdu_list))
3874 			drop = true;
3875 		break;
3876 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
3877 		/* TODO: Do not drop PN failed packets in the driver;
3878 		 * instead, it is good to drop such packets in mac80211
3879 		 * after incrementing the replay counters.
3880 		 */
3881 		fallthrough;
3882 	default:
3883 		/* TODO: Review other errors and report them to mac80211
3884 		 * as appropriate.
3885 		 */
3886 		drop = true;
3887 		break;
3888 	}
3889 
3890 	return drop;
3891 }
3892 
3893 static bool ath12k_dp_rx_h_tkip_mic_err(struct ath12k *ar, struct sk_buff *msdu,
3894 					struct ath12k_dp_rx_info *rx_info)
3895 {
3896 	struct ath12k_base *ab = ar->ab;
3897 	u16 msdu_len;
3898 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3899 	u8 l3pad_bytes;
3900 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3901 	u32 hal_rx_desc_sz = ar->ab->hal.hal_desc_sz;
3902 
3903 	rxcb->is_first_msdu = ath12k_dp_rx_h_first_msdu(ab, desc);
3904 	rxcb->is_last_msdu = ath12k_dp_rx_h_last_msdu(ab, desc);
3905 
3906 	l3pad_bytes = ath12k_dp_rx_h_l3pad(ab, desc);
3907 	msdu_len = ath12k_dp_rx_h_msdu_len(ab, desc);
3908 
3909 	if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE) {
3910 		ath12k_dbg(ab, ATH12K_DBG_DATA,
3911 			   "invalid msdu len in tkip mic err %u\n", msdu_len);
3912 		ath12k_dbg_dump(ab, ATH12K_DBG_DATA, NULL, "", desc,
3913 				sizeof(*desc));
3914 		return true;
3915 	}
3916 
3917 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3918 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3919 
3920 	if (unlikely(!ath12k_dp_rx_check_nwifi_hdr_len_valid(ab, desc, msdu)))
3921 		return true;
3922 
3923 	ath12k_dp_rx_h_ppdu(ar, rx_info);
3924 
3925 	rx_info->rx_status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
3926 				     RX_FLAG_DECRYPTED);
3927 
3928 	ath12k_dp_rx_h_undecap(ar, msdu, desc,
3929 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rx_info->rx_status, false);
3930 	return false;
3931 }
3932 
3933 static bool ath12k_dp_rx_h_rxdma_err(struct ath12k *ar, struct sk_buff *msdu,
3934 				     struct ath12k_dp_rx_info *rx_info)
3935 {
3936 	struct ath12k_base *ab = ar->ab;
3937 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3938 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3939 	bool drop = false;
3940 	u32 err_bitmap;
3941 
3942 	ar->ab->device_stats.rxdma_error[rxcb->err_code]++;
3943 
3944 	switch (rxcb->err_code) {
3945 	case HAL_REO_ENTR_RING_RXDMA_ECODE_DECRYPT_ERR:
3946 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
3947 		err_bitmap = ath12k_dp_rx_h_mpdu_err(ab, rx_desc);
3948 		if (err_bitmap & HAL_RX_MPDU_ERR_TKIP_MIC) {
3949 			ath12k_dp_rx_h_fetch_info(ab, rx_desc, rx_info);
3950 			drop = ath12k_dp_rx_h_tkip_mic_err(ar, msdu, rx_info);
3951 			break;
3952 		}
3953 		fallthrough;
3954 	default:
3955 		/* TODO: Review other rxdma error codes to check if anything is
3956 		 * worth reporting to mac80211
3957 		 */
3958 		drop = true;
3959 		break;
3960 	}
3961 
3962 	return drop;
3963 }
3964 
3965 static void ath12k_dp_rx_wbm_err(struct ath12k *ar,
3966 				 struct napi_struct *napi,
3967 				 struct sk_buff *msdu,
3968 				 struct sk_buff_head *msdu_list)
3969 {
3970 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
3971 	struct ieee80211_rx_status rxs = {0};
3972 	struct ath12k_dp_rx_info rx_info;
3973 	bool drop = true;
3974 
3975 	rx_info.addr2_present = false;
3976 	rx_info.rx_status = &rxs;
3977 
3978 	switch (rxcb->err_rel_src) {
3979 	case HAL_WBM_REL_SRC_MODULE_REO:
3980 		drop = ath12k_dp_rx_h_reo_err(ar, msdu, &rx_info, msdu_list);
3981 		break;
3982 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
3983 		drop = ath12k_dp_rx_h_rxdma_err(ar, msdu, &rx_info);
3984 		break;
3985 	default:
3986 		/* msdu will get freed */
3987 		break;
3988 	}
3989 
3990 	if (drop) {
3991 		dev_kfree_skb_any(msdu);
3992 		return;
3993 	}
3994 
3995 	ath12k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_info);
3996 }
3997 
3998 int ath12k_dp_rx_process_wbm_err(struct ath12k_base *ab,
3999 				 struct napi_struct *napi, int budget)
4000 {
4001 	struct list_head rx_desc_used_list[ATH12K_MAX_DEVICES];
4002 	struct ath12k_hw_group *ag = ab->ag;
4003 	struct ath12k *ar;
4004 	struct ath12k_dp *dp = &ab->dp;
4005 	struct dp_rxdma_ring *rx_ring;
4006 	struct hal_rx_wbm_rel_info err_info;
4007 	struct hal_srng *srng;
4008 	struct sk_buff *msdu;
4009 	struct sk_buff_head msdu_list, scatter_msdu_list;
4010 	struct ath12k_skb_rxcb *rxcb;
4011 	void *rx_desc;
4012 	int num_buffs_reaped[ATH12K_MAX_DEVICES] = {};
4013 	int total_num_buffs_reaped = 0;
4014 	struct ath12k_rx_desc_info *desc_info;
4015 	struct ath12k_device_dp_stats *device_stats = &ab->device_stats;
4016 	struct ath12k_hw_link *hw_links = ag->hw_links;
4017 	struct ath12k_base *partner_ab;
4018 	u8 hw_link_id, device_id;
4019 	int ret, pdev_id;
4020 	struct hal_rx_desc *msdu_data;
4021 
4022 	__skb_queue_head_init(&msdu_list);
4023 	__skb_queue_head_init(&scatter_msdu_list);
4024 
4025 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++)
4026 		INIT_LIST_HEAD(&rx_desc_used_list[device_id]);
4027 
4028 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4029 	spin_lock_bh(&srng->lock);
4030 
4031 	ath12k_hal_srng_access_begin(ab, srng);
4032 
4033 	while (budget) {
4034 		rx_desc = ath12k_hal_srng_dst_get_next_entry(ab, srng);
4035 		if (!rx_desc)
4036 			break;
4037 
4038 		ret = ath12k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4039 		if (ret) {
4040 			ath12k_warn(ab,
4041 				    "failed to parse rx error in wbm_rel ring desc %d\n",
4042 				    ret);
4043 			continue;
4044 		}
4045 
4046 		desc_info = err_info.rx_desc;
4047 
4048 		/* retry manual desc retrieval if hw cc is not done */
4049 		if (!desc_info) {
4050 			desc_info = ath12k_dp_get_rx_desc(ab, err_info.cookie);
4051 			if (!desc_info) {
4052 				ath12k_warn(ab, "Invalid cookie in DP WBM rx error descriptor retrieval: 0x%x\n",
4053 					    err_info.cookie);
4054 				continue;
4055 			}
4056 		}
4057 
4058 		if (desc_info->magic != ATH12K_DP_RX_DESC_MAGIC)
4059 			ath12k_warn(ab, "WBM RX err, check HW CC implementation");
4060 
4061 		msdu = desc_info->skb;
4062 		desc_info->skb = NULL;
4063 
4064 		device_id = desc_info->device_id;
4065 		partner_ab = ath12k_ag_to_ab(ag, device_id);
4066 		if (unlikely(!partner_ab)) {
4067 			dev_kfree_skb_any(msdu);
4068 
4069 			/* In case the continuation bit was set in the
4070 			 * previous record, clean up scatter_msdu_list
4071 			 */
4072 			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4073 			continue;
4074 		}
4075 
4076 		list_add_tail(&desc_info->list, &rx_desc_used_list[device_id]);
4077 
4078 		rxcb = ATH12K_SKB_RXCB(msdu);
4079 		dma_unmap_single(partner_ab->dev, rxcb->paddr,
4080 				 msdu->len + skb_tailroom(msdu),
4081 				 DMA_FROM_DEVICE);
4082 
4083 		num_buffs_reaped[device_id]++;
4084 		total_num_buffs_reaped++;
4085 
4086 		if (!err_info.continuation)
4087 			budget--;
4088 
4089 		if (err_info.push_reason !=
4090 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4091 			dev_kfree_skb_any(msdu);
4092 			continue;
4093 		}
4094 
4095 		msdu_data = (struct hal_rx_desc *)msdu->data;
4096 		rxcb->err_rel_src = err_info.err_rel_src;
4097 		rxcb->err_code = err_info.err_code;
4098 		rxcb->is_first_msdu = err_info.first_msdu;
4099 		rxcb->is_last_msdu = err_info.last_msdu;
4100 		rxcb->is_continuation = err_info.continuation;
4101 		rxcb->rx_desc = msdu_data;
4102 
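		/* Scatter/gather sketch, assuming one MSDU split across three
		 * buffers: the first two arrive with err_info.continuation set
		 * and are parked on scatter_msdu_list; the final buffer (flag
		 * clear) supplies the hw link id, which is copied to the parked
		 * buffers before the whole set is spliced onto msdu_list below.
		 */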
4103 		if (err_info.continuation) {
4104 			__skb_queue_tail(&scatter_msdu_list, msdu);
4105 			continue;
4106 		}
4107 
4108 		hw_link_id = ath12k_dp_rx_get_msdu_src_link(partner_ab,
4109 							    msdu_data);
4110 		if (hw_link_id >= ATH12K_GROUP_MAX_RADIO) {
4111 			dev_kfree_skb_any(msdu);
4112 
4113 			/* In case the continuation bit was set in the
4114 			 * previous record, clean up scatter_msdu_list
4115 			 */
4116 			ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4117 			continue;
4118 		}
4119 
4120 		if (!skb_queue_empty(&scatter_msdu_list)) {
4121 			struct sk_buff *msdu;
4122 
4123 			skb_queue_walk(&scatter_msdu_list, msdu) {
4124 				rxcb = ATH12K_SKB_RXCB(msdu);
4125 				rxcb->hw_link_id = hw_link_id;
4126 			}
4127 
4128 			skb_queue_splice_tail_init(&scatter_msdu_list,
4129 						   &msdu_list);
4130 		}
4131 
4132 		rxcb = ATH12K_SKB_RXCB(msdu);
4133 		rxcb->hw_link_id = hw_link_id;
4134 		__skb_queue_tail(&msdu_list, msdu);
4135 	}
4136 
4137 	/* In case the continuation bit was set in the
4138 	 * last record, clean up scatter_msdu_list
4139 	 */
4140 	ath12k_dp_clean_up_skb_list(&scatter_msdu_list);
4141 
4142 	ath12k_hal_srng_access_end(ab, srng);
4143 
4144 	spin_unlock_bh(&srng->lock);
4145 
4146 	if (!total_num_buffs_reaped)
4147 		goto done;
4148 
4149 	for (device_id = 0; device_id < ATH12K_MAX_DEVICES; device_id++) {
4150 		if (!num_buffs_reaped[device_id])
4151 			continue;
4152 
4153 		partner_ab = ath12k_ag_to_ab(ag, device_id);
4154 		rx_ring = &partner_ab->dp.rx_refill_buf_ring;
4155 
4156 		ath12k_dp_rx_bufs_replenish(partner_ab, rx_ring,
4157 					    &rx_desc_used_list[device_id],
4158 					    num_buffs_reaped[device_id]);
4159 	}
4160 
4161 	rcu_read_lock();
4162 	while ((msdu = __skb_dequeue(&msdu_list))) {
4163 		rxcb = ATH12K_SKB_RXCB(msdu);
4164 		hw_link_id = rxcb->hw_link_id;
4165 
4166 		device_id = hw_links[hw_link_id].device_id;
4167 		partner_ab = ath12k_ag_to_ab(ag, device_id);
4168 		if (unlikely(!partner_ab)) {
4169 			ath12k_dbg(ab, ATH12K_DBG_DATA,
4170 				   "Unable to process WBM error msdu due to invalid hw link id %d device id %d\n",
4171 				   hw_link_id, device_id);
4172 			dev_kfree_skb_any(msdu);
4173 			continue;
4174 		}
4175 
4176 		pdev_id = ath12k_hw_mac_id_to_pdev_id(partner_ab->hw_params,
4177 						      hw_links[hw_link_id].pdev_idx);
4178 		ar = partner_ab->pdevs[pdev_id].ar;
4179 
4180 		if (!ar || !rcu_dereference(ar->ab->pdevs_active[pdev_id])) {
4181 			dev_kfree_skb_any(msdu);
4182 			continue;
4183 		}
4184 
4185 		if (test_bit(ATH12K_FLAG_CAC_RUNNING, &ar->dev_flags)) {
4186 			dev_kfree_skb_any(msdu);
4187 			continue;
4188 		}
4189 
4190 		if (rxcb->err_rel_src < HAL_WBM_REL_SRC_MODULE_MAX) {
4191 			device_id = ar->ab->device_id;
4192 			device_stats->rx_wbm_rel_source[rxcb->err_rel_src][device_id]++;
4193 		}
4194 
4195 		ath12k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list);
4196 	}
4197 	rcu_read_unlock();
4198 done:
4199 	return total_num_buffs_reaped;
4200 }
4201 
4202 void ath12k_dp_rx_process_reo_status(struct ath12k_base *ab)
4203 {
4204 	struct ath12k_dp *dp = &ab->dp;
4205 	struct hal_tlv_64_hdr *hdr;
4206 	struct hal_srng *srng;
4207 	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
4208 	bool found = false;
4209 	u16 tag;
4210 	struct hal_reo_status reo_status;
4211 
4212 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4213 
4214 	memset(&reo_status, 0, sizeof(reo_status));
4215 
4216 	spin_lock_bh(&srng->lock);
4217 
4218 	ath12k_hal_srng_access_begin(ab, srng);
4219 
4220 	while ((hdr = ath12k_hal_srng_dst_get_next_entry(ab, srng))) {
4221 		tag = le64_get_bits(hdr->tl, HAL_SRNG_TLV_HDR_TAG);
4222 
4223 		switch (tag) {
4224 		case HAL_REO_GET_QUEUE_STATS_STATUS:
4225 			ath12k_hal_reo_status_queue_stats(ab, hdr,
4226 							  &reo_status);
4227 			break;
4228 		case HAL_REO_FLUSH_QUEUE_STATUS:
4229 			ath12k_hal_reo_flush_queue_status(ab, hdr,
4230 							  &reo_status);
4231 			break;
4232 		case HAL_REO_FLUSH_CACHE_STATUS:
4233 			ath12k_hal_reo_flush_cache_status(ab, hdr,
4234 							  &reo_status);
4235 			break;
4236 		case HAL_REO_UNBLOCK_CACHE_STATUS:
4237 			ath12k_hal_reo_unblk_cache_status(ab, hdr,
4238 							  &reo_status);
4239 			break;
4240 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4241 			ath12k_hal_reo_flush_timeout_list_status(ab, hdr,
4242 								 &reo_status);
4243 			break;
4244 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4245 			ath12k_hal_reo_desc_thresh_reached_status(ab, hdr,
4246 								  &reo_status);
4247 			break;
4248 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4249 			ath12k_hal_reo_update_rx_reo_queue_status(ab, hdr,
4250 								  &reo_status);
4251 			break;
4252 		default:
4253 			ath12k_warn(ab, "Unknown reo status type %d\n", tag);
4254 			continue;
4255 		}
4256 
4257 		spin_lock_bh(&dp->reo_cmd_lock);
4258 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4259 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4260 				found = true;
4261 				list_del(&cmd->list);
4262 				break;
4263 			}
4264 		}
4265 		spin_unlock_bh(&dp->reo_cmd_lock);
4266 
4267 		if (found) {
4268 			cmd->handler(dp, (void *)&cmd->data,
4269 				     reo_status.uniform_hdr.cmd_status);
4270 			kfree(cmd);
4271 		}
4272 
4273 		found = false;
4274 	}
4275 
4276 	ath12k_hal_srng_access_end(ab, srng);
4277 
4278 	spin_unlock_bh(&srng->lock);
4279 }
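
/*
 * Pairing sketch for the status handling above, assuming the issuing side
 * (elsewhere in this file) recorded the command like so, where the handler
 * name is illustrative:
 *
 *	cmd->cmd_num = <number returned when the REO command was ringed>;
 *	cmd->handler = tid_delete_handler;
 *	list_add_tail(&cmd->list, &dp->reo_cmd_list);
 *
 * When a status TLV whose uniform_hdr.cmd_num matches arrives, the entry
 * is unlinked under reo_cmd_lock, its handler is invoked with the command
 * status, and the entry is freed.
 */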
4280 
4281 void ath12k_dp_rx_free(struct ath12k_base *ab)
4282 {
4283 	struct ath12k_dp *dp = &ab->dp;
4284 	struct dp_srng *srng;
4285 	int i;
4286 
4287 	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
4288 
4289 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4290 		if (ab->hw_params->rx_mac_buf_ring)
4291 			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
4292 		if (!ab->hw_params->rxdma1_enable) {
4293 			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
4294 			ath12k_dp_srng_cleanup(ab, srng);
4295 		}
4296 	}
4297 
4298 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
4299 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
4300 
4301 	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
4302 
4303 	ath12k_dp_rxdma_buf_free(ab);
4304 }
4305 
4306 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
4307 {
4308 	struct ath12k *ar = ab->pdevs[mac_id].ar;
4309 
4310 	ath12k_dp_rx_pdev_srng_free(ar);
4311 }
4312 
4313 int ath12k_dp_rxdma_ring_sel_config_qcn9274(struct ath12k_base *ab)
4314 {
4315 	struct ath12k_dp *dp = &ab->dp;
4316 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
4317 	u32 ring_id;
4318 	int ret;
4319 	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
4320 
4321 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4322 
4323 	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
4324 	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
4325 	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
4326 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
4327 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
4328 	tlv_filter.offset_valid = true;
4329 	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
4330 
4331 	tlv_filter.rx_mpdu_start_offset =
4332 		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
4333 	tlv_filter.rx_msdu_end_offset =
4334 		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
4335 
4336 	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
4337 		tlv_filter.rx_mpdu_start_wmask =
4338 			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start();
4339 		tlv_filter.rx_msdu_end_wmask =
4340 			ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end();
4341 		ath12k_dbg(ab, ATH12K_DBG_DATA,
4342 			   "Configuring compact tlv masks rx_mpdu_start_wmask 0x%x rx_msdu_end_wmask 0x%x\n",
4343 			   tlv_filter.rx_mpdu_start_wmask, tlv_filter.rx_msdu_end_wmask);
4344 	}
4345 
4346 	ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, 0,
4347 					       HAL_RXDMA_BUF,
4348 					       DP_RXDMA_REFILL_RING_SIZE,
4349 					       &tlv_filter);
4350 
4351 	return ret;
4352 }
4353 
4354 int ath12k_dp_rxdma_ring_sel_config_wcn7850(struct ath12k_base *ab)
4355 {
4356 	struct ath12k_dp *dp = &ab->dp;
4357 	struct htt_rx_ring_tlv_filter tlv_filter = {0};
4358 	u32 ring_id;
4359 	int ret = 0;
4360 	u32 hal_rx_desc_sz = ab->hal.hal_desc_sz;
4361 	int i;
4362 
4363 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4364 
4365 	tlv_filter.rx_filter = HTT_RX_TLV_FLAGS_RXDMA_RING;
4366 	tlv_filter.pkt_filter_flags2 = HTT_RX_FP_CTRL_PKT_FILTER_TLV_FLAGS2_BAR;
4367 	tlv_filter.pkt_filter_flags3 = HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_MCAST |
4368 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_UCAST |
4369 					HTT_RX_FP_DATA_PKT_FILTER_TLV_FLASG3_NULL_DATA;
4370 	tlv_filter.offset_valid = true;
4371 	tlv_filter.rx_packet_offset = hal_rx_desc_sz;
4372 
4373 	tlv_filter.rx_header_offset = offsetof(struct hal_rx_desc_wcn7850, pkt_hdr_tlv);
4374 
4375 	tlv_filter.rx_mpdu_start_offset =
4376 		ab->hal_rx_ops->rx_desc_get_mpdu_start_offset();
4377 	tlv_filter.rx_msdu_end_offset =
4378 		ab->hal_rx_ops->rx_desc_get_msdu_end_offset();
4379 
4380 	/* TODO: Selectively subscribe to required qwords within msdu_end
4381 	 * and mpdu_start, set up the mask in the message below and modify
4382 	 * the rx_desc struct accordingly.
4383 	 */
4384 
4385 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4386 		ring_id = dp->rx_mac_buf_ring[i].ring_id;
4387 		ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id, i,
4388 						       HAL_RXDMA_BUF,
4389 						       DP_RXDMA_REFILL_RING_SIZE,
4390 						       &tlv_filter);
4391 	}
4392 
4393 	return ret;
4394 }
4395 
4396 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
4397 {
4398 	struct ath12k_dp *dp = &ab->dp;
4399 	u32 ring_id;
4400 	int i, ret;
4401 
4402 	/* TODO: Need to verify the HTT setup for QCN9224 */
4403 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4404 	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
4405 	if (ret) {
4406 		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4407 			    ret);
4408 		return ret;
4409 	}
4410 
4411 	if (ab->hw_params->rx_mac_buf_ring) {
4412 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4413 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4414 			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4415 							  i, HAL_RXDMA_BUF);
4416 			if (ret) {
4417 				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4418 					    i, ret);
4419 				return ret;
4420 			}
4421 		}
4422 	}
4423 
4424 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4425 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4426 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4427 						  i, HAL_RXDMA_DST);
4428 		if (ret) {
4429 			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
4430 				    i, ret);
4431 			return ret;
4432 		}
4433 	}
4434 
4435 	if (ab->hw_params->rxdma1_enable) {
4436 		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4437 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4438 						  0, HAL_RXDMA_MONITOR_BUF);
4439 		if (ret) {
4440 			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4441 				    ret);
4442 			return ret;
4443 		}
4444 	} else {
4445 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4446 			ring_id =
4447 				dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4448 			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
4449 							  HAL_RXDMA_MONITOR_STATUS);
4450 			if (ret) {
4451 				ath12k_warn(ab,
4452 					    "failed to configure mon_status_refill_ring%d %d\n",
4453 					    i, ret);
4454 				return ret;
4455 			}
4456 		}
4457 	}
4458 
4459 	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
4460 	if (ret) {
4461 		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
4462 		return ret;
4463 	}
4464 
4465 	return 0;
4466 }
4467 
4468 int ath12k_dp_rx_alloc(struct ath12k_base *ab)
4469 {
4470 	struct ath12k_dp *dp = &ab->dp;
4471 	struct dp_srng *srng;
4472 	int i, ret;
4473 
4474 	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
4475 	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
4476 
4477 	ret = ath12k_dp_srng_setup(ab,
4478 				   &dp->rx_refill_buf_ring.refill_buf_ring,
4479 				   HAL_RXDMA_BUF, 0, 0,
4480 				   DP_RXDMA_BUF_RING_SIZE);
4481 	if (ret) {
4482 		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
4483 		return ret;
4484 	}
4485 
4486 	if (ab->hw_params->rx_mac_buf_ring) {
4487 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4488 			ret = ath12k_dp_srng_setup(ab,
4489 						   &dp->rx_mac_buf_ring[i],
4490 						   HAL_RXDMA_BUF, 1,
4491 						   i, DP_RX_MAC_BUF_RING_SIZE);
4492 			if (ret) {
4493 				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
4494 					    i);
4495 				return ret;
4496 			}
4497 		}
4498 	}
4499 
4500 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
4501 		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
4502 					   HAL_RXDMA_DST, 0, i,
4503 					   DP_RXDMA_ERR_DST_RING_SIZE);
4504 		if (ret) {
4505 			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
4506 			return ret;
4507 		}
4508 	}
4509 
4510 	if (ab->hw_params->rxdma1_enable) {
4511 		ret = ath12k_dp_srng_setup(ab,
4512 					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
4513 					   HAL_RXDMA_MONITOR_BUF, 0, 0,
4514 					   DP_RXDMA_MONITOR_BUF_RING_SIZE);
4515 		if (ret) {
4516 			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
4517 			return ret;
4518 		}
4519 	} else {
4520 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4521 			idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
4522 			spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
4523 		}
4524 
4525 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4526 			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
4527 			ret = ath12k_dp_srng_setup(ab, srng,
4528 						   HAL_RXDMA_MONITOR_STATUS, 0, i,
4529 						   DP_RXDMA_MON_STATUS_RING_SIZE);
4530 			if (ret) {
4531 				ath12k_warn(ab, "failed to setup mon status ring %d\n",
4532 					    i);
4533 				return ret;
4534 			}
4535 		}
4536 	}
4537 
4538 	ret = ath12k_dp_rxdma_buf_setup(ab);
4539 	if (ret) {
4540 		ath12k_warn(ab, "failed to setup rxdma ring\n");
4541 		return ret;
4542 	}
4543 
4544 	return 0;
4545 }
4546 
4547 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
4548 {
4549 	struct ath12k *ar = ab->pdevs[mac_id].ar;
4550 	struct ath12k_pdev_dp *dp = &ar->dp;
4551 	u32 ring_id;
4552 	int i;
4553 	int ret;
4554 
4555 	if (!ab->hw_params->rxdma1_enable)
4556 		goto out;
4557 
4558 	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
4559 	if (ret) {
4560 		ath12k_warn(ab, "failed to setup rx srngs\n");
4561 		return ret;
4562 	}
4563 
4564 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
4565 		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
4566 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
4567 						  mac_id + i,
4568 						  HAL_RXDMA_MONITOR_DST);
4569 		if (ret) {
4570 			ath12k_warn(ab,
4571 				    "failed to configure rxdma_mon_dst_ring %d %d\n",
4572 				    i, ret);
4573 			return ret;
4574 		}
4575 	}
4576 out:
4577 	return 0;
4578 }
4579 
4580 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
4581 {
4582 	struct ath12k_pdev_dp *dp = &ar->dp;
4583 	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
4584 
4585 	skb_queue_head_init(&pmon->rx_status_q);
4586 
4587 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
4588 
4589 	memset(&pmon->rx_mon_stats, 0,
4590 	       sizeof(pmon->rx_mon_stats));
4591 	return 0;
4592 }
4593 
4594 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
4595 {
4596 	struct ath12k_pdev_dp *dp = &ar->dp;
4597 	struct ath12k_mon_data *pmon = &dp->mon_data;
4598 	int ret = 0;
4599 
4600 	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
4601 	if (ret) {
4602 		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
4603 		return ret;
4604 	}
4605 
4606 	pmon->mon_last_linkdesc_paddr = 0;
4607 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
4608 	spin_lock_init(&pmon->mon_lock);
4609 
4610 	if (!ar->ab->hw_params->rxdma1_enable)
4611 		return 0;
4612 
4613 	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
4614 	pmon->mon_mpdu = NULL;
4615 
4616 	return 0;
4617 }
4618