xref: /freebsd/sys/contrib/dev/athk/ath11k/dp_rx.c (revision 7ef62cebc2f965b0f640263e179276928885e33d)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  */
5 
6 #include <linux/ieee80211.h>
7 #include <linux/kernel.h>
8 #include <linux/skbuff.h>
9 #include <crypto/hash.h>
10 #include "core.h"
11 #include "debug.h"
12 #include "debugfs_htt_stats.h"
13 #include "debugfs_sta.h"
14 #include "hal_desc.h"
15 #include "hw.h"
16 #include "dp_rx.h"
17 #include "hal_rx.h"
18 #include "dp_tx.h"
19 #include "peer.h"
20 
21 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
22 
23 static inline
24 u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
25 {
26 	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
27 }
28 
29 static inline
30 enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
31 							struct hal_rx_desc *desc)
32 {
33 	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
34 		return HAL_ENCRYPT_TYPE_OPEN;
35 
36 	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
37 }
38 
39 static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
40 						      struct hal_rx_desc *desc)
41 {
42 	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
43 }
44 
45 static inline
46 bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
47 					    struct hal_rx_desc *desc)
48 {
49 	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
50 }
51 
52 static inline
53 u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
54 					      struct hal_rx_desc *desc)
55 {
56 	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
57 }
58 
59 static inline
60 bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
61 					      struct hal_rx_desc *desc)
62 {
63 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
64 }
65 
66 static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
67 						      struct hal_rx_desc *desc)
68 {
69 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
70 }
71 
72 static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
73 							struct sk_buff *skb)
74 {
75 	struct ieee80211_hdr *hdr;
76 
77 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
78 	return ieee80211_has_morefrags(hdr->frame_control);
79 }
80 
81 static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
82 						    struct sk_buff *skb)
83 {
84 	struct ieee80211_hdr *hdr;
85 
86 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
87 	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
88 }
89 
90 static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
91 						   struct hal_rx_desc *desc)
92 {
93 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
94 }
95 
96 static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
97 					       struct hal_rx_desc *desc)
98 {
99 	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
100 }
101 
102 static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
103 {
104 	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
105 			   __le32_to_cpu(attn->info2));
106 }
107 
108 static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
109 {
110 	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
111 			   __le32_to_cpu(attn->info1));
112 }
113 
114 static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
115 {
116 	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
117 			   __le32_to_cpu(attn->info1));
118 }
119 
120 static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
121 {
122 	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
123 			  __le32_to_cpu(attn->info2)) ==
124 		RX_DESC_DECRYPT_STATUS_CODE_OK);
125 }
126 
127 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
128 {
129 	u32 info = __le32_to_cpu(attn->info1);
130 	u32 errmap = 0;
131 
132 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
133 		errmap |= DP_RX_MPDU_ERR_FCS;
134 
135 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
136 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
137 
138 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
139 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
140 
141 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
142 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
143 
144 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
145 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
146 
147 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
148 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
149 
150 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
151 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
152 
153 	return errmap;
154 }
155 
156 static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
157 					     struct hal_rx_desc *desc)
158 {
159 	struct rx_attention *rx_attention;
160 	u32 errmap;
161 
162 	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
163 	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
164 
165 	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
166 }
167 
168 static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
169 						     struct hal_rx_desc *desc)
170 {
171 	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
172 }
173 
174 static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
175 					       struct hal_rx_desc *desc)
176 {
177 	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
178 }
179 
180 static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
181 						    struct hal_rx_desc *desc)
182 {
183 	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
184 }
185 
186 static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
187 						 struct hal_rx_desc *desc)
188 {
189 	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
190 }
191 
192 static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
193 						 struct hal_rx_desc *desc)
194 {
195 	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
196 }
197 
198 static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
199 						    struct hal_rx_desc *desc)
200 {
201 	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
202 }
203 
204 static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
205 					       struct hal_rx_desc *desc)
206 {
207 	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
208 }
209 
210 static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
211 					       struct hal_rx_desc *desc)
212 {
213 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
214 }
215 
216 static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
217 						    struct hal_rx_desc *desc)
218 {
219 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
220 }
221 
222 static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
223 					       struct hal_rx_desc *desc)
224 {
225 	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
226 }
227 
228 static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
229 						      struct hal_rx_desc *desc)
230 {
231 	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
232 }
233 
234 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
235 					      struct hal_rx_desc *desc)
236 {
237 	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
238 }
239 
240 static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
241 					   struct hal_rx_desc *fdesc,
242 					   struct hal_rx_desc *ldesc)
243 {
244 	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
245 }
246 
247 static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
248 {
249 	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
250 			 __le32_to_cpu(attn->info1));
251 }
252 
253 static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
254 						struct hal_rx_desc *rx_desc)
255 {
256 	u8 *rx_pkt_hdr;
257 
258 	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
259 
260 	return rx_pkt_hdr;
261 }
262 
263 static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
264 					       struct hal_rx_desc *rx_desc)
265 {
266 	u32 tlv_tag;
267 
268 	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
269 
270 	return tlv_tag == HAL_RX_MPDU_START;
271 }
272 
273 static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
274 					      struct hal_rx_desc *rx_desc)
275 {
276 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
277 }
278 
279 static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
280 						 struct hal_rx_desc *desc,
281 						 u16 len)
282 {
283 	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
284 }
285 
286 static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
287 					struct hal_rx_desc *desc)
288 {
289 	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
290 
291 	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
292 		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
293 		 __le32_to_cpu(attn->info1)));
294 }
295 
296 static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
297 					     struct hal_rx_desc *desc)
298 {
299 	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
300 }
301 
302 static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
303 					     struct hal_rx_desc *desc)
304 {
305 	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
306 }
307 
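/* Monitor reap timer callback: service each per-pdev monitor ring with a
 * fixed budget and re-arm the timer. Used when rxdma1 is not available and
 * the monitor status ring has to be serviced from a timer instead.
 */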
308 static void ath11k_dp_service_mon_ring(struct timer_list *t)
309 {
310 	struct ath11k_base *ab = from_timer(ab, t, mon_reap_timer);
311 	int i;
312 
313 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
314 		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
315 
316 	mod_timer(&ab->mon_reap_timer, jiffies +
317 		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
318 }
319 
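/* Drain buffers pending on the monitor rings; bail out once less than a
 * full service budget was reaped, or return -ETIMEDOUT after
 * DP_MON_PURGE_TIMEOUT_MS.
 */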
320 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
321 {
322 	int i, reaped = 0;
323 	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
324 
325 	do {
326 		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++)
327 			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
328 								 NULL,
329 								 DP_MON_SERVICE_BUDGET);
330 
331 		/* nothing more to reap */
332 		if (reaped < DP_MON_SERVICE_BUDGET)
333 			return 0;
334 
335 	} while (time_before(jiffies, timeout));
336 
337 	ath11k_warn(ab, "dp mon ring purge timeout\n");
338 
339 	return -ETIMEDOUT;
340 }
341 
342 /* Returns number of Rx buffers replenished */
343 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
344 			       struct dp_rxdma_ring *rx_ring,
345 			       int req_entries,
346 			       enum hal_rx_buf_return_buf_manager mgr)
347 {
348 	struct hal_srng *srng;
349 	u32 *desc;
350 	struct sk_buff *skb;
351 	int num_free;
352 	int num_remain;
353 	int buf_id;
354 	u32 cookie;
355 	dma_addr_t paddr;
356 
357 	req_entries = min(req_entries, rx_ring->bufs_max);
358 
359 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
360 
361 	spin_lock_bh(&srng->lock);
362 
363 	ath11k_hal_srng_access_begin(ab, srng);
364 
365 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
366 	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
367 		req_entries = num_free;
368 
369 	req_entries = min(num_free, req_entries);
370 	num_remain = req_entries;
371 
372 	while (num_remain > 0) {
373 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
374 				    DP_RX_BUFFER_ALIGN_SIZE);
375 		if (!skb)
376 			break;
377 
378 		if (!IS_ALIGNED((unsigned long)skb->data,
379 				DP_RX_BUFFER_ALIGN_SIZE)) {
380 			skb_pull(skb,
381 				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
382 				 skb->data);
383 		}
384 
385 		paddr = dma_map_single(ab->dev, skb->data,
386 				       skb->len + skb_tailroom(skb),
387 				       DMA_FROM_DEVICE);
388 		if (dma_mapping_error(ab->dev, paddr))
389 			goto fail_free_skb;
390 
391 		spin_lock_bh(&rx_ring->idr_lock);
392 		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
393 				   rx_ring->bufs_max * 3, GFP_ATOMIC);
394 		spin_unlock_bh(&rx_ring->idr_lock);
395 		if (buf_id < 0)
396 			goto fail_dma_unmap;
397 
398 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
399 		if (!desc)
400 			goto fail_idr_remove;
401 
402 		ATH11K_SKB_RXCB(skb)->paddr = paddr;
403 
404 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
405 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
406 
407 		num_remain--;
408 
409 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
410 	}
411 
412 	ath11k_hal_srng_access_end(ab, srng);
413 
414 	spin_unlock_bh(&srng->lock);
415 
416 	return req_entries - num_remain;
417 
418 fail_idr_remove:
419 	spin_lock_bh(&rx_ring->idr_lock);
420 	idr_remove(&rx_ring->bufs_idr, buf_id);
421 	spin_unlock_bh(&rx_ring->idr_lock);
422 fail_dma_unmap:
423 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
424 			 DMA_FROM_DEVICE);
425 fail_free_skb:
426 	dev_kfree_skb_any(skb);
427 
428 	ath11k_hal_srng_access_end(ab, srng);
429 
430 	spin_unlock_bh(&srng->lock);
431 
432 	return req_entries - num_remain;
433 }
434 
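/* Release every skb tracked in the ring's buf IDR, unmapping its DMA buffer,
 * and, when rxdma1 is enabled, also drain the first monitor status refill
 * ring the same way.
 */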
435 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
436 					 struct dp_rxdma_ring *rx_ring)
437 {
438 	struct ath11k_pdev_dp *dp = &ar->dp;
439 	struct sk_buff *skb;
440 	int buf_id;
441 
442 	spin_lock_bh(&rx_ring->idr_lock);
443 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
444 		idr_remove(&rx_ring->bufs_idr, buf_id);
445 		/* TODO: Understand where internal driver does this dma_unmap
446 		 * of rxdma_buffer.
447 		 */
448 		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
449 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
450 		dev_kfree_skb_any(skb);
451 	}
452 
453 	idr_destroy(&rx_ring->bufs_idr);
454 	spin_unlock_bh(&rx_ring->idr_lock);
455 
456 	/* if rxdma1_enable is false, mon_status_refill_ring
457 	 * isn't set up, so don't clean it.
458 	 */
459 	if (!ar->ab->hw_params.rxdma1_enable)
460 		return 0;
461 
462 	rx_ring = &dp->rx_mon_status_refill_ring[0];
463 
464 	spin_lock_bh(&rx_ring->idr_lock);
465 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
466 		idr_remove(&rx_ring->bufs_idr, buf_id);
467 		/* XXX: Understand where internal driver does this dma_unmap
468 		 * of rxdma_buffer.
469 		 */
470 		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
471 				 skb->len + skb_tailroom(skb), DMA_BIDIRECTIONAL);
472 		dev_kfree_skb_any(skb);
473 	}
474 
475 	idr_destroy(&rx_ring->bufs_idr);
476 	spin_unlock_bh(&rx_ring->idr_lock);
477 
478 	return 0;
479 }
480 
481 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
482 {
483 	struct ath11k_pdev_dp *dp = &ar->dp;
484 	struct ath11k_base *ab = ar->ab;
485 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
486 	int i;
487 
488 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
489 
490 	rx_ring = &dp->rxdma_mon_buf_ring;
491 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
492 
493 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
494 		rx_ring = &dp->rx_mon_status_refill_ring[i];
495 		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
496 	}
497 
498 	return 0;
499 }
500 
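/* Derive the ring's buffer count from the SRNG entry size and fill it
 * completely with freshly allocated rx buffers.
 */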
501 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
502 					  struct dp_rxdma_ring *rx_ring,
503 					  u32 ringtype)
504 {
505 	struct ath11k_pdev_dp *dp = &ar->dp;
506 	int num_entries;
507 
508 	num_entries = rx_ring->refill_buf_ring.size /
509 		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
510 
511 	rx_ring->bufs_max = num_entries;
512 	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
513 				   ar->ab->hw_params.hal_params->rx_buf_rbm);
514 	return 0;
515 }
516 
517 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
518 {
519 	struct ath11k_pdev_dp *dp = &ar->dp;
520 	struct ath11k_base *ab = ar->ab;
521 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
522 	int i;
523 
524 	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
525 
526 	if (ar->ab->hw_params.rxdma1_enable) {
527 		rx_ring = &dp->rxdma_mon_buf_ring;
528 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
529 	}
530 
531 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
532 		rx_ring = &dp->rx_mon_status_refill_ring[i];
533 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
534 	}
535 
536 	return 0;
537 }
538 
539 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
540 {
541 	struct ath11k_pdev_dp *dp = &ar->dp;
542 	struct ath11k_base *ab = ar->ab;
543 	int i;
544 
545 	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
546 
547 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
548 		if (ab->hw_params.rx_mac_buf_ring)
549 			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
550 
551 		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
552 		ath11k_dp_srng_cleanup(ab,
553 				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
554 	}
555 
556 	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
557 }
558 
559 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
560 {
561 	struct ath11k_dp *dp = &ab->dp;
562 	int i;
563 
564 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
565 		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
566 }
567 
568 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
569 {
570 	struct ath11k_dp *dp = &ab->dp;
571 	int ret;
572 	int i;
573 
574 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
575 		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
576 					   HAL_REO_DST, i, 0,
577 					   DP_REO_DST_RING_SIZE);
578 		if (ret) {
579 			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
580 			goto err_reo_cleanup;
581 		}
582 	}
583 
584 	return 0;
585 
586 err_reo_cleanup:
587 	ath11k_dp_pdev_reo_cleanup(ab);
588 
589 	return ret;
590 }
591 
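/* Allocate the per-pdev rx SRNGs: the refill buffer ring, optional per-MAC
 * buffer rings, rxdma error destination rings and monitor status refill
 * rings. The monitor buf/dst/desc rings are only set up when rxdma1 is
 * enabled; otherwise only the monitor reap timer is initialized.
 */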
592 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
593 {
594 	struct ath11k_pdev_dp *dp = &ar->dp;
595 	struct ath11k_base *ab = ar->ab;
596 	struct dp_srng *srng = NULL;
597 	int i;
598 	int ret;
599 
600 	ret = ath11k_dp_srng_setup(ar->ab,
601 				   &dp->rx_refill_buf_ring.refill_buf_ring,
602 				   HAL_RXDMA_BUF, 0,
603 				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
604 	if (ret) {
605 		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
606 		return ret;
607 	}
608 
609 	if (ar->ab->hw_params.rx_mac_buf_ring) {
610 		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
611 			ret = ath11k_dp_srng_setup(ar->ab,
612 						   &dp->rx_mac_buf_ring[i],
613 						   HAL_RXDMA_BUF, 1,
614 						   dp->mac_id + i, 1024);
615 			if (ret) {
616 				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
617 					    i);
618 				return ret;
619 			}
620 		}
621 	}
622 
623 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
624 		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
625 					   HAL_RXDMA_DST, 0, dp->mac_id + i,
626 					   DP_RXDMA_ERR_DST_RING_SIZE);
627 		if (ret) {
628 			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
629 			return ret;
630 		}
631 	}
632 
633 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
634 		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
635 		ret = ath11k_dp_srng_setup(ar->ab,
636 					   srng,
637 					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
638 					   DP_RXDMA_MON_STATUS_RING_SIZE);
639 		if (ret) {
640 			ath11k_warn(ar->ab,
641 				    "failed to setup rx_mon_status_refill_ring %d\n", i);
642 			return ret;
643 		}
644 	}
645 
646 	/* if rxdma1_enable is false, there is no need to set up
647 	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring
648 	 * and rxdma_mon_desc_ring.
649 	 * Only init the reap timer for QCA6390.
650 	 */
651 	if (!ar->ab->hw_params.rxdma1_enable) {
652 		/* init mon status buffer reap timer */
653 		timer_setup(&ar->ab->mon_reap_timer,
654 			    ath11k_dp_service_mon_ring, 0);
655 		return 0;
656 	}
657 
658 	ret = ath11k_dp_srng_setup(ar->ab,
659 				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
660 				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
661 				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
662 	if (ret) {
663 		ath11k_warn(ar->ab,
664 			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
665 		return ret;
666 	}
667 
668 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
669 				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
670 				   DP_RXDMA_MONITOR_DST_RING_SIZE);
671 	if (ret) {
672 		ath11k_warn(ar->ab,
673 			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
674 		return ret;
675 	}
676 
677 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
678 				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
679 				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
680 	if (ret) {
681 		ath11k_warn(ar->ab,
682 			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
683 		return ret;
684 	}
685 
686 	return 0;
687 }
688 
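/* Flush both pending REO command lists, unmapping and freeing the queue
 * descriptors they still reference.
 */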
689 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
690 {
691 	struct ath11k_dp *dp = &ab->dp;
692 	struct dp_reo_cmd *cmd, *tmp;
693 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
694 
695 	spin_lock_bh(&dp->reo_cmd_lock);
696 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
697 		list_del(&cmd->list);
698 		dma_unmap_single(ab->dev, cmd->data.paddr,
699 				 cmd->data.size, DMA_BIDIRECTIONAL);
700 		kfree(cmd->data.vaddr);
701 		kfree(cmd);
702 	}
703 
704 	list_for_each_entry_safe(cmd_cache, tmp_cache,
705 				 &dp->reo_cmd_cache_flush_list, list) {
706 		list_del(&cmd_cache->list);
707 		dp->reo_cmd_cache_flush_count--;
708 		dma_unmap_single(ab->dev, cmd_cache->data.paddr,
709 				 cmd_cache->data.size, DMA_BIDIRECTIONAL);
710 		kfree(cmd_cache->data.vaddr);
711 		kfree(cmd_cache);
712 	}
713 	spin_unlock_bh(&dp->reo_cmd_lock);
714 }
715 
716 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
717 				   enum hal_reo_cmd_status status)
718 {
719 	struct dp_rx_tid *rx_tid = ctx;
720 
721 	if (status != HAL_REO_CMD_SUCCESS)
722 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
723 			    rx_tid->tid, status);
724 
725 	dma_unmap_single(dp->ab->dev, rx_tid->paddr, rx_tid->size,
726 			 DMA_BIDIRECTIONAL);
727 	kfree(rx_tid->vaddr);
728 }
729 
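/* Flush a TID's hardware REO queue descriptor out of the REO cache in
 * descriptor-sized chunks; the final flush command carries a status callback
 * that unmaps and frees the host descriptor memory.
 */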
730 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
731 				      struct dp_rx_tid *rx_tid)
732 {
733 	struct ath11k_hal_reo_cmd cmd = {0};
734 	unsigned long tot_desc_sz, desc_sz;
735 	int ret;
736 
737 	tot_desc_sz = rx_tid->size;
738 	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
739 
740 	while (tot_desc_sz > desc_sz) {
741 		tot_desc_sz -= desc_sz;
742 		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
743 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
744 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
745 						HAL_REO_CMD_FLUSH_CACHE, &cmd,
746 						NULL);
747 		if (ret)
748 			ath11k_warn(ab,
749 				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
750 				    rx_tid->tid, ret);
751 	}
752 
753 	memset(&cmd, 0, sizeof(cmd));
754 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
755 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
756 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
757 	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
758 					HAL_REO_CMD_FLUSH_CACHE,
759 					&cmd, ath11k_dp_reo_cmd_free);
760 	if (ret) {
761 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
762 			   rx_tid->tid, ret);
763 		dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
764 				 DMA_BIDIRECTIONAL);
765 		kfree(rx_tid->vaddr);
766 	}
767 }
768 
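/* Completion handler for the TID delete REO command: on success queue the
 * descriptor on the cache-flush list and flush aged or excess entries; on
 * drain status or allocation failure free the descriptor immediately.
 */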
769 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
770 				      enum hal_reo_cmd_status status)
771 {
772 	struct ath11k_base *ab = dp->ab;
773 	struct dp_rx_tid *rx_tid = ctx;
774 	struct dp_reo_cache_flush_elem *elem, *tmp;
775 
776 	if (status == HAL_REO_CMD_DRAIN) {
777 		goto free_desc;
778 	} else if (status != HAL_REO_CMD_SUCCESS) {
779 		/* Shouldn't happen! Cleanup in case of other failure? */
780 		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
781 			    rx_tid->tid, status);
782 		return;
783 	}
784 
785 	elem = kzalloc(sizeof(*elem), GFP_ATOMIC);
786 	if (!elem)
787 		goto free_desc;
788 
789 	elem->ts = jiffies;
790 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
791 
792 	spin_lock_bh(&dp->reo_cmd_lock);
793 	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
794 	dp->reo_cmd_cache_flush_count++;
795 
796 	/* Flush and invalidate aged REO desc from HW cache */
797 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
798 				 list) {
799 		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
800 		    time_after(jiffies, elem->ts +
801 			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
802 			list_del(&elem->list);
803 			dp->reo_cmd_cache_flush_count--;
804 			spin_unlock_bh(&dp->reo_cmd_lock);
805 
806 			ath11k_dp_reo_cache_flush(ab, &elem->data);
807 			kfree(elem);
808 			spin_lock_bh(&dp->reo_cmd_lock);
809 		}
810 	}
811 	spin_unlock_bh(&dp->reo_cmd_lock);
812 
813 	return;
814 free_desc:
815 	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
816 			 DMA_BIDIRECTIONAL);
817 	kfree(rx_tid->vaddr);
818 }
819 
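/* Invalidate a peer's rx TID hardware queue via a REO UPDATE_RX_QUEUE
 * command; the descriptor memory is released from the command's completion
 * callback, or immediately if the command cannot be sent.
 */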
820 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
821 			       struct ath11k_peer *peer, u8 tid)
822 {
823 	struct ath11k_hal_reo_cmd cmd = {0};
824 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
825 	int ret;
826 
827 	if (!rx_tid->active)
828 		return;
829 
830 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
831 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
832 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
833 	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
834 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
835 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
836 					ath11k_dp_rx_tid_del_func);
837 	if (ret) {
838 		ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
839 			   tid, ret);
840 		dma_unmap_single(ar->ab->dev, rx_tid->paddr, rx_tid->size,
841 				 DMA_BIDIRECTIONAL);
842 		kfree(rx_tid->vaddr);
843 	}
844 
845 	rx_tid->active = false;
846 }
847 
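/* Return an rx MSDU link descriptor to the WBM release ring with the
 * requested buffer manager action.
 */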
848 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
849 					 u32 *link_desc,
850 					 enum hal_wbm_rel_bm_act action)
851 {
852 	struct ath11k_dp *dp = &ab->dp;
853 	struct hal_srng *srng;
854 	u32 *desc;
855 	int ret = 0;
856 
857 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
858 
859 	spin_lock_bh(&srng->lock);
860 
861 	ath11k_hal_srng_access_begin(ab, srng);
862 
863 	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
864 	if (!desc) {
865 		ret = -ENOBUFS;
866 		goto exit;
867 	}
868 
869 	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
870 					 action);
871 
872 exit:
873 	ath11k_hal_srng_access_end(ab, srng);
874 
875 	spin_unlock_bh(&srng->lock);
876 
877 	return ret;
878 }
879 
880 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
881 {
882 	struct ath11k_base *ab = rx_tid->ab;
883 
884 	lockdep_assert_held(&ab->base_lock);
885 
886 	if (rx_tid->dst_ring_desc) {
887 		if (rel_link_desc)
888 			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
889 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
890 		kfree(rx_tid->dst_ring_desc);
891 		rx_tid->dst_ring_desc = NULL;
892 	}
893 
894 	rx_tid->cur_sn = 0;
895 	rx_tid->last_frag_no = 0;
896 	rx_tid->rx_frag_bitmap = 0;
897 	__skb_queue_purge(&rx_tid->rx_frags);
898 }
899 
900 void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
901 {
902 	struct dp_rx_tid *rx_tid;
903 	int i;
904 
905 	lockdep_assert_held(&ar->ab->base_lock);
906 
907 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
908 		rx_tid = &peer->rx_tid[i];
909 
910 		spin_unlock_bh(&ar->ab->base_lock);
911 		del_timer_sync(&rx_tid->frag_timer);
912 		spin_lock_bh(&ar->ab->base_lock);
913 
914 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
915 	}
916 }
917 
918 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
919 {
920 	struct dp_rx_tid *rx_tid;
921 	int i;
922 
923 	lockdep_assert_held(&ar->ab->base_lock);
924 
925 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
926 		rx_tid = &peer->rx_tid[i];
927 
928 		ath11k_peer_rx_tid_delete(ar, peer, i);
929 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
930 
931 		spin_unlock_bh(&ar->ab->base_lock);
932 		del_timer_sync(&rx_tid->frag_timer);
933 		spin_lock_bh(&ar->ab->base_lock);
934 	}
935 }
936 
937 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
938 					 struct ath11k_peer *peer,
939 					 struct dp_rx_tid *rx_tid,
940 					 u32 ba_win_sz, u16 ssn,
941 					 bool update_ssn)
942 {
943 	struct ath11k_hal_reo_cmd cmd = {0};
944 	int ret;
945 
946 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
947 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
948 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
949 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
950 	cmd.ba_window_size = ba_win_sz;
951 
952 	if (update_ssn) {
953 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
954 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
955 	}
956 
957 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
958 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
959 					NULL);
960 	if (ret) {
961 		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
962 			    rx_tid->tid, ret);
963 		return ret;
964 	}
965 
966 	rx_tid->ba_win_sz = ba_win_sz;
967 
968 	return 0;
969 }
970 
971 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
972 				      const u8 *peer_mac, int vdev_id, u8 tid)
973 {
974 	struct ath11k_peer *peer;
975 	struct dp_rx_tid *rx_tid;
976 
977 	spin_lock_bh(&ab->base_lock);
978 
979 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
980 	if (!peer) {
981 		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
982 		goto unlock_exit;
983 	}
984 
985 	rx_tid = &peer->rx_tid[tid];
986 	if (!rx_tid->active)
987 		goto unlock_exit;
988 
989 	dma_unmap_single(ab->dev, rx_tid->paddr, rx_tid->size,
990 			 DMA_BIDIRECTIONAL);
991 	kfree(rx_tid->vaddr);
992 
993 	rx_tid->active = false;
994 
995 unlock_exit:
996 	spin_unlock_bh(&ab->base_lock);
997 }
998 
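/* Set up (or update) the REO reorder queue for a peer/TID: allocate and
 * DMA-map the hardware queue descriptor, then hand its address to the
 * firmware through WMI. If the TID is already active, only the BA window
 * size and SSN are updated.
 */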
999 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
1000 			     u8 tid, u32 ba_win_sz, u16 ssn,
1001 			     enum hal_pn_type pn_type)
1002 {
1003 	struct ath11k_base *ab = ar->ab;
1004 	struct ath11k_peer *peer;
1005 	struct dp_rx_tid *rx_tid;
1006 	u32 hw_desc_sz;
1007 	u32 *addr_aligned;
1008 	void *vaddr;
1009 	dma_addr_t paddr;
1010 	int ret;
1011 
1012 	spin_lock_bh(&ab->base_lock);
1013 
1014 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
1015 	if (!peer) {
1016 		ath11k_warn(ab, "failed to find the peer to set up rx tid\n");
1017 		spin_unlock_bh(&ab->base_lock);
1018 		return -ENOENT;
1019 	}
1020 
1021 	rx_tid = &peer->rx_tid[tid];
1022 	/* Update the tid queue if it is already set up */
1023 	if (rx_tid->active) {
1024 		paddr = rx_tid->paddr;
1025 		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1026 						    ba_win_sz, ssn, true);
1027 		spin_unlock_bh(&ab->base_lock);
1028 		if (ret) {
1029 			ath11k_warn(ab, "failed to update reo for rx tid %d\n", tid);
1030 			return ret;
1031 		}
1032 
1033 		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1034 							     peer_mac, paddr,
1035 							     tid, 1, ba_win_sz);
1036 		if (ret)
1037 			ath11k_warn(ab, "failed to send wmi command to update rx reorder queue, tid :%d (%d)\n",
1038 				    tid, ret);
1039 		return ret;
1040 	}
1041 
1042 	rx_tid->tid = tid;
1043 
1044 	rx_tid->ba_win_sz = ba_win_sz;
1045 
1046 	/* TODO: Optimize the memory allocation for qos tid based on
1047 	 * the actual BA window size in REO tid update path.
1048 	 */
1049 	if (tid == HAL_DESC_REO_NON_QOS_TID)
1050 		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
1051 	else
1052 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1053 
1054 	vaddr = kzalloc(hw_desc_sz + HAL_LINK_DESC_ALIGN - 1, GFP_ATOMIC);
1055 	if (!vaddr) {
1056 		spin_unlock_bh(&ab->base_lock);
1057 		return -ENOMEM;
1058 	}
1059 
1060 	addr_aligned = PTR_ALIGN(vaddr, HAL_LINK_DESC_ALIGN);
1061 
1062 	ath11k_hal_reo_qdesc_setup(addr_aligned, tid, ba_win_sz,
1063 				   ssn, pn_type);
1064 
1065 	paddr = dma_map_single(ab->dev, addr_aligned, hw_desc_sz,
1066 			       DMA_BIDIRECTIONAL);
1067 
1068 	ret = dma_mapping_error(ab->dev, paddr);
1069 	if (ret) {
1070 		spin_unlock_bh(&ab->base_lock);
1071 		goto err_mem_free;
1072 	}
1073 
1074 	rx_tid->vaddr = vaddr;
1075 	rx_tid->paddr = paddr;
1076 	rx_tid->size = hw_desc_sz;
1077 	rx_tid->active = true;
1078 
1079 	spin_unlock_bh(&ab->base_lock);
1080 
1081 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
1082 						     paddr, tid, 1, ba_win_sz);
1083 	if (ret) {
1084 		ath11k_warn(ar->ab, "failed to setup rx reorder queue, tid :%d (%d)\n",
1085 			    tid, ret);
1086 		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
1087 	}
1088 
1089 	return ret;
1090 
1091 err_mem_free:
1092 	kfree(vaddr);
1093 
1094 	return ret;
1095 }
1096 
1097 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1098 			     struct ieee80211_ampdu_params *params)
1099 {
1100 	struct ath11k_base *ab = ar->ab;
1101 	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1102 	int vdev_id = arsta->arvif->vdev_id;
1103 	int ret;
1104 
1105 	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1106 				       params->tid, params->buf_size,
1107 				       params->ssn, arsta->pn_type);
1108 	if (ret)
1109 		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1110 
1111 	return ret;
1112 }
1113 
1114 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1115 			    struct ieee80211_ampdu_params *params)
1116 {
1117 	struct ath11k_base *ab = ar->ab;
1118 	struct ath11k_peer *peer;
1119 	struct ath11k_sta *arsta = (void *)params->sta->drv_priv;
1120 	int vdev_id = arsta->arvif->vdev_id;
1121 	dma_addr_t paddr;
1122 	bool active;
1123 	int ret;
1124 
1125 	spin_lock_bh(&ab->base_lock);
1126 
1127 	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1128 	if (!peer) {
1129 		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1130 		spin_unlock_bh(&ab->base_lock);
1131 		return -ENOENT;
1132 	}
1133 
1134 	paddr = peer->rx_tid[params->tid].paddr;
1135 	active = peer->rx_tid[params->tid].active;
1136 
1137 	if (!active) {
1138 		spin_unlock_bh(&ab->base_lock);
1139 		return 0;
1140 	}
1141 
1142 	ret = ath11k_peer_rx_tid_reo_update(ar, peer, peer->rx_tid, 1, 0, false);
1143 	spin_unlock_bh(&ab->base_lock);
1144 	if (ret) {
1145 		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1146 			    params->tid, ret);
1147 		return ret;
1148 	}
1149 
1150 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1151 						     params->sta->addr, paddr,
1152 						     params->tid, 1, 1);
1153 	if (ret)
1154 		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
1155 			    ret);
1156 
1157 	return ret;
1158 }
1159 
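/* Enable or update hardware PN replay checking on all active rx TID queues
 * of a peer. Only pairwise keys are offloaded; group-addressed frames keep
 * using mac80211's PN/TSC checks.
 */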
1160 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1161 				       const u8 *peer_addr,
1162 				       enum set_key_cmd key_cmd,
1163 				       struct ieee80211_key_conf *key)
1164 {
1165 	struct ath11k *ar = arvif->ar;
1166 	struct ath11k_base *ab = ar->ab;
1167 	struct ath11k_hal_reo_cmd cmd = {0};
1168 	struct ath11k_peer *peer;
1169 	struct dp_rx_tid *rx_tid;
1170 	u8 tid;
1171 	int ret = 0;
1172 
1173 	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1174 	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1175 	 * for now.
1176 	 */
1177 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1178 		return 0;
1179 
1180 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1181 	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1182 		    HAL_REO_CMD_UPD0_PN_SIZE |
1183 		    HAL_REO_CMD_UPD0_PN_VALID |
1184 		    HAL_REO_CMD_UPD0_PN_CHECK |
1185 		    HAL_REO_CMD_UPD0_SVLD;
1186 
1187 	switch (key->cipher) {
1188 	case WLAN_CIPHER_SUITE_TKIP:
1189 	case WLAN_CIPHER_SUITE_CCMP:
1190 	case WLAN_CIPHER_SUITE_CCMP_256:
1191 	case WLAN_CIPHER_SUITE_GCMP:
1192 	case WLAN_CIPHER_SUITE_GCMP_256:
1193 		if (key_cmd == SET_KEY) {
1194 			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1195 			cmd.pn_size = 48;
1196 		}
1197 		break;
1198 	default:
1199 		break;
1200 	}
1201 
1202 	spin_lock_bh(&ab->base_lock);
1203 
1204 	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1205 	if (!peer) {
1206 		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1207 		spin_unlock_bh(&ab->base_lock);
1208 		return -ENOENT;
1209 	}
1210 
1211 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1212 		rx_tid = &peer->rx_tid[tid];
1213 		if (!rx_tid->active)
1214 			continue;
1215 		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1216 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1217 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1218 						HAL_REO_CMD_UPDATE_RX_QUEUE,
1219 						&cmd, NULL);
1220 		if (ret) {
1221 			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1222 				    tid, ret);
1223 			break;
1224 		}
1225 	}
1226 
1227 	spin_unlock_bh(&ab->base_lock);
1228 
1229 	return ret;
1230 }
1231 
1232 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1233 					     u16 peer_id)
1234 {
1235 	int i;
1236 
1237 	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1238 		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1239 			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1240 				return i;
1241 		} else {
1242 			return i;
1243 		}
1244 	}
1245 
1246 	return -EINVAL;
1247 }
1248 
1249 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1250 					   u16 tag, u16 len, const void *ptr,
1251 					   void *data)
1252 {
1253 	struct htt_ppdu_stats_info *ppdu_info;
1254 	struct htt_ppdu_user_stats *user_stats;
1255 	int cur_user;
1256 	u16 peer_id;
1257 
1258 	ppdu_info = (struct htt_ppdu_stats_info *)data;
1259 
1260 	switch (tag) {
1261 	case HTT_PPDU_STATS_TAG_COMMON:
1262 		if (len < sizeof(struct htt_ppdu_stats_common)) {
1263 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1264 				    len, tag);
1265 			return -EINVAL;
1266 		}
1267 		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1268 		       sizeof(struct htt_ppdu_stats_common));
1269 		break;
1270 	case HTT_PPDU_STATS_TAG_USR_RATE:
1271 		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1272 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1273 				    len, tag);
1274 			return -EINVAL;
1275 		}
1276 
1277 #if defined(__linux__)
1278 		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1279 #elif defined(__FreeBSD__)
1280 		peer_id = ((const struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1281 #endif
1282 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1283 						      peer_id);
1284 		if (cur_user < 0)
1285 			return -EINVAL;
1286 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1287 		user_stats->peer_id = peer_id;
1288 		user_stats->is_valid_peer_id = true;
1289 		memcpy((void *)&user_stats->rate, ptr,
1290 		       sizeof(struct htt_ppdu_stats_user_rate));
1291 		user_stats->tlv_flags |= BIT(tag);
1292 		break;
1293 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1294 		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1295 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1296 				    len, tag);
1297 			return -EINVAL;
1298 		}
1299 
1300 #if defined(__linux__)
1301 		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1302 #elif defined(__FreeBSD__)
1303 		peer_id = ((const struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1304 #endif
1305 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1306 						      peer_id);
1307 		if (cur_user < 0)
1308 			return -EINVAL;
1309 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1310 		user_stats->peer_id = peer_id;
1311 		user_stats->is_valid_peer_id = true;
1312 		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1313 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1314 		user_stats->tlv_flags |= BIT(tag);
1315 		break;
1316 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1317 		if (len <
1318 		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1319 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1320 				    len, tag);
1321 			return -EINVAL;
1322 		}
1323 
1324 		peer_id =
1325 #if defined(__linux__)
1326 		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1327 #elif defined(__FreeBSD__)
1328 		((const struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1329 #endif
1330 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1331 						      peer_id);
1332 		if (cur_user < 0)
1333 			return -EINVAL;
1334 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1335 		user_stats->peer_id = peer_id;
1336 		user_stats->is_valid_peer_id = true;
1337 		memcpy((void *)&user_stats->ack_ba, ptr,
1338 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1339 		user_stats->tlv_flags |= BIT(tag);
1340 		break;
1341 	}
1342 	return 0;
1343 }
1344 
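/* Walk an HTT TLV blob, validating each TLV header against the remaining
 * length, and invoke the supplied iterator on every TLV payload.
 */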
1345 #if defined(__linux__)
1346 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1347 #elif defined(__FreeBSD__)
1348 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const u8 *ptr, size_t len,
1349 #endif
1350 			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1351 				       const void *ptr, void *data),
1352 			   void *data)
1353 {
1354 	const struct htt_tlv *tlv;
1355 #if defined(__linux__)
1356 	const void *begin = ptr;
1357 #elif defined(__FreeBSD__)
1358 	const u8 *begin = ptr;
1359 #endif
1360 	u16 tlv_tag, tlv_len;
1361 	int ret = -EINVAL;
1362 
1363 	while (len > 0) {
1364 		if (len < sizeof(*tlv)) {
1365 			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1366 				   ptr - begin, len, sizeof(*tlv));
1367 			return -EINVAL;
1368 		}
1369 #if defined(__linux__)
1370 		tlv = (struct htt_tlv *)ptr;
1371 #elif defined(__FreeBSD__)
1372 		tlv = (const struct htt_tlv *)(const void *)ptr;
1373 #endif
1374 		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1375 		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1376 		ptr += sizeof(*tlv);
1377 		len -= sizeof(*tlv);
1378 
1379 		if (tlv_len > len) {
1380 			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1381 				   tlv_tag, ptr - begin, len, tlv_len);
1382 			return -EINVAL;
1383 		}
1384 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1385 		if (ret == -ENOMEM)
1386 			return ret;
1387 
1388 		ptr += tlv_len;
1389 		len -= tlv_len;
1390 	}
1391 	return 0;
1392 }
1393 
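/* Translate one user's PPDU stats TLVs into tx rate info and completion
 * counters for the corresponding station.
 */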
1394 static void
1395 ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1396 				struct htt_ppdu_stats *ppdu_stats, u8 user)
1397 {
1398 	struct ath11k_base *ab = ar->ab;
1399 	struct ath11k_peer *peer;
1400 	struct ieee80211_sta *sta;
1401 	struct ath11k_sta *arsta;
1402 	struct htt_ppdu_stats_user_rate *user_rate;
1403 	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1404 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1405 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1406 	int ret;
1407 	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1408 	u32 succ_bytes = 0;
1409 	u16 rate = 0, succ_pkts = 0;
1410 	u32 tx_duration = 0;
1411 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1412 	bool is_ampdu = false;
1413 
1414 	if (!usr_stats)
1415 		return;
1416 
1417 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1418 		return;
1419 
1420 	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1421 		is_ampdu =
1422 			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1423 
1424 	if (usr_stats->tlv_flags &
1425 	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1426 		succ_bytes = usr_stats->ack_ba.success_bytes;
1427 		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1428 				      usr_stats->ack_ba.info);
1429 		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1430 				usr_stats->ack_ba.info);
1431 	}
1432 
1433 	if (common->fes_duration_us)
1434 		tx_duration = common->fes_duration_us;
1435 
1436 	user_rate = &usr_stats->rate;
1437 	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1438 	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1439 	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1440 	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1441 	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1442 	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1443 
1444 	/* Note: If the host configured fixed rates, and in some other special
1445 	 * cases, broadcast/management frames are sent at different rates.
1446 	 * Should firmware rate control be skipped for these?
1447 	 */
1448 
1449 	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1450 		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1451 		return;
1452 	}
1453 
1454 	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1455 		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1456 		return;
1457 	}
1458 
1459 	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1460 		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1461 			    mcs, nss);
1462 		return;
1463 	}
1464 
1465 	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1466 		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1467 							    flags,
1468 							    &rate_idx,
1469 							    &rate);
1470 		if (ret < 0)
1471 			return;
1472 	}
1473 
1474 	rcu_read_lock();
1475 	spin_lock_bh(&ab->base_lock);
1476 	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1477 
1478 	if (!peer || !peer->sta) {
1479 		spin_unlock_bh(&ab->base_lock);
1480 		rcu_read_unlock();
1481 		return;
1482 	}
1483 
1484 	sta = peer->sta;
1485 	arsta = (struct ath11k_sta *)sta->drv_priv;
1486 
1487 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1488 
1489 	switch (flags) {
1490 	case WMI_RATE_PREAMBLE_OFDM:
1491 		arsta->txrate.legacy = rate;
1492 		break;
1493 	case WMI_RATE_PREAMBLE_CCK:
1494 		arsta->txrate.legacy = rate;
1495 		break;
1496 	case WMI_RATE_PREAMBLE_HT:
1497 		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1498 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1499 		if (sgi)
1500 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1501 		break;
1502 	case WMI_RATE_PREAMBLE_VHT:
1503 		arsta->txrate.mcs = mcs;
1504 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1505 		if (sgi)
1506 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1507 		break;
1508 	case WMI_RATE_PREAMBLE_HE:
1509 		arsta->txrate.mcs = mcs;
1510 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1511 		arsta->txrate.he_dcm = dcm;
1512 		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1513 		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1514 						((user_rate->ru_end -
1515 						 user_rate->ru_start) + 1);
1516 		break;
1517 	}
1518 
1519 	arsta->txrate.nss = nss;
1520 
1521 	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1522 	arsta->tx_duration += tx_duration;
1523 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1524 
1525 	/* PPDU stats reported for a mgmt packet don't carry valid tx bytes,
1526 	 * so skip the peer stats update for mgmt packets.
1527 	 */
1528 	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1529 		memset(peer_stats, 0, sizeof(*peer_stats));
1530 		peer_stats->succ_pkts = succ_pkts;
1531 		peer_stats->succ_bytes = succ_bytes;
1532 		peer_stats->is_ampdu = is_ampdu;
1533 		peer_stats->duration = tx_duration;
1534 		peer_stats->ba_fails =
1535 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1536 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1537 
1538 		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1539 			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1540 	}
1541 
1542 	spin_unlock_bh(&ab->base_lock);
1543 	rcu_read_unlock();
1544 }
1545 
1546 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1547 					 struct htt_ppdu_stats *ppdu_stats)
1548 {
1549 	u8 user;
1550 
1551 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1552 		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1553 }
1554 
1555 static
1556 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1557 							u32 ppdu_id)
1558 {
1559 	struct htt_ppdu_stats_info *ppdu_info;
1560 
1561 	spin_lock_bh(&ar->data_lock);
1562 	if (!list_empty(&ar->ppdu_stats_info)) {
1563 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1564 			if (ppdu_info->ppdu_id == ppdu_id) {
1565 				spin_unlock_bh(&ar->data_lock);
1566 				return ppdu_info;
1567 			}
1568 		}
1569 
1570 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1571 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1572 						     typeof(*ppdu_info), list);
1573 			list_del(&ppdu_info->list);
1574 			ar->ppdu_stat_list_depth--;
1575 			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1576 			kfree(ppdu_info);
1577 		}
1578 	}
1579 	spin_unlock_bh(&ar->data_lock);
1580 
1581 	ppdu_info = kzalloc(sizeof(*ppdu_info), GFP_ATOMIC);
1582 	if (!ppdu_info)
1583 		return NULL;
1584 
1585 	spin_lock_bh(&ar->data_lock);
1586 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1587 	ar->ppdu_stat_list_depth++;
1588 	spin_unlock_bh(&ar->data_lock);
1589 
1590 	return ppdu_info;
1591 }
1592 
1593 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1594 				      struct sk_buff *skb)
1595 {
1596 	struct ath11k_htt_ppdu_stats_msg *msg;
1597 	struct htt_ppdu_stats_info *ppdu_info;
1598 	struct ath11k *ar;
1599 	int ret;
1600 	u8 pdev_id;
1601 	u32 ppdu_id, len;
1602 
1603 	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1604 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1605 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1606 	ppdu_id = msg->ppdu_id;
1607 
1608 	rcu_read_lock();
1609 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1610 	if (!ar) {
1611 		ret = -EINVAL;
1612 		goto exit;
1613 	}
1614 
1615 	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1616 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1617 
1618 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1619 	if (!ppdu_info) {
1620 		ret = -EINVAL;
1621 		goto exit;
1622 	}
1623 
1624 	ppdu_info->ppdu_id = ppdu_id;
1625 	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1626 				     ath11k_htt_tlv_ppdu_stats_parse,
1627 				     (void *)ppdu_info);
1628 	if (ret) {
1629 		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1630 		goto exit;
1631 	}
1632 
1633 exit:
1634 	rcu_read_unlock();
1635 
1636 	return ret;
1637 }
1638 
1639 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1640 {
1641 	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1642 	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1643 	struct ath11k *ar;
1644 	u8 pdev_id;
1645 
1646 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1647 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1648 	if (!ar) {
1649 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1650 		return;
1651 	}
1652 
1653 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1654 				ar->ab->pktlog_defs_checksum);
1655 }
1656 
1657 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1658 						  struct sk_buff *skb)
1659 {
1660 	u32 *data = (u32 *)skb->data;
1661 	u8 pdev_id, ring_type, ring_id, pdev_idx;
1662 	u16 hp, tp;
1663 	u32 backpressure_time;
1664 	struct ath11k_bp_stats *bp_stats;
1665 
1666 	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1667 	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1668 	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1669 	++data;
1670 
1671 	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1672 	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1673 	++data;
1674 
1675 	backpressure_time = *data;
1676 
1677 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "htt backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1678 		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1679 
1680 	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1681 		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1682 			return;
1683 
1684 		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1685 	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1686 		pdev_idx = DP_HW2SW_MACID(pdev_id);
1687 
1688 		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1689 			return;
1690 
1691 		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1692 	} else {
1693 		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1694 			    ring_type);
1695 		return;
1696 	}
1697 
1698 	spin_lock_bh(&ab->base_lock);
1699 	bp_stats->hp = hp;
1700 	bp_stats->tp = tp;
1701 	bp_stats->count++;
1702 	bp_stats->jiffies = jiffies;
1703 	spin_unlock_bh(&ab->base_lock);
1704 }
1705 
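/* Dispatch target-to-host HTT messages (version, peer map/unmap, PPDU stats,
 * pktlog, backpressure events) and free the skb when done.
 */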
1706 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1707 				       struct sk_buff *skb)
1708 {
1709 	struct ath11k_dp *dp = &ab->dp;
1710 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1711 	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1712 	u16 peer_id;
1713 	u8 vdev_id;
1714 	u8 mac_addr[ETH_ALEN];
1715 	u16 peer_mac_h16;
1716 	u16 ast_hash;
1717 	u16 hw_peer_id;
1718 
1719 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1720 
1721 	switch (type) {
1722 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1723 		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1724 						  resp->version_msg.version);
1725 		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1726 						  resp->version_msg.version);
1727 		complete(&dp->htt_tgt_version_received);
1728 		break;
1729 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1730 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1731 				    resp->peer_map_ev.info);
1732 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1733 				    resp->peer_map_ev.info);
1734 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1735 					 resp->peer_map_ev.info1);
1736 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1737 				       peer_mac_h16, mac_addr);
1738 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1739 		break;
1740 	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1741 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1742 				    resp->peer_map_ev.info);
1743 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1744 				    resp->peer_map_ev.info);
1745 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1746 					 resp->peer_map_ev.info1);
1747 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1748 				       peer_mac_h16, mac_addr);
1749 		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1750 				     resp->peer_map_ev.info2);
1751 		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1752 				       resp->peer_map_ev.info1);
1753 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1754 				      hw_peer_id);
1755 		break;
1756 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1757 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1758 		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1759 				    resp->peer_unmap_ev.info);
1760 		ath11k_peer_unmap_event(ab, peer_id);
1761 		break;
1762 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1763 		ath11k_htt_pull_ppdu_stats(ab, skb);
1764 		break;
1765 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1766 		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1767 		break;
1768 	case HTT_T2H_MSG_TYPE_PKTLOG:
1769 		ath11k_htt_pktlog(ab, skb);
1770 		break;
1771 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1772 		ath11k_htt_backpressure_event_handler(ab, skb);
1773 		break;
1774 	default:
1775 		ath11k_warn(ab, "htt event %d not handled\n", type);
1776 		break;
1777 	}
1778 
1779 	dev_kfree_skb_any(skb);
1780 }
1781 
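/* Coalesce an MSDU that RXDMA scattered across multiple rx buffers into the
 * first skb: trim the hal descriptor and L3 padding from the head buffer,
 * expand it if the remaining length does not fit its tailroom, then copy the
 * continuation buffers in order and free them.
 */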
1782 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1783 				      struct sk_buff_head *msdu_list,
1784 				      struct sk_buff *first, struct sk_buff *last,
1785 				      u8 l3pad_bytes, int msdu_len)
1786 {
1787 	struct ath11k_base *ab = ar->ab;
1788 	struct sk_buff *skb;
1789 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1790 	int buf_first_hdr_len, buf_first_len;
1791 	struct hal_rx_desc *ldesc;
1792 	int space_extra, rem_len, buf_len;
1793 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1794 
1795 	/* As the msdu is spread across multiple rx buffers,
1796 	 * find the offset to the start of msdu for computing
1797 	 * the length of the msdu in the first buffer.
1798 	 */
1799 	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1800 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1801 
1802 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1803 		skb_put(first, buf_first_hdr_len + msdu_len);
1804 		skb_pull(first, buf_first_hdr_len);
1805 		return 0;
1806 	}
1807 
1808 	ldesc = (struct hal_rx_desc *)last->data;
1809 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1810 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1811 
1812 	/* MSDU spans over multiple buffers because the length of the MSDU
1813 	 * exceeds DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE. So assume the data
1814 	 * in the first buf is of length DP_RX_BUFFER_SIZE - HAL_RX_DESC_SIZE.
1815 	 */
1816 	skb_put(first, DP_RX_BUFFER_SIZE);
1817 	skb_pull(first, buf_first_hdr_len);
1818 
1819 	/* When an MSDU is spread over multiple buffers, the attention, MSDU_END
1820 	 * and MPDU_END tlvs are valid only in the last buffer. Copy those tlvs.
1821 	 */
1822 	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1823 
1824 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1825 	if (space_extra > 0 &&
1826 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1827 		/* Free up all buffers of the MSDU */
1828 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1829 			rxcb = ATH11K_SKB_RXCB(skb);
1830 			if (!rxcb->is_continuation) {
1831 				dev_kfree_skb_any(skb);
1832 				break;
1833 			}
1834 			dev_kfree_skb_any(skb);
1835 		}
1836 		return -ENOMEM;
1837 	}
1838 
1839 	rem_len = msdu_len - buf_first_len;
1840 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1841 		rxcb = ATH11K_SKB_RXCB(skb);
1842 		if (rxcb->is_continuation)
1843 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1844 		else
1845 			buf_len = rem_len;
1846 
1847 		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1848 			WARN_ON_ONCE(1);
1849 			dev_kfree_skb_any(skb);
1850 			return -EINVAL;
1851 		}
1852 
1853 		skb_put(skb, buf_len + hal_rx_desc_sz);
1854 		skb_pull(skb, hal_rx_desc_sz);
1855 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1856 					  buf_len);
1857 		dev_kfree_skb_any(skb);
1858 
1859 		rem_len -= buf_len;
1860 		if (!rxcb->is_continuation)
1861 			break;
1862 	}
1863 
1864 	return 0;
1865 }
1866 
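/* Return the buffer holding the final part of an MSDU. If the first buffer
 * is not a continuation it already contains the whole MSDU; otherwise walk
 * msdu_list for the first non-continuation buffer, which carries the valid
 * attention/MSDU_END/MPDU_END TLVs.
 */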
1867 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1868 						      struct sk_buff *first)
1869 {
1870 	struct sk_buff *skb;
1871 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1872 
1873 	if (!rxcb->is_continuation)
1874 		return first;
1875 
1876 	skb_queue_walk(msdu_list, skb) {
1877 		rxcb = ATH11K_SKB_RXCB(skb);
1878 		if (!rxcb->is_continuation)
1879 			return skb;
1880 	}
1881 
1882 	return NULL;
1883 }
1884 
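/* Map the hardware IP/L4 checksum verdict from the attention TLV onto
 * skb->ip_summed so the stack skips software checksum verification when
 * hardware already validated the frame.
 */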
1885 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1886 {
1887 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1888 	struct rx_attention *rx_attention;
1889 	bool ip_csum_fail, l4_csum_fail;
1890 
1891 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1892 	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1893 	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1894 
1895 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1896 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1897 }
1898 
1899 static int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar,
1900 				       enum hal_encrypt_type enctype)
1901 {
1902 	switch (enctype) {
1903 	case HAL_ENCRYPT_TYPE_OPEN:
1904 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1905 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1906 		return 0;
1907 	case HAL_ENCRYPT_TYPE_CCMP_128:
1908 		return IEEE80211_CCMP_MIC_LEN;
1909 	case HAL_ENCRYPT_TYPE_CCMP_256:
1910 		return IEEE80211_CCMP_256_MIC_LEN;
1911 	case HAL_ENCRYPT_TYPE_GCMP_128:
1912 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1913 		return IEEE80211_GCMP_MIC_LEN;
1914 	case HAL_ENCRYPT_TYPE_WEP_40:
1915 	case HAL_ENCRYPT_TYPE_WEP_104:
1916 	case HAL_ENCRYPT_TYPE_WEP_128:
1917 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1918 	case HAL_ENCRYPT_TYPE_WAPI:
1919 		break;
1920 	}
1921 
1922 	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1923 	return 0;
1924 }
1925 
1926 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1927 					 enum hal_encrypt_type enctype)
1928 {
1929 	switch (enctype) {
1930 	case HAL_ENCRYPT_TYPE_OPEN:
1931 		return 0;
1932 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1933 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1934 		return IEEE80211_TKIP_IV_LEN;
1935 	case HAL_ENCRYPT_TYPE_CCMP_128:
1936 		return IEEE80211_CCMP_HDR_LEN;
1937 	case HAL_ENCRYPT_TYPE_CCMP_256:
1938 		return IEEE80211_CCMP_256_HDR_LEN;
1939 	case HAL_ENCRYPT_TYPE_GCMP_128:
1940 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1941 		return IEEE80211_GCMP_HDR_LEN;
1942 	case HAL_ENCRYPT_TYPE_WEP_40:
1943 	case HAL_ENCRYPT_TYPE_WEP_104:
1944 	case HAL_ENCRYPT_TYPE_WEP_128:
1945 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1946 	case HAL_ENCRYPT_TYPE_WAPI:
1947 		break;
1948 	}
1949 
1950 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1951 	return 0;
1952 }
1953 
1954 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1955 				       enum hal_encrypt_type enctype)
1956 {
1957 	switch (enctype) {
1958 	case HAL_ENCRYPT_TYPE_OPEN:
1959 	case HAL_ENCRYPT_TYPE_CCMP_128:
1960 	case HAL_ENCRYPT_TYPE_CCMP_256:
1961 	case HAL_ENCRYPT_TYPE_GCMP_128:
1962 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1963 		return 0;
1964 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1965 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1966 		return IEEE80211_TKIP_ICV_LEN;
1967 	case HAL_ENCRYPT_TYPE_WEP_40:
1968 	case HAL_ENCRYPT_TYPE_WEP_104:
1969 	case HAL_ENCRYPT_TYPE_WEP_128:
1970 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1971 	case HAL_ENCRYPT_TYPE_WAPI:
1972 		break;
1973 	}
1974 
1975 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1976 	return 0;
1977 }
1978 
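/* Undecap a native-wifi formatted MSDU: reuse the original 802.11 header
 * for the first MSDU of an A-MSDU (clearing the A-MSDU present bit) and
 * rebuild the QoS control field for middle/last MSDUs, re-insert the IV
 * when it was not stripped, and restore the original DA/SA.
 */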
1979 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1980 					 struct sk_buff *msdu,
1981 					 u8 *first_hdr,
1982 					 enum hal_encrypt_type enctype,
1983 					 struct ieee80211_rx_status *status)
1984 {
1985 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1986 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1987 	struct ieee80211_hdr *hdr;
1988 	size_t hdr_len;
1989 	u8 da[ETH_ALEN];
1990 	u8 sa[ETH_ALEN];
1991 	u16 qos_ctl = 0;
1992 	u8 *qos;
1993 
1994 	/* copy SA & DA and pull decapped header */
1995 	hdr = (struct ieee80211_hdr *)msdu->data;
1996 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1997 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1998 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1999 	skb_pull(msdu, ieee80211_hdrlen(hdr->frame_control));
2000 
2001 	if (rxcb->is_first_msdu) {
2002 		/* original 802.11 header is valid for the first msdu
2003 		 * hence we can reuse the same header
2004 		 */
2005 		hdr = (struct ieee80211_hdr *)first_hdr;
2006 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2007 
2008 		/* Each A-MSDU subframe will be reported as a separate MSDU,
2009 		 * so strip the A-MSDU bit from QoS Ctl.
2010 		 */
2011 		if (ieee80211_is_data_qos(hdr->frame_control)) {
2012 			qos = ieee80211_get_qos_ctl(hdr);
2013 			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
2014 		}
2015 	} else {
2016 		/* Rebuild the QoS header if this is a middle/last msdu */
2017 		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
2018 
2019 		/* Reset the order bit as the HT_Control header is stripped */
2020 		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
2021 
2022 		qos_ctl = rxcb->tid;
2023 
2024 		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
2025 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
2026 
2027 		/* TODO Add other QoS ctl fields when required */
2028 
2029 		/* copy decap header before overwriting for reuse below */
2030 		memcpy(decap_hdr, (uint8_t *)hdr, hdr_len);
2031 	}
2032 
2033 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2034 		memcpy(skb_push(msdu,
2035 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2036 #if defined(__linux__)
2037 		       (void *)hdr + hdr_len,
2038 #elif defined(__FreeBSD__)
2039 		       (u8 *)hdr + hdr_len,
2040 #endif
2041 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2042 	}
2043 
2044 	if (!rxcb->is_first_msdu) {
2045 		memcpy(skb_push(msdu,
2046 				IEEE80211_QOS_CTL_LEN), &qos_ctl,
2047 				IEEE80211_QOS_CTL_LEN);
2048 		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2049 		return;
2050 	}
2051 
2052 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2053 
2054 	/* original 802.11 header has a different DA and in
2055 	 * case of 4addr it may also have different SA
2056 	 */
2057 	hdr = (struct ieee80211_hdr *)msdu->data;
2058 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2059 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2060 }
2061 
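/* Raw decapped frames still carry the FCS and, when decrypted in hardware,
 * the crypto header and trailer. Trim the FCS, MIC, ICV and Michael MIC as
 * indicated by the rx status flags and, when the IV was stripped, move the
 * 802.11 header forward over the security header and pull it.
 */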
2062 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2063 				       enum hal_encrypt_type enctype,
2064 				       struct ieee80211_rx_status *status,
2065 				       bool decrypted)
2066 {
2067 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2068 	struct ieee80211_hdr *hdr;
2069 	size_t hdr_len;
2070 	size_t crypto_len;
2071 
2072 	if (!rxcb->is_first_msdu ||
2073 	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2074 		WARN_ON_ONCE(1);
2075 		return;
2076 	}
2077 
2078 	skb_trim(msdu, msdu->len - FCS_LEN);
2079 
2080 	if (!decrypted)
2081 		return;
2082 
2083 	hdr = (void *)msdu->data;
2084 
2085 	/* Tail */
2086 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2087 		skb_trim(msdu, msdu->len -
2088 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2089 
2090 		skb_trim(msdu, msdu->len -
2091 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2092 	} else {
2093 		/* MIC */
2094 		if (status->flag & RX_FLAG_MIC_STRIPPED)
2095 			skb_trim(msdu, msdu->len -
2096 				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2097 
2098 		/* ICV */
2099 		if (status->flag & RX_FLAG_ICV_STRIPPED)
2100 			skb_trim(msdu, msdu->len -
2101 				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2102 	}
2103 
2104 	/* MMIC */
2105 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2106 	    !ieee80211_has_morefrags(hdr->frame_control) &&
2107 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2108 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2109 
2110 	/* Head */
2111 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2112 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2113 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2114 
2115 #if defined(__linux__)
2116 		memmove((void *)msdu->data + crypto_len,
2117 			(void *)msdu->data, hdr_len);
2118 #elif defined(__FreeBSD__)
2119 		memmove((u8 *)msdu->data + crypto_len,
2120 			(u8 *)msdu->data, hdr_len);
2121 #endif
2122 		skb_pull(msdu, crypto_len);
2123 	}
2124 }
2125 
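/* Locate the rfc1042/LLC/SNAP header within the 802.11 header status area
 * of the rx descriptor, skipping the 802.11 header, the security header
 * and, for A-MSDU subframes, the A-MSDU subframe header.
 */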
2126 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
2127 					 struct sk_buff *msdu,
2128 					 enum hal_encrypt_type enctype)
2129 {
2130 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2131 	struct ieee80211_hdr *hdr;
2132 	size_t hdr_len, crypto_len;
2133 #if defined(__linux__)
2134 	void *rfc1042;
2135 #elif defined(__FreeBSD__)
2136 	u8 *rfc1042;
2137 #endif
2138 	bool is_amsdu;
2139 
2140 	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2141 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2142 #if defined(__linux__)
2143 	rfc1042 = hdr;
2144 #elif defined(__FreeBSD__)
2145 	rfc1042 = (void *)hdr;
2146 #endif
2147 
2148 	if (rxcb->is_first_msdu) {
2149 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2150 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2151 
2152 		rfc1042 += hdr_len + crypto_len;
2153 	}
2154 
2155 	if (is_amsdu)
2156 		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2157 
2158 	return rfc1042;
2159 }
2160 
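/* Undecap an Ethernet II (DIX) formatted MSDU back into 802.11 form: strip
 * the ethernet header, push the rfc1042/LLC/SNAP header, the IV (when not
 * stripped) and the original 802.11 header, then restore the DA/SA from the
 * ethernet header.
 */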
2161 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2162 				       struct sk_buff *msdu,
2163 				       u8 *first_hdr,
2164 				       enum hal_encrypt_type enctype,
2165 				       struct ieee80211_rx_status *status)
2166 {
2167 	struct ieee80211_hdr *hdr;
2168 	struct ethhdr *eth;
2169 	size_t hdr_len;
2170 	u8 da[ETH_ALEN];
2171 	u8 sa[ETH_ALEN];
2172 	void *rfc1042;
2173 
2174 	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2175 	if (WARN_ON_ONCE(!rfc1042))
2176 		return;
2177 
2178 	/* pull decapped header and copy SA & DA */
2179 	eth = (struct ethhdr *)msdu->data;
2180 	ether_addr_copy(da, eth->h_dest);
2181 	ether_addr_copy(sa, eth->h_source);
2182 	skb_pull(msdu, sizeof(struct ethhdr));
2183 
2184 	/* push rfc1042/llc/snap */
2185 	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2186 	       sizeof(struct ath11k_dp_rfc1042_hdr));
2187 
2188 	/* push original 802.11 header */
2189 	hdr = (struct ieee80211_hdr *)first_hdr;
2190 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2191 
2192 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2193 		memcpy(skb_push(msdu,
2194 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2195 #if defined(__linux__)
2196 		       (void *)hdr + hdr_len,
2197 #elif defined(__FreeBSD__)
2198 		       (u8 *)hdr + hdr_len,
2199 #endif
2200 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2201 	}
2202 
2203 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2204 
2205 	/* original 802.11 header has a different DA and in
2206 	 * case of 4addr it may also have different SA
2207 	 */
2208 	hdr = (struct ieee80211_hdr *)msdu->data;
2209 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2210 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2211 }
2212 
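/* Dispatch to the decap handler matching the hardware reported decap type.
 * Ethernet decapped frames are normally left untouched for the 802.3 fast
 * rx path; EAPOL frames and decrypted multicast frames are converted back
 * to 802.11 form.
 */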
2213 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2214 				   struct hal_rx_desc *rx_desc,
2215 				   enum hal_encrypt_type enctype,
2216 				   struct ieee80211_rx_status *status,
2217 				   bool decrypted)
2218 {
2219 	u8 *first_hdr;
2220 	u8 decap;
2221 	struct ethhdr *ehdr;
2222 
2223 	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2224 	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2225 
2226 	switch (decap) {
2227 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2228 		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2229 					     enctype, status);
2230 		break;
2231 	case DP_RX_DECAP_TYPE_RAW:
2232 		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2233 					   decrypted);
2234 		break;
2235 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2236 		ehdr = (struct ethhdr *)msdu->data;
2237 
2238 		/* mac80211 allows fast path only for authorized STA */
2239 		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2240 			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2241 			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2242 						   enctype, status);
2243 			break;
2244 		}
2245 
2246 		/* PN for mcast packets will be validated in mac80211;
2247 		 * remove eth header and add 802.11 header.
2248 		 */
2249 		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2250 			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2251 						   enctype, status);
2252 		break;
2253 	case DP_RX_DECAP_TYPE_8023:
2254 		/* TODO: Handle undecap for these formats */
2255 		break;
2256 	}
2257 }
2258 
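/* Find the peer an MSDU belongs to: prefer the peer id reported in the REO
 * descriptor and fall back to looking up address 2 from the rx descriptor.
 * Caller must hold ab->base_lock.
 */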
2259 static struct ath11k_peer *
2260 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2261 {
2262 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2263 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2264 	struct ath11k_peer *peer = NULL;
2265 
2266 	lockdep_assert_held(&ab->base_lock);
2267 
2268 	if (rxcb->peer_id)
2269 		peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2270 
2271 	if (peer)
2272 		return peer;
2273 
2274 	if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2275 		return NULL;
2276 
2277 	peer = ath11k_peer_find_by_addr(ab,
2278 					ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2279 	return peer;
2280 }
2281 
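/* Per-MPDU rx processing: resolve the peer to determine the cipher, derive
 * decryption status and error bits from the attention TLV, update the
 * mac80211 rx flags, apply checksum offload and undecap the frame. For
 * fully decrypted non-multicast frames that were restored to 802.11 form,
 * the protected bit is cleared from the frame control field.
 */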
2282 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2283 				struct sk_buff *msdu,
2284 				struct hal_rx_desc *rx_desc,
2285 				struct ieee80211_rx_status *rx_status)
2286 {
2287 	bool  fill_crypto_hdr;
2288 	enum hal_encrypt_type enctype;
2289 	bool is_decrypted = false;
2290 	struct ath11k_skb_rxcb *rxcb;
2291 	struct ieee80211_hdr *hdr;
2292 	struct ath11k_peer *peer;
2293 	struct rx_attention *rx_attention;
2294 	u32 err_bitmap;
2295 
2296 	/* PN for multicast packets will be checked in mac80211 */
2297 	rxcb = ATH11K_SKB_RXCB(msdu);
2298 	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2299 	rxcb->is_mcbc = fill_crypto_hdr;
2300 
2301 	if (rxcb->is_mcbc) {
2302 		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2303 		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2304 	}
2305 
2306 	spin_lock_bh(&ar->ab->base_lock);
2307 	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2308 	if (peer) {
2309 		if (rxcb->is_mcbc)
2310 			enctype = peer->sec_type_grp;
2311 		else
2312 			enctype = peer->sec_type;
2313 	} else {
2314 		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2315 	}
2316 	spin_unlock_bh(&ar->ab->base_lock);
2317 
2318 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2319 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2320 	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2321 		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2322 
2323 	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2324 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2325 			     RX_FLAG_MMIC_ERROR |
2326 			     RX_FLAG_DECRYPTED |
2327 			     RX_FLAG_IV_STRIPPED |
2328 			     RX_FLAG_MMIC_STRIPPED);
2329 
2330 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2331 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2332 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2333 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2334 
2335 	if (is_decrypted) {
2336 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2337 
2338 		if (fill_crypto_hdr)
2339 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2340 					RX_FLAG_ICV_STRIPPED;
2341 		else
2342 			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2343 					   RX_FLAG_PN_VALIDATED;
2344 	}
2345 
2346 	ath11k_dp_rx_h_csum_offload(ar, msdu);
2347 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2348 			       enctype, rx_status, is_decrypted);
2349 
2350 	if (!is_decrypted || fill_crypto_hdr)
2351 		return;
2352 
2353 	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2354 	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2355 		hdr = (void *)msdu->data;
2356 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2357 	}
2358 }
2359 
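/* Fill in rate information (legacy rate index or HT/VHT/HE MCS, NSS,
 * bandwidth, guard interval and LDPC) from the msdu_start fields of the
 * rx descriptor.
 */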
2360 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2361 				struct ieee80211_rx_status *rx_status)
2362 {
2363 	struct ieee80211_supported_band *sband;
2364 	enum rx_msdu_start_pkt_type pkt_type;
2365 	u8 bw;
2366 	u8 rate_mcs, nss;
2367 	u8 sgi;
2368 	bool is_cck, is_ldpc;
2369 
2370 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2371 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2372 	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2373 	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2374 	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2375 
2376 	switch (pkt_type) {
2377 	case RX_MSDU_START_PKT_TYPE_11A:
2378 	case RX_MSDU_START_PKT_TYPE_11B:
2379 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2380 		sband = &ar->mac.sbands[rx_status->band];
2381 		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2382 								is_cck);
2383 		break;
2384 	case RX_MSDU_START_PKT_TYPE_11N:
2385 		rx_status->encoding = RX_ENC_HT;
2386 		if (rate_mcs > ATH11K_HT_MCS_MAX) {
2387 			ath11k_warn(ar->ab,
2388 				    "Received with invalid mcs in HT mode %d\n",
2389 				     rate_mcs);
2390 			break;
2391 		}
2392 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2393 		if (sgi)
2394 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2395 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2396 		break;
2397 	case RX_MSDU_START_PKT_TYPE_11AC:
2398 		rx_status->encoding = RX_ENC_VHT;
2399 		rx_status->rate_idx = rate_mcs;
2400 		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2401 			ath11k_warn(ar->ab,
2402 				    "Received with invalid mcs in VHT mode %d\n",
2403 				     rate_mcs);
2404 			break;
2405 		}
2406 		rx_status->nss = nss;
2407 		if (sgi)
2408 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2409 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2410 		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2411 		if (is_ldpc)
2412 			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2413 		break;
2414 	case RX_MSDU_START_PKT_TYPE_11AX:
2415 		rx_status->rate_idx = rate_mcs;
2416 		if (rate_mcs > ATH11K_HE_MCS_MAX) {
2417 			ath11k_warn(ar->ab,
2418 				    "Received with invalid mcs in HE mode %d\n",
2419 				    rate_mcs);
2420 			break;
2421 		}
2422 		rx_status->encoding = RX_ENC_HE;
2423 		rx_status->nss = nss;
2424 		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2425 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2426 		break;
2427 	}
2428 }
2429 
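/* Derive band and frequency for the PPDU from the rx descriptor, falling
 * back to the current rx channel when the reported channel number is not
 * conclusive, then fill in the rate information.
 */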
2430 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2431 				struct ieee80211_rx_status *rx_status)
2432 {
2433 	u8 channel_num;
2434 	u32 center_freq, meta_data;
2435 	struct ieee80211_channel *channel;
2436 
2437 	rx_status->freq = 0;
2438 	rx_status->rate_idx = 0;
2439 	rx_status->nss = 0;
2440 	rx_status->encoding = RX_ENC_LEGACY;
2441 	rx_status->bw = RATE_INFO_BW_20;
2442 
2443 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2444 
2445 	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2446 	channel_num = meta_data;
2447 	center_freq = meta_data >> 16;
2448 
2449 	if (center_freq >= ATH11K_MIN_6G_FREQ &&
2450 	    center_freq <= ATH11K_MAX_6G_FREQ) {
2451 		rx_status->band = NL80211_BAND_6GHZ;
2452 		rx_status->freq = center_freq;
2453 	} else if (channel_num >= 1 && channel_num <= 14) {
2454 		rx_status->band = NL80211_BAND_2GHZ;
2455 	} else if (channel_num >= 36 && channel_num <= 173) {
2456 		rx_status->band = NL80211_BAND_5GHZ;
2457 	} else {
2458 		spin_lock_bh(&ar->data_lock);
2459 		channel = ar->rx_channel;
2460 		if (channel) {
2461 			rx_status->band = channel->band;
2462 			channel_num =
2463 				ieee80211_frequency_to_channel(channel->center_freq);
2464 		}
2465 		spin_unlock_bh(&ar->data_lock);
2466 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2467 				rx_desc, sizeof(struct hal_rx_desc));
2468 	}
2469 
2470 	if (rx_status->band != NL80211_BAND_6GHZ)
2471 		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2472 								 rx_status->band);
2473 
2474 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2475 }
2476 
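/* Hand a fully processed MSDU to mac80211: attach a radiotap HE header for
 * HE receptions, and mark ethernet-decapped non-EAPOL frames (except
 * decrypted multicast) with RX_FLAG_8023 so mac80211 can use the 802.3
 * rx path.
 */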
2477 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2478 				      struct sk_buff *msdu,
2479 				      struct ieee80211_rx_status *status)
2480 {
2481 	static const struct ieee80211_radiotap_he known = {
2482 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2483 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2484 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2485 	};
2486 	struct ieee80211_rx_status *rx_status;
2487 	struct ieee80211_radiotap_he *he = NULL;
2488 	struct ieee80211_sta *pubsta = NULL;
2489 	struct ath11k_peer *peer;
2490 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2491 	u8 decap = DP_RX_DECAP_TYPE_RAW;
2492 	bool is_mcbc = rxcb->is_mcbc;
2493 	bool is_eapol = rxcb->is_eapol;
2494 
2495 	if (status->encoding == RX_ENC_HE &&
2496 	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2497 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2498 		he = skb_push(msdu, sizeof(known));
2499 		memcpy(he, &known, sizeof(known));
2500 		status->flag |= RX_FLAG_RADIOTAP_HE;
2501 	}
2502 
2503 	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2504 		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2505 
2506 	spin_lock_bh(&ar->ab->base_lock);
2507 	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2508 	if (peer && peer->sta)
2509 		pubsta = peer->sta;
2510 	spin_unlock_bh(&ar->ab->base_lock);
2511 
2512 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2513 		   "rx skb %pK len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2514 		   msdu,
2515 		   msdu->len,
2516 		   peer ? peer->addr : NULL,
2517 		   rxcb->tid,
2518 		   is_mcbc ? "mcast" : "ucast",
2519 		   rxcb->seq_no,
2520 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2521 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2522 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2523 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2524 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2525 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2526 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2527 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2528 		   status->rate_idx,
2529 		   status->nss,
2530 		   status->freq,
2531 		   status->band, status->flag,
2532 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2533 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2534 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2535 
2536 	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2537 			msdu->data, msdu->len);
2538 
2539 	rx_status = IEEE80211_SKB_RXCB(msdu);
2540 	*rx_status = *status;
2541 
2542 	/* TODO: trace rx packet */
2543 
2544 	/* The PN for multicast packets is not validated in HW,
2545 	 * so skip the 802.3 rx path.
2546 	 * Also, fast_rx expects the STA to be authorized, hence
2547 	 * EAPOL packets are sent via the slow path.
2548 	 */
2549 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2550 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2551 		rx_status->flag |= RX_FLAG_8023;
2552 
2553 	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2554 }
2555 
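/* Validate and prepare a single reaped MSDU: check the msdu_done bit in the
 * last buffer's attention TLV, coalesce multi-buffer MSDUs, strip the hal
 * rx descriptor and l3 padding, and fill in the mac80211 rx status.
 */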
2556 static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2557 				     struct sk_buff *msdu,
2558 				     struct sk_buff_head *msdu_list,
2559 				     struct ieee80211_rx_status *rx_status)
2560 {
2561 	struct ath11k_base *ab = ar->ab;
2562 	struct hal_rx_desc *rx_desc, *lrx_desc;
2563 	struct rx_attention *rx_attention;
2564 	struct ath11k_skb_rxcb *rxcb;
2565 	struct sk_buff *last_buf;
2566 	u8 l3_pad_bytes;
2567 	u8 *hdr_status;
2568 	u16 msdu_len;
2569 	int ret;
2570 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2571 
2572 	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2573 	if (!last_buf) {
2574 		ath11k_warn(ab,
2575 			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2576 		ret = -EIO;
2577 		goto free_out;
2578 	}
2579 
2580 	rx_desc = (struct hal_rx_desc *)msdu->data;
2581 	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2582 		ath11k_warn(ar->ab, "msdu len not valid\n");
2583 		ret = -EIO;
2584 		goto free_out;
2585 	}
2586 
2587 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2588 	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2589 	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2590 		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2591 		ret = -EIO;
2592 		goto free_out;
2593 	}
2594 
2595 	rxcb = ATH11K_SKB_RXCB(msdu);
2596 	rxcb->rx_desc = rx_desc;
2597 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2598 	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2599 
2600 	if (rxcb->is_frag) {
2601 		skb_pull(msdu, hal_rx_desc_sz);
2602 	} else if (!rxcb->is_continuation) {
2603 		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2604 			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2605 			ret = -EINVAL;
2606 			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2607 			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2608 					sizeof(struct ieee80211_hdr));
2609 			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2610 					sizeof(struct hal_rx_desc));
2611 			goto free_out;
2612 		}
2613 		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2614 		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2615 	} else {
2616 		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2617 						 msdu, last_buf,
2618 						 l3_pad_bytes, msdu_len);
2619 		if (ret) {
2620 			ath11k_warn(ab,
2621 				    "failed to coalesce msdu rx buffer %d\n", ret);
2622 			goto free_out;
2623 		}
2624 	}
2625 
2626 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2627 	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2628 
2629 	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2630 
2631 	return 0;
2632 
2633 free_out:
2634 	return ret;
2635 }
2636 
2637 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2638 						  struct napi_struct *napi,
2639 						  struct sk_buff_head *msdu_list,
2640 						  int mac_id)
2641 {
2642 	struct sk_buff *msdu;
2643 	struct ath11k *ar;
2644 	struct ieee80211_rx_status rx_status = {0};
2645 	int ret;
2646 
2647 	if (skb_queue_empty(msdu_list))
2648 		return;
2649 
2650 	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2651 		__skb_queue_purge(msdu_list);
2652 		return;
2653 	}
2654 
2655 	ar = ab->pdevs[mac_id].ar;
2656 	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2657 		__skb_queue_purge(msdu_list);
2658 		return;
2659 	}
2660 
2661 	while ((msdu = __skb_dequeue(msdu_list))) {
2662 		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2663 		if (unlikely(ret)) {
2664 			ath11k_dbg(ab, ATH11K_DBG_DATA,
2665 				   "Unable to process msdu %d\n", ret);
2666 			dev_kfree_skb_any(msdu);
2667 			continue;
2668 		}
2669 
2670 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2671 	}
2672 }
2673 
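/* NAPI poll handler for a REO destination ring: reap up to 'budget'
 * completed MSDUs, sort them into per-pdev lists, process and deliver them
 * to mac80211, and replenish the rx refill buffer rings.
 */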
2674 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2675 			 struct napi_struct *napi, int budget)
2676 {
2677 	struct ath11k_dp *dp = &ab->dp;
2678 	struct dp_rxdma_ring *rx_ring;
2679 	int num_buffs_reaped[MAX_RADIOS] = {0};
2680 	struct sk_buff_head msdu_list[MAX_RADIOS];
2681 	struct ath11k_skb_rxcb *rxcb;
2682 	int total_msdu_reaped = 0;
2683 	struct hal_srng *srng;
2684 	struct sk_buff *msdu;
2685 	bool done = false;
2686 	int buf_id, mac_id;
2687 	struct ath11k *ar;
2688 	struct hal_reo_dest_ring *desc;
2689 	enum hal_reo_dest_ring_push_reason push_reason;
2690 	u32 cookie;
2691 	int i;
2692 
2693 	for (i = 0; i < MAX_RADIOS; i++)
2694 		__skb_queue_head_init(&msdu_list[i]);
2695 
2696 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2697 
2698 	spin_lock_bh(&srng->lock);
2699 
2700 	ath11k_hal_srng_access_begin(ab, srng);
2701 
2702 try_again:
2703 	while (likely(desc =
2704 	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2705 									     srng))) {
2706 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2707 				   desc->buf_addr_info.info1);
2708 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2709 				   cookie);
2710 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2711 
2712 		ar = ab->pdevs[mac_id].ar;
2713 		rx_ring = &ar->dp.rx_refill_buf_ring;
2714 		spin_lock_bh(&rx_ring->idr_lock);
2715 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2716 		if (unlikely(!msdu)) {
2717 			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2718 				    buf_id);
2719 			spin_unlock_bh(&rx_ring->idr_lock);
2720 			continue;
2721 		}
2722 
2723 		idr_remove(&rx_ring->bufs_idr, buf_id);
2724 		spin_unlock_bh(&rx_ring->idr_lock);
2725 
2726 		rxcb = ATH11K_SKB_RXCB(msdu);
2727 		dma_unmap_single(ab->dev, rxcb->paddr,
2728 				 msdu->len + skb_tailroom(msdu),
2729 				 DMA_FROM_DEVICE);
2730 
2731 		num_buffs_reaped[mac_id]++;
2732 
2733 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2734 					desc->info0);
2735 		if (unlikely(push_reason !=
2736 			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2737 			dev_kfree_skb_any(msdu);
2738 			ab->soc_stats.hal_reo_error[dp->reo_dst_ring[ring_id].ring_id]++;
2739 			continue;
2740 		}
2741 
2742 		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2743 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2744 		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2745 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2746 		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2747 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2748 		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2749 					  desc->rx_mpdu_info.meta_data);
2750 		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2751 					 desc->rx_mpdu_info.info0);
2752 		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2753 				      desc->info0);
2754 
2755 		rxcb->mac_id = mac_id;
2756 		__skb_queue_tail(&msdu_list[mac_id], msdu);
2757 
2758 		if (rxcb->is_continuation) {
2759 			done = false;
2760 		} else {
2761 			total_msdu_reaped++;
2762 			done = true;
2763 		}
2764 
2765 		if (total_msdu_reaped >= budget)
2766 			break;
2767 	}
2768 
2769 	/* HW might have updated the head pointer after we cached it.
2770 	 * In this case, even though there are entries in the ring we'll
2771 	 * get a NULL descriptor. Give the read another try with the updated
2772 	 * cached head pointer so that we can reap the complete MPDU in the
2773 	 * current rx processing.
2774 	 */
2775 	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2776 		ath11k_hal_srng_access_end(ab, srng);
2777 		goto try_again;
2778 	}
2779 
2780 	ath11k_hal_srng_access_end(ab, srng);
2781 
2782 	spin_unlock_bh(&srng->lock);
2783 
2784 	if (unlikely(!total_msdu_reaped))
2785 		goto exit;
2786 
2787 	for (i = 0; i < ab->num_radios; i++) {
2788 		if (!num_buffs_reaped[i])
2789 			continue;
2790 
2791 		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2792 
2793 		ar = ab->pdevs[i].ar;
2794 		rx_ring = &ar->dp.rx_refill_buf_ring;
2795 
2796 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2797 					   ab->hw_params.hal_params->rx_buf_rbm);
2798 	}
2799 exit:
2800 	return total_msdu_reaped;
2801 }
2802 
2803 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2804 					   struct hal_rx_mon_ppdu_info *ppdu_info)
2805 {
2806 	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2807 	u32 num_msdu;
2808 	int i;
2809 
2810 	if (!rx_stats)
2811 		return;
2812 
2813 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2814 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2815 
2816 	rx_stats->num_msdu += num_msdu;
2817 	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2818 				    ppdu_info->tcp_ack_msdu_count;
2819 	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2820 	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2821 
2822 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2823 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2824 		ppdu_info->nss = 1;
2825 		ppdu_info->mcs = HAL_RX_MAX_MCS;
2826 		ppdu_info->tid = IEEE80211_NUM_TIDS;
2827 	}
2828 
2829 	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2830 		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2831 
2832 	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2833 		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2834 
2835 	if (ppdu_info->gi < HAL_RX_GI_MAX)
2836 		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2837 
2838 	if (ppdu_info->bw < HAL_RX_BW_MAX)
2839 		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2840 
2841 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2842 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2843 
2844 	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2845 		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2846 
2847 	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2848 		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2849 
2850 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2851 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2852 
2853 	if (ppdu_info->is_stbc)
2854 		rx_stats->stbc_count += num_msdu;
2855 
2856 	if (ppdu_info->beamformed)
2857 		rx_stats->beamformed_count += num_msdu;
2858 
2859 	if (ppdu_info->num_mpdu_fcs_ok > 1)
2860 		rx_stats->ampdu_msdu_count += num_msdu;
2861 	else
2862 		rx_stats->non_ampdu_msdu_count += num_msdu;
2863 
2864 	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2865 	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2866 	rx_stats->dcm_count += ppdu_info->dcm;
2867 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2868 
2869 	arsta->rssi_comb = ppdu_info->rssi_comb;
2870 
2871 	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2872 			     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2873 
2874 	for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2875 		arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2876 
2877 	rx_stats->rx_duration += ppdu_info->rx_duration;
2878 	arsta->rx_duration = rx_stats->rx_duration;
2879 }
2880 
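/* Allocate and DMA-map a monitor status buffer and register it in the
 * ring's idr so it can be looked up again from the cookie when hardware
 * returns it.
 */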
2881 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2882 							 struct dp_rxdma_ring *rx_ring,
2883 							 int *buf_id)
2884 {
2885 	struct sk_buff *skb;
2886 	dma_addr_t paddr;
2887 
2888 	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2889 			    DP_RX_BUFFER_ALIGN_SIZE);
2890 
2891 	if (!skb)
2892 		goto fail_alloc_skb;
2893 
2894 	if (!IS_ALIGNED((unsigned long)skb->data,
2895 			DP_RX_BUFFER_ALIGN_SIZE)) {
2896 		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2897 			 skb->data);
2898 	}
2899 
2900 	paddr = dma_map_single(ab->dev, skb->data,
2901 			       skb->len + skb_tailroom(skb),
2902 			       DMA_FROM_DEVICE);
2903 	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2904 		goto fail_free_skb;
2905 
2906 	spin_lock_bh(&rx_ring->idr_lock);
2907 	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2908 			    rx_ring->bufs_max, GFP_ATOMIC);
2909 	spin_unlock_bh(&rx_ring->idr_lock);
2910 	if (*buf_id < 0)
2911 		goto fail_dma_unmap;
2912 
2913 	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2914 	return skb;
2915 
2916 fail_dma_unmap:
2917 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2918 			 DMA_FROM_DEVICE);
2919 fail_free_skb:
2920 	dev_kfree_skb_any(skb);
2921 fail_alloc_skb:
2922 	return NULL;
2923 }
2924 
2925 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2926 					   struct dp_rxdma_ring *rx_ring,
2927 					   int req_entries,
2928 					   enum hal_rx_buf_return_buf_manager mgr)
2929 {
2930 	struct hal_srng *srng;
2931 	u32 *desc;
2932 	struct sk_buff *skb;
2933 	int num_free;
2934 	int num_remain;
2935 	int buf_id;
2936 	u32 cookie;
2937 	dma_addr_t paddr;
2938 
2939 	req_entries = min(req_entries, rx_ring->bufs_max);
2940 
2941 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2942 
2943 	spin_lock_bh(&srng->lock);
2944 
2945 	ath11k_hal_srng_access_begin(ab, srng);
2946 
2947 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2948 
2949 	req_entries = min(num_free, req_entries);
2950 	num_remain = req_entries;
2951 
2952 	while (num_remain > 0) {
2953 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2954 							&buf_id);
2955 		if (!skb)
2956 			break;
2957 		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2958 
2959 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2960 		if (!desc)
2961 			goto fail_desc_get;
2962 
2963 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2964 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2965 
2966 		num_remain--;
2967 
2968 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2969 	}
2970 
2971 	ath11k_hal_srng_access_end(ab, srng);
2972 
2973 	spin_unlock_bh(&srng->lock);
2974 
2975 	return req_entries - num_remain;
2976 
2977 fail_desc_get:
2978 	spin_lock_bh(&rx_ring->idr_lock);
2979 	idr_remove(&rx_ring->bufs_idr, buf_id);
2980 	spin_unlock_bh(&rx_ring->idr_lock);
2981 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2982 			 DMA_FROM_DEVICE);
2983 	dev_kfree_skb_any(skb);
2984 	ath11k_hal_srng_access_end(ab, srng);
2985 	spin_unlock_bh(&srng->lock);
2986 
2987 	return req_entries - num_remain;
2988 }
2989 
2990 #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2991 
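/* In full monitor mode, compare the PPDU id from the status buffer's
 * second TLV (HAL_RX_PPDU_START) with the PPDU id last seen on the
 * destination ring to decide whether the status ring is leading or
 * lagging, accounting for PPDU id wrap-around.
 */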
2992 static void
2993 ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2994 					 struct hal_tlv_hdr *tlv)
2995 {
2996 	struct hal_rx_ppdu_start *ppdu_start;
2997 	u16 ppdu_id_diff, ppdu_id, tlv_len;
2998 	u8 *ptr;
2999 
3000 	/* PPDU id is part of second tlv, move ptr to second tlv */
3001 	tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
3002 	ptr = (u8 *)tlv;
3003 	ptr += sizeof(*tlv) + tlv_len;
3004 	tlv = (struct hal_tlv_hdr *)ptr;
3005 
3006 	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
3007 		return;
3008 
3009 	ptr += sizeof(*tlv);
3010 	ppdu_start = (struct hal_rx_ppdu_start *)ptr;
3011 	ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
3012 			    __le32_to_cpu(ppdu_start->info0));
3013 
3014 	if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
3015 		pmon->buf_state = DP_MON_STATUS_LEAD;
3016 		ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
3017 		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3018 			pmon->buf_state = DP_MON_STATUS_LAG;
3019 	} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
3020 		pmon->buf_state = DP_MON_STATUS_LAG;
3021 		ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
3022 		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
3023 			pmon->buf_state = DP_MON_STATUS_LEAD;
3024 	}
3025 }
3026 
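/* Reap buffers from the rx monitor status ring: queue buffers whose status
 * TLV is marked HAL_RX_STATUS_BUFFER_DONE onto skb_list and immediately
 * refill each reaped ring entry with a freshly allocated status buffer.
 */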
3027 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
3028 					     int *budget, struct sk_buff_head *skb_list)
3029 {
3030 	struct ath11k *ar;
3031 	const struct ath11k_hw_hal_params *hal_params;
3032 	struct ath11k_pdev_dp *dp;
3033 	struct dp_rxdma_ring *rx_ring;
3034 	struct ath11k_mon_data *pmon;
3035 	struct hal_srng *srng;
3036 	void *rx_mon_status_desc;
3037 	struct sk_buff *skb;
3038 	struct ath11k_skb_rxcb *rxcb;
3039 	struct hal_tlv_hdr *tlv;
3040 	u32 cookie;
3041 	int buf_id, srng_id;
3042 	dma_addr_t paddr;
3043 	u8 rbm;
3044 	int num_buffs_reaped = 0;
3045 
3046 	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3047 	dp = &ar->dp;
3048 	pmon = &dp->mon_data;
3049 	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3050 	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3051 
3052 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3053 
3054 	spin_lock_bh(&srng->lock);
3055 
3056 	ath11k_hal_srng_access_begin(ab, srng);
3057 	while (*budget) {
3058 		*budget -= 1;
3059 		rx_mon_status_desc =
3060 			ath11k_hal_srng_src_peek(ab, srng);
3061 		if (!rx_mon_status_desc) {
3062 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3063 			break;
3064 		}
3065 
3066 		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3067 						&cookie, &rbm);
3068 		if (paddr) {
3069 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3070 
3071 			spin_lock_bh(&rx_ring->idr_lock);
3072 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3073 			if (!skb) {
3074 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3075 					    buf_id);
3076 				spin_unlock_bh(&rx_ring->idr_lock);
3077 				pmon->buf_state = DP_MON_STATUS_REPLINISH;
3078 				goto move_next;
3079 			}
3080 
3081 			idr_remove(&rx_ring->bufs_idr, buf_id);
3082 			spin_unlock_bh(&rx_ring->idr_lock);
3083 
3084 			rxcb = ATH11K_SKB_RXCB(skb);
3085 
3086 			dma_unmap_single(ab->dev, rxcb->paddr,
3087 					 skb->len + skb_tailroom(skb),
3088 					 DMA_FROM_DEVICE);
3089 
3090 			tlv = (struct hal_tlv_hdr *)skb->data;
3091 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3092 					HAL_RX_STATUS_BUFFER_DONE) {
3093 				ath11k_warn(ab, "mon status DONE not set %lx\n",
3094 					    FIELD_GET(HAL_TLV_HDR_TAG,
3095 						      tlv->tl));
3096 				dev_kfree_skb_any(skb);
3097 				pmon->buf_state = DP_MON_STATUS_NO_DMA;
3098 				goto move_next;
3099 			}
3100 
3101 			if (ab->hw_params.full_monitor_mode) {
3102 				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3103 				if (paddr == pmon->mon_status_paddr)
3104 					pmon->buf_state = DP_MON_STATUS_MATCH;
3105 			}
3106 			__skb_queue_tail(skb_list, skb);
3107 		} else {
3108 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3109 		}
3110 move_next:
3111 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3112 							&buf_id);
3113 
3114 		if (!skb) {
3115 			hal_params = ab->hw_params.hal_params;
3116 			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3117 							hal_params->rx_buf_rbm);
3118 			num_buffs_reaped++;
3119 			break;
3120 		}
3121 		rxcb = ATH11K_SKB_RXCB(skb);
3122 
3123 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3124 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3125 
3126 		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3127 						cookie,
3128 						ab->hw_params.hal_params->rx_buf_rbm);
3129 		ath11k_hal_srng_src_get_next_entry(ab, srng);
3130 		num_buffs_reaped++;
3131 	}
3132 	ath11k_hal_srng_access_end(ab, srng);
3133 	spin_unlock_bh(&srng->lock);
3134 
3135 	return num_buffs_reaped;
3136 }
3137 
3138 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3139 {
3140 	struct dp_rx_tid *rx_tid = from_timer(rx_tid, timer, frag_timer);
3141 
3142 	spin_lock_bh(&rx_tid->ab->base_lock);
3143 	if (rx_tid->last_frag_no &&
3144 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3145 		spin_unlock_bh(&rx_tid->ab->base_lock);
3146 		return;
3147 	}
3148 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3149 	spin_unlock_bh(&rx_tid->ab->base_lock);
3150 }
3151 
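/* Set up per-TID fragment reassembly state for a peer and allocate the
 * Michael MIC transform used to verify the TKIP MIC of defragmented frames
 * in software.
 */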
3152 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3153 {
3154 	struct ath11k_base *ab = ar->ab;
3155 	struct crypto_shash *tfm;
3156 	struct ath11k_peer *peer;
3157 	struct dp_rx_tid *rx_tid;
3158 	int i;
3159 
3160 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
3161 	if (IS_ERR(tfm))
3162 		return PTR_ERR(tfm);
3163 
3164 	spin_lock_bh(&ab->base_lock);
3165 
3166 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3167 	if (!peer) {
3168 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3169 		spin_unlock_bh(&ab->base_lock);
3170 		return -ENOENT;
3171 	}
3172 
3173 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3174 		rx_tid = &peer->rx_tid[i];
3175 		rx_tid->ab = ab;
3176 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3177 		skb_queue_head_init(&rx_tid->rx_frags);
3178 	}
3179 
3180 	peer->tfm_mmic = tfm;
3181 	spin_unlock_bh(&ab->base_lock);
3182 
3183 	return 0;
3184 }
3185 
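/* Compute the Michael MIC over the TKIP MIC header (DA, SA, priority) and
 * the payload using the kernel crypto shash API.
 */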
3186 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3187 				      struct ieee80211_hdr *hdr, u8 *data,
3188 				      size_t data_len, u8 *mic)
3189 {
3190 	SHASH_DESC_ON_STACK(desc, tfm);
3191 	u8 mic_hdr[16] = {0};
3192 	u8 tid = 0;
3193 	int ret;
3194 
3195 	if (!tfm)
3196 		return -EINVAL;
3197 
3198 	desc->tfm = tfm;
3199 
3200 	ret = crypto_shash_setkey(tfm, key, 8);
3201 	if (ret)
3202 		goto out;
3203 
3204 	ret = crypto_shash_init(desc);
3205 	if (ret)
3206 		goto out;
3207 
3208 	/* TKIP MIC header */
3209 	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3210 	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3211 	if (ieee80211_is_data_qos(hdr->frame_control))
3212 		tid = ieee80211_get_tid(hdr);
3213 	mic_hdr[12] = tid;
3214 
3215 	ret = crypto_shash_update(desc, mic_hdr, 16);
3216 	if (ret)
3217 		goto out;
3218 	ret = crypto_shash_update(desc, data, data_len);
3219 	if (ret)
3220 		goto out;
3221 	ret = crypto_shash_final(desc, mic);
3222 out:
3223 	shash_desc_zero(desc);
3224 	return ret;
3225 }
3226 
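/* Verify the Michael MIC of a reassembled TKIP frame in software. On
 * failure the frame is still undecapped and delivered to mac80211 with
 * RX_FLAG_MMIC_ERROR set, and -EINVAL is returned.
 */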
3227 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3228 					  struct sk_buff *msdu)
3229 {
3230 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3231 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3232 	struct ieee80211_key_conf *key_conf;
3233 	struct ieee80211_hdr *hdr;
3234 	u8 mic[IEEE80211_CCMP_MIC_LEN];
3235 	int head_len, tail_len, ret;
3236 	size_t data_len;
3237 	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3238 	u8 *key, *data;
3239 	u8 key_idx;
3240 
3241 	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3242 	    HAL_ENCRYPT_TYPE_TKIP_MIC)
3243 		return 0;
3244 
3245 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3246 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
3247 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3248 	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3249 
3250 	if (!is_multicast_ether_addr(hdr->addr1))
3251 		key_idx = peer->ucast_keyidx;
3252 	else
3253 		key_idx = peer->mcast_keyidx;
3254 
3255 	key_conf = peer->keys[key_idx];
3256 
3257 	data = msdu->data + head_len;
3258 	data_len = msdu->len - head_len - tail_len;
3259 	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3260 
3261 	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3262 	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3263 		goto mic_fail;
3264 
3265 	return 0;
3266 
3267 mic_fail:
3268 	(ATH11K_SKB_RXCB(msdu))->is_first_msdu = true;
3269 	(ATH11K_SKB_RXCB(msdu))->is_last_msdu = true;
3270 
3271 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3272 		    RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3273 	skb_pull(msdu, hal_rx_desc_sz);
3274 
3275 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3276 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3277 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3278 	ieee80211_rx(ar->hw, msdu);
3279 	return -EINVAL;
3280 }
3281 
3282 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3283 					enum hal_encrypt_type enctype, u32 flags)
3284 {
3285 	struct ieee80211_hdr *hdr;
3286 	size_t hdr_len;
3287 	size_t crypto_len;
3288 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3289 
3290 	if (!flags)
3291 		return;
3292 
3293 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3294 
3295 	if (flags & RX_FLAG_MIC_STRIPPED)
3296 		skb_trim(msdu, msdu->len -
3297 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3298 
3299 	if (flags & RX_FLAG_ICV_STRIPPED)
3300 		skb_trim(msdu, msdu->len -
3301 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3302 
3303 	if (flags & RX_FLAG_IV_STRIPPED) {
3304 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
3305 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3306 
3307 #if defined(__linux__)
3308 		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3309 			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
3310 #elif defined(__FreeBSD__)
3311 		memmove((u8 *)msdu->data + hal_rx_desc_sz + crypto_len,
3312 			(u8 *)msdu->data + hal_rx_desc_sz, hdr_len);
3313 #endif
3314 		skb_pull(msdu, crypto_len);
3315 	}
3316 }
3317 
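/* Reassemble the fragments queued on this TID into the first fragment's
 * skb: strip per-fragment FCS/IV/ICV/MIC as needed, drop the 802.11 header
 * of the trailing fragments, concatenate the payloads, clear the
 * more-fragments bit and verify the TKIP MIC of the result.
 */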
3318 static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
3319 				 struct ath11k_peer *peer,
3320 				 struct dp_rx_tid *rx_tid,
3321 				 struct sk_buff **defrag_skb)
3322 {
3323 	struct hal_rx_desc *rx_desc;
3324 	struct sk_buff *skb, *first_frag, *last_frag;
3325 	struct ieee80211_hdr *hdr;
3326 	struct rx_attention *rx_attention;
3327 	enum hal_encrypt_type enctype;
3328 	bool is_decrypted = false;
3329 	int msdu_len = 0;
3330 	int extra_space;
3331 	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3332 
3333 	first_frag = skb_peek(&rx_tid->rx_frags);
3334 	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3335 
3336 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3337 		flags = 0;
3338 		rx_desc = (struct hal_rx_desc *)skb->data;
3339 		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3340 
3341 		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3342 		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3343 			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3344 			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3345 		}
3346 
3347 		if (is_decrypted) {
3348 			if (skb != first_frag)
3349 				flags |=  RX_FLAG_IV_STRIPPED;
3350 			if (skb != last_frag)
3351 				flags |= RX_FLAG_ICV_STRIPPED |
3352 					 RX_FLAG_MIC_STRIPPED;
3353 		}
3354 
3355 		/* RX fragments are always raw packets */
3356 		if (skb != last_frag)
3357 			skb_trim(skb, skb->len - FCS_LEN);
3358 		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3359 
3360 		if (skb != first_frag)
3361 			skb_pull(skb, hal_rx_desc_sz +
3362 				      ieee80211_hdrlen(hdr->frame_control));
3363 		msdu_len += skb->len;
3364 	}
3365 
3366 	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3367 	if (extra_space > 0 &&
3368 	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3369 		return -ENOMEM;
3370 
3371 	__skb_unlink(first_frag, &rx_tid->rx_frags);
3372 	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3373 		skb_put_data(first_frag, skb->data, skb->len);
3374 		dev_kfree_skb_any(skb);
3375 	}
3376 
3377 	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3378 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3379 	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3380 
3381 	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3382 		first_frag = NULL;
3383 
3384 	*defrag_skb = first_frag;
3385 	return 0;
3386 }
3387 
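/* Re-inject a defragmented MSDU through the REO entrance ring: rewrite the
 * MSDU link descriptor to describe the reassembled buffer, DMA-map it and
 * queue a new entrance ring descriptor (with the valid-PN bit set) so the
 * frame goes through regular REO processing again.
 */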
3388 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
3389 					      struct sk_buff *defrag_skb)
3390 {
3391 	struct ath11k_base *ab = ar->ab;
3392 	struct ath11k_pdev_dp *dp = &ar->dp;
3393 	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3394 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3395 	struct hal_reo_entrance_ring *reo_ent_ring;
3396 	struct hal_reo_dest_ring *reo_dest_ring;
3397 	struct dp_link_desc_bank *link_desc_banks;
3398 	struct hal_rx_msdu_link *msdu_link;
3399 	struct hal_rx_msdu_details *msdu0;
3400 	struct hal_srng *srng;
3401 	dma_addr_t paddr;
3402 	u32 desc_bank, msdu_info, mpdu_info;
3403 	u32 dst_idx, cookie, hal_rx_desc_sz;
3404 	int ret, buf_id;
3405 
3406 	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3407 	link_desc_banks = ab->dp.link_desc_banks;
3408 	reo_dest_ring = rx_tid->dst_ring_desc;
3409 
3410 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3411 #if defined(__linux__)
3412 	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3413 #elif defined(__FreeBSD__)
3414 	msdu_link = (struct hal_rx_msdu_link *)((u8 *)link_desc_banks[desc_bank].vaddr +
3415 #endif
3416 			(paddr - link_desc_banks[desc_bank].paddr));
3417 	msdu0 = &msdu_link->msdu_link[0];
3418 	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3419 	memset(msdu0, 0, sizeof(*msdu0));
3420 
3421 	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3422 		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3423 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3424 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3425 			       defrag_skb->len - hal_rx_desc_sz) |
3426 		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3427 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3428 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3429 	msdu0->rx_msdu_info.info0 = msdu_info;
3430 
3431 	/* change msdu len in hal rx desc */
3432 	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3433 
3434 	paddr = dma_map_single(ab->dev, defrag_skb->data,
3435 			       defrag_skb->len + skb_tailroom(defrag_skb),
3436 			       DMA_TO_DEVICE);
3437 	if (dma_mapping_error(ab->dev, paddr))
3438 		return -ENOMEM;
3439 
3440 	spin_lock_bh(&rx_refill_ring->idr_lock);
3441 	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3442 			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3443 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3444 	if (buf_id < 0) {
3445 		ret = -ENOMEM;
3446 		goto err_unmap_dma;
3447 	}
3448 
3449 	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3450 	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3451 		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3452 
3453 	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3454 					ab->hw_params.hal_params->rx_buf_rbm);
3455 
3456 	/* Fill mpdu details into the reo entrance ring */
3457 	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3458 
3459 	spin_lock_bh(&srng->lock);
3460 	ath11k_hal_srng_access_begin(ab, srng);
3461 
3462 	reo_ent_ring = (struct hal_reo_entrance_ring *)
3463 			ath11k_hal_srng_src_get_next_entry(ab, srng);
3464 	if (!reo_ent_ring) {
3465 		ath11k_hal_srng_access_end(ab, srng);
3466 		spin_unlock_bh(&srng->lock);
3467 		ret = -ENOSPC;
3468 		goto err_free_idr;
3469 	}
3470 	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3471 
3472 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3473 	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3474 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3475 
3476 	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3477 		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3478 		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3479 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3480 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3481 		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3482 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3483 
3484 	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3485 	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3486 	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3487 	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3488 					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3489 						   reo_dest_ring->info0)) |
3490 			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3491 	ath11k_hal_srng_access_end(ab, srng);
3492 	spin_unlock_bh(&srng->lock);
3493 
3494 	return 0;
3495 
3496 err_free_idr:
3497 	spin_lock_bh(&rx_refill_ring->idr_lock);
3498 	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3499 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3500 err_unmap_dma:
3501 	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3502 			 DMA_TO_DEVICE);
3503 	return ret;
3504 }
3505 
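/* Order fragments by their 802.11 fragment number; used by
 * ath11k_dp_rx_h_sort_frags() below to keep the per-TID fragment list sorted.
 */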
3506 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3507 				    struct sk_buff *a, struct sk_buff *b)
3508 {
3509 	int frag1, frag2;
3510 
3511 	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3512 	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3513 
3514 	return frag1 - frag2;
3515 }
3516 
3517 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3518 				      struct sk_buff_head *frag_list,
3519 				      struct sk_buff *cur_frag)
3520 {
3521 	struct sk_buff *skb;
3522 	int cmp;
3523 
3524 	skb_queue_walk(frag_list, skb) {
3525 		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3526 		if (cmp < 0)
3527 			continue;
3528 		__skb_queue_before(frag_list, skb, cur_frag);
3529 		return;
3530 	}
3531 	__skb_queue_tail(frag_list, cur_frag);
3532 }
3533 
3534 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3535 {
3536 	struct ieee80211_hdr *hdr;
3537 	u64 pn = 0;
3538 	u8 *ehdr;
3539 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3540 
3541 	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3542 	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3543 
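	/* CCMP/GCMP IV layout: PN0, PN1, reserved, key id/ExtIV, PN2..PN5,
	 * hence bytes 2 and 3 are skipped when assembling the 48-bit PN.
	 */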
3544 	pn = ehdr[0];
3545 	pn |= (u64)ehdr[1] << 8;
3546 	pn |= (u64)ehdr[4] << 16;
3547 	pn |= (u64)ehdr[5] << 24;
3548 	pn |= (u64)ehdr[6] << 32;
3549 	pn |= (u64)ehdr[7] << 40;
3550 
3551 	return pn;
3552 }
3553 
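/* For CCMP/GCMP encrypted frames every fragment must carry a PN exactly one
 * greater than the previous fragment's; any gap makes the caller drop the
 * whole fragment sequence.
 */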
3554 static bool
3555 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3556 {
3557 	enum hal_encrypt_type encrypt_type;
3558 	struct sk_buff *first_frag, *skb;
3559 	struct hal_rx_desc *desc;
3560 	u64 last_pn;
3561 	u64 cur_pn;
3562 
3563 	first_frag = skb_peek(&rx_tid->rx_frags);
3564 	desc = (struct hal_rx_desc *)first_frag->data;
3565 
3566 	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3567 	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3568 	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3569 	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3570 	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3571 		return true;
3572 
3573 	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3574 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3575 		if (skb == first_frag)
3576 			continue;
3577 
3578 		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3579 		if (cur_pn != last_pn + 1)
3580 			return false;
3581 		last_pn = cur_pn;
3582 	}
3583 	return true;
3584 }
3585 
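/* Handle one received rx fragment: validate it, insert it in order into the
 * per-TID fragment list, and once the sequence is complete verify the PNs,
 * defragment and reinject the resulting MSDU through the REO entrance ring.
 * Incomplete sequences are flushed by the fragment timer.
 */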
3586 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3587 				    struct sk_buff *msdu,
3588 				    u32 *ring_desc)
3589 {
3590 	struct ath11k_base *ab = ar->ab;
3591 	struct hal_rx_desc *rx_desc;
3592 	struct ath11k_peer *peer;
3593 	struct dp_rx_tid *rx_tid;
3594 	struct sk_buff *defrag_skb = NULL;
3595 	u32 peer_id;
3596 	u16 seqno, frag_no;
3597 	u8 tid;
3598 	int ret = 0;
3599 	bool more_frags;
3600 	bool is_mcbc;
3601 
3602 	rx_desc = (struct hal_rx_desc *)msdu->data;
3603 	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3604 	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3605 	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3606 	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3607 	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3608 	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3609 
3610 	/* Multicast/Broadcast fragments are not expected */
3611 	if (is_mcbc)
3612 		return -EINVAL;
3613 
3614 	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3615 	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3616 	    tid > IEEE80211_NUM_TIDS)
3617 		return -EINVAL;
3618 
3619 	/* Received an unfragmented packet in the REO
3620 	 * exception ring; this shouldn't happen, as
3621 	 * these packets typically come from the
3622 	 * reo2sw srngs.
3623 	 */
3624 	if (WARN_ON_ONCE(!frag_no && !more_frags))
3625 		return -EINVAL;
3626 
3627 	spin_lock_bh(&ab->base_lock);
3628 	peer = ath11k_peer_find_by_id(ab, peer_id);
3629 	if (!peer) {
3630 		ath11k_warn(ab, "failed to find the peer to de-fragment received fragment peer_id %d\n",
3631 			    peer_id);
3632 		ret = -ENOENT;
3633 		goto out_unlock;
3634 	}
3635 	rx_tid = &peer->rx_tid[tid];
3636 
3637 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3638 	    skb_queue_empty(&rx_tid->rx_frags)) {
3639 		/* Flush stored fragments and start a new sequence */
3640 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
3641 		rx_tid->cur_sn = seqno;
3642 	}
3643 
3644 	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3645 		/* Fragment already present */
3646 		ret = -EINVAL;
3647 		goto out_unlock;
3648 	}
3649 
3650 	if (frag_no > __fls(rx_tid->rx_frag_bitmap))
3651 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3652 	else
3653 		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3654 
3655 	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3656 	if (!more_frags)
3657 		rx_tid->last_frag_no = frag_no;
3658 
3659 	if (frag_no == 0) {
3660 		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3661 						sizeof(*rx_tid->dst_ring_desc),
3662 						GFP_ATOMIC);
3663 		if (!rx_tid->dst_ring_desc) {
3664 			ret = -ENOMEM;
3665 			goto out_unlock;
3666 		}
3667 	} else {
3668 		ath11k_dp_rx_link_desc_return(ab, ring_desc,
3669 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3670 	}
3671 
3672 	if (!rx_tid->last_frag_no ||
3673 	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3674 		mod_timer(&rx_tid->frag_timer, jiffies +
3675 					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3676 		goto out_unlock;
3677 	}
3678 
3679 	spin_unlock_bh(&ab->base_lock);
3680 	del_timer_sync(&rx_tid->frag_timer);
3681 	spin_lock_bh(&ab->base_lock);
3682 
3683 	peer = ath11k_peer_find_by_id(ab, peer_id);
3684 	if (!peer)
3685 		goto err_frags_cleanup;
3686 
3687 	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3688 		goto err_frags_cleanup;
3689 
3690 	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3691 		goto err_frags_cleanup;
3692 
3693 	if (!defrag_skb)
3694 		goto err_frags_cleanup;
3695 
3696 	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3697 		goto err_frags_cleanup;
3698 
3699 	ath11k_dp_rx_frags_cleanup(rx_tid, false);
3700 	goto out_unlock;
3701 
3702 err_frags_cleanup:
3703 	dev_kfree_skb_any(defrag_skb);
3704 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3705 out_unlock:
3706 	spin_unlock_bh(&ab->base_lock);
3707 	return ret;
3708 }
3709 
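/* Look up and unmap the rx buffer referenced by an exception ring descriptor
 * and either drop it or feed it to the defragmentation path.
 */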
3710 static int
3711 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3712 {
3713 	struct ath11k_pdev_dp *dp = &ar->dp;
3714 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3715 	struct sk_buff *msdu;
3716 	struct ath11k_skb_rxcb *rxcb;
3717 	struct hal_rx_desc *rx_desc;
3718 	u8 *hdr_status;
3719 	u16 msdu_len;
3720 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3721 
3722 	spin_lock_bh(&rx_ring->idr_lock);
3723 	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3724 	if (!msdu) {
3725 		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3726 			    buf_id);
3727 		spin_unlock_bh(&rx_ring->idr_lock);
3728 		return -EINVAL;
3729 	}
3730 
3731 	idr_remove(&rx_ring->bufs_idr, buf_id);
3732 	spin_unlock_bh(&rx_ring->idr_lock);
3733 
3734 	rxcb = ATH11K_SKB_RXCB(msdu);
3735 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3736 			 msdu->len + skb_tailroom(msdu),
3737 			 DMA_FROM_DEVICE);
3738 
3739 	if (drop) {
3740 		dev_kfree_skb_any(msdu);
3741 		return 0;
3742 	}
3743 
3744 	rcu_read_lock();
3745 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3746 		dev_kfree_skb_any(msdu);
3747 		goto exit;
3748 	}
3749 
3750 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3751 		dev_kfree_skb_any(msdu);
3752 		goto exit;
3753 	}
3754 
3755 	rx_desc = (struct hal_rx_desc *)msdu->data;
3756 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3757 	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3758 		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3759 		ath11k_warn(ar->ab, "invalid msdu length %u", msdu_len);
3760 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3761 				sizeof(struct ieee80211_hdr));
3762 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3763 				sizeof(struct hal_rx_desc));
3764 		dev_kfree_skb_any(msdu);
3765 		goto exit;
3766 	}
3767 
3768 	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3769 
3770 	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3771 		dev_kfree_skb_any(msdu);
3772 		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3773 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3774 	}
3775 exit:
3776 	rcu_read_unlock();
3777 	return 0;
3778 }
3779 
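/* Process the REO exception ring: reap erroneous MPDUs, handle rx fragments
 * and replenish the per-pdev refill rings for the buffers consumed.
 */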
3780 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3781 			     int budget)
3782 {
3783 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3784 	struct dp_link_desc_bank *link_desc_banks;
3785 	enum hal_rx_buf_return_buf_manager rbm;
3786 	int tot_n_bufs_reaped, quota, ret, i;
3787 	int n_bufs_reaped[MAX_RADIOS] = {0};
3788 	struct dp_rxdma_ring *rx_ring;
3789 	struct dp_srng *reo_except;
3790 	u32 desc_bank, num_msdus;
3791 	struct hal_srng *srng;
3792 	struct ath11k_dp *dp;
3793 	void *link_desc_va;
3794 	int buf_id, mac_id;
3795 	struct ath11k *ar;
3796 	dma_addr_t paddr;
3797 	u32 *desc;
3798 	bool is_frag;
3799 	u8 drop = 0;
3800 
3801 	tot_n_bufs_reaped = 0;
3802 	quota = budget;
3803 
3804 	dp = &ab->dp;
3805 	reo_except = &dp->reo_except_ring;
3806 	link_desc_banks = dp->link_desc_banks;
3807 
3808 	srng = &ab->hal.srng_list[reo_except->ring_id];
3809 
3810 	spin_lock_bh(&srng->lock);
3811 
3812 	ath11k_hal_srng_access_begin(ab, srng);
3813 
3814 	while (budget &&
3815 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3816 		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3817 
3818 		ab->soc_stats.err_ring_pkts++;
3819 		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3820 						    &desc_bank);
3821 		if (ret) {
3822 			ath11k_warn(ab, "failed to parse error reo desc %d\n",
3823 				    ret);
3824 			continue;
3825 		}
3826 #if defined(__linux__)
3827 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3828 #elif defined(__FreeBSD__)
3829 		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
3830 #endif
3831 			       (paddr - link_desc_banks[desc_bank].paddr);
3832 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3833 						 &rbm);
3834 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3835 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3836 			ab->soc_stats.invalid_rbm++;
3837 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3838 			ath11k_dp_rx_link_desc_return(ab, desc,
3839 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3840 			continue;
3841 		}
3842 
3843 		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3844 
3845 		/* Process only rx fragments with one MSDU per link desc below, and drop
3846 		 * MSDUs that are indicated due to error reasons.
3847 		 */
3848 		if (!is_frag || num_msdus > 1) {
3849 			drop = 1;
3850 			/* Return the link desc back to wbm idle list */
3851 			ath11k_dp_rx_link_desc_return(ab, desc,
3852 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3853 		}
3854 
3855 		for (i = 0; i < num_msdus; i++) {
3856 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3857 					   msdu_cookies[i]);
3858 
3859 			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3860 					   msdu_cookies[i]);
3861 
3862 			ar = ab->pdevs[mac_id].ar;
3863 
3864 			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3865 				n_bufs_reaped[mac_id]++;
3866 				tot_n_bufs_reaped++;
3867 			}
3868 		}
3869 
3870 		if (tot_n_bufs_reaped >= quota) {
3871 			tot_n_bufs_reaped = quota;
3872 			goto exit;
3873 		}
3874 
3875 		budget = quota - tot_n_bufs_reaped;
3876 	}
3877 
3878 exit:
3879 	ath11k_hal_srng_access_end(ab, srng);
3880 
3881 	spin_unlock_bh(&srng->lock);
3882 
3883 	for (i = 0; i < ab->num_radios; i++) {
3884 		if (!n_bufs_reaped[i])
3885 			continue;
3886 
3887 		ar = ab->pdevs[i].ar;
3888 		rx_ring = &ar->dp.rx_refill_buf_ring;
3889 
3890 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3891 					   ab->hw_params.hal_params->rx_buf_rbm);
3892 	}
3893 
3894 	return tot_n_bufs_reaped;
3895 }
3896 
3897 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3898 					     int msdu_len,
3899 					     struct sk_buff_head *msdu_list)
3900 {
3901 	struct sk_buff *skb, *tmp;
3902 	struct ath11k_skb_rxcb *rxcb;
3903 	int n_buffs;
3904 
3905 	n_buffs = DIV_ROUND_UP(msdu_len,
3906 			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3907 
3908 	skb_queue_walk_safe(msdu_list, skb, tmp) {
3909 		rxcb = ATH11K_SKB_RXCB(skb);
3910 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3911 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3912 			if (!n_buffs)
3913 				break;
3914 			__skb_unlink(skb, msdu_list);
3915 			dev_kfree_skb_any(skb);
3916 			n_buffs--;
3917 		}
3918 	}
3919 }
3920 
3921 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3922 				      struct ieee80211_rx_status *status,
3923 				      struct sk_buff_head *msdu_list)
3924 {
3925 	u16 msdu_len;
3926 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3927 	struct rx_attention *rx_attention;
3928 	u8 l3pad_bytes;
3929 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3930 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3931 
3932 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3933 
3934 	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3935 		/* First buffer will be freed by the caller, so deduct its length */
3936 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3937 		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3938 		return -EINVAL;
3939 	}
3940 
3941 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3942 	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3943 		ath11k_warn(ar->ab,
3944 			    "msdu_done bit not set in null_q_des processing\n");
3945 		__skb_queue_purge(msdu_list);
3946 		return -EIO;
3947 	}
3948 
3949 	/* Handle NULL queue descriptor violations arising out of a missing
3950 	 * REO queue for a given peer or a given TID. This typically
3951 	 * happens if a packet is received on a QoS-enabled TID before the
3952 	 * ADDBA negotiation for that TID, i.e. before the TID queue is set up.
3953 	 * It may also happen for MC/BC frames if they are not routed to the
3954 	 * non-QoS TID queue, in the absence of any other default TID queue.
3955 	 * This error can show up in both the REO destination and WBM release rings.
3956 	 */
3957 
3958 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3959 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3960 
3961 	if (rxcb->is_frag) {
3962 		skb_pull(msdu, hal_rx_desc_sz);
3963 	} else {
3964 		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3965 
3966 		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3967 			return -EINVAL;
3968 
3969 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
3970 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
3971 	}
3972 	ath11k_dp_rx_h_ppdu(ar, desc, status);
3973 
3974 	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
3975 
3976 	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
3977 
3978 	/* Note that the caller still has access to the msdu and will complete
3979 	 * rx with mac80211, so there is no need to clean up the amsdu_list here.
3980 	 */
3981 
3982 	return 0;
3983 }
3984 
3985 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
3986 				   struct ieee80211_rx_status *status,
3987 				   struct sk_buff_head *msdu_list)
3988 {
3989 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3990 	bool drop = false;
3991 
3992 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
3993 
3994 	switch (rxcb->err_code) {
3995 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
3996 		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
3997 			drop = true;
3998 		break;
3999 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
4000 		/* TODO: Do not drop PN failed packets in the driver;
4001 		 * instead, it is good to drop such packets in mac80211
4002 		 * after incrementing the replay counters.
4003 		 */
4004 		fallthrough;
4005 	default:
4006 		/* TODO: Review other errors and process them to mac80211
4007 		 * as appropriate.
4008 		 */
4009 		drop = true;
4010 		break;
4011 	}
4012 
4013 	return drop;
4014 }
4015 
4016 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
4017 					struct ieee80211_rx_status *status)
4018 {
4019 	u16 msdu_len;
4020 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4021 	u8 l3pad_bytes;
4022 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4023 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4024 
4025 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4026 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4027 
4028 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4029 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4030 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4031 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4032 
4033 	ath11k_dp_rx_h_ppdu(ar, desc, status);
4034 
4035 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4036 			 RX_FLAG_DECRYPTED);
4037 
4038 	ath11k_dp_rx_h_undecap(ar, msdu, desc,
4039 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4040 }
4041 
4042 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar, struct sk_buff *msdu,
4043 				     struct ieee80211_rx_status *status)
4044 {
4045 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4046 	bool drop = false;
4047 
4048 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4049 
4050 	switch (rxcb->err_code) {
4051 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4052 		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4053 		break;
4054 	default:
4055 		/* TODO: Review other rxdma error code to check if anything is
4056 		 * worth reporting to mac80211
4057 		 */
4058 		drop = true;
4059 		break;
4060 	}
4061 
4062 	return drop;
4063 }
4064 
4065 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4066 				 struct napi_struct *napi,
4067 				 struct sk_buff *msdu,
4068 				 struct sk_buff_head *msdu_list)
4069 {
4070 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4071 	struct ieee80211_rx_status rxs = {0};
4072 	bool drop = true;
4073 
4074 	switch (rxcb->err_rel_src) {
4075 	case HAL_WBM_REL_SRC_MODULE_REO:
4076 		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4077 		break;
4078 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
4079 		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4080 		break;
4081 	default:
4082 		/* msdu will get freed */
4083 		break;
4084 	}
4085 
4086 	if (drop) {
4087 		dev_kfree_skb_any(msdu);
4088 		return;
4089 	}
4090 
4091 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4092 }
4093 
4094 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4095 				 struct napi_struct *napi, int budget)
4096 {
4097 	struct ath11k *ar;
4098 	struct ath11k_dp *dp = &ab->dp;
4099 	struct dp_rxdma_ring *rx_ring;
4100 	struct hal_rx_wbm_rel_info err_info;
4101 	struct hal_srng *srng;
4102 	struct sk_buff *msdu;
4103 	struct sk_buff_head msdu_list[MAX_RADIOS];
4104 	struct ath11k_skb_rxcb *rxcb;
4105 	u32 *rx_desc;
4106 	int buf_id, mac_id;
4107 	int num_buffs_reaped[MAX_RADIOS] = {0};
4108 	int total_num_buffs_reaped = 0;
4109 	int ret, i;
4110 
4111 	for (i = 0; i < ab->num_radios; i++)
4112 		__skb_queue_head_init(&msdu_list[i]);
4113 
4114 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4115 
4116 	spin_lock_bh(&srng->lock);
4117 
4118 	ath11k_hal_srng_access_begin(ab, srng);
4119 
4120 	while (budget) {
4121 		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4122 		if (!rx_desc)
4123 			break;
4124 
4125 		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4126 		if (ret) {
4127 			ath11k_warn(ab,
4128 				    "failed to parse rx error in wbm_rel ring desc %d\n",
4129 				    ret);
4130 			continue;
4131 		}
4132 
4133 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4134 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4135 
4136 		ar = ab->pdevs[mac_id].ar;
4137 		rx_ring = &ar->dp.rx_refill_buf_ring;
4138 
4139 		spin_lock_bh(&rx_ring->idr_lock);
4140 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4141 		if (!msdu) {
4142 			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4143 				    buf_id, mac_id);
4144 			spin_unlock_bh(&rx_ring->idr_lock);
4145 			continue;
4146 		}
4147 
4148 		idr_remove(&rx_ring->bufs_idr, buf_id);
4149 		spin_unlock_bh(&rx_ring->idr_lock);
4150 
4151 		rxcb = ATH11K_SKB_RXCB(msdu);
4152 		dma_unmap_single(ab->dev, rxcb->paddr,
4153 				 msdu->len + skb_tailroom(msdu),
4154 				 DMA_FROM_DEVICE);
4155 
4156 		num_buffs_reaped[mac_id]++;
4157 		total_num_buffs_reaped++;
4158 		budget--;
4159 
4160 		if (err_info.push_reason !=
4161 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4162 			dev_kfree_skb_any(msdu);
4163 			continue;
4164 		}
4165 
4166 		rxcb->err_rel_src = err_info.err_rel_src;
4167 		rxcb->err_code = err_info.err_code;
4168 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4169 		__skb_queue_tail(&msdu_list[mac_id], msdu);
4170 	}
4171 
4172 	ath11k_hal_srng_access_end(ab, srng);
4173 
4174 	spin_unlock_bh(&srng->lock);
4175 
4176 	if (!total_num_buffs_reaped)
4177 		goto done;
4178 
4179 	for (i = 0; i < ab->num_radios; i++) {
4180 		if (!num_buffs_reaped[i])
4181 			continue;
4182 
4183 		ar = ab->pdevs[i].ar;
4184 		rx_ring = &ar->dp.rx_refill_buf_ring;
4185 
4186 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4187 					   ab->hw_params.hal_params->rx_buf_rbm);
4188 	}
4189 
4190 	rcu_read_lock();
4191 	for (i = 0; i < ab->num_radios; i++) {
4192 		if (!rcu_dereference(ab->pdevs_active[i])) {
4193 			__skb_queue_purge(&msdu_list[i]);
4194 			continue;
4195 		}
4196 
4197 		ar = ab->pdevs[i].ar;
4198 
4199 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4200 			__skb_queue_purge(&msdu_list[i]);
4201 			continue;
4202 		}
4203 
4204 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4205 			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4206 	}
4207 	rcu_read_unlock();
4208 done:
4209 	return total_num_buffs_reaped;
4210 }
4211 
4212 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4213 {
4214 	struct ath11k *ar;
4215 	struct dp_srng *err_ring;
4216 	struct dp_rxdma_ring *rx_ring;
4217 	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4218 	struct hal_srng *srng;
4219 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4220 	enum hal_rx_buf_return_buf_manager rbm;
4221 	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4222 	struct ath11k_skb_rxcb *rxcb;
4223 	struct sk_buff *skb;
4224 	struct hal_reo_entrance_ring *entr_ring;
4225 	void *desc;
4226 	int num_buf_freed = 0;
4227 	int quota = budget;
4228 	dma_addr_t paddr;
4229 	u32 desc_bank;
4230 	void *link_desc_va;
4231 	int num_msdus;
4232 	int i;
4233 	int buf_id;
4234 
4235 	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4236 	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4237 									  mac_id)];
4238 	rx_ring = &ar->dp.rx_refill_buf_ring;
4239 
4240 	srng = &ab->hal.srng_list[err_ring->ring_id];
4241 
4242 	spin_lock_bh(&srng->lock);
4243 
4244 	ath11k_hal_srng_access_begin(ab, srng);
4245 
4246 	while (quota-- &&
4247 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4248 		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4249 
4250 		entr_ring = (struct hal_reo_entrance_ring *)desc;
4251 		rxdma_err_code =
4252 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4253 				  entr_ring->info1);
4254 		ab->soc_stats.rxdma_error[rxdma_err_code]++;
4255 
4256 #if defined(__linux__)
4257 		link_desc_va = link_desc_banks[desc_bank].vaddr +
4258 #elif defined(__FreeBSD__)
4259 		link_desc_va = (u8 *)link_desc_banks[desc_bank].vaddr +
4260 #endif
4261 			       (paddr - link_desc_banks[desc_bank].paddr);
4262 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4263 						 msdu_cookies, &rbm);
4264 
4265 		for (i = 0; i < num_msdus; i++) {
4266 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4267 					   msdu_cookies[i]);
4268 
4269 			spin_lock_bh(&rx_ring->idr_lock);
4270 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
4271 			if (!skb) {
4272 				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4273 					    buf_id);
4274 				spin_unlock_bh(&rx_ring->idr_lock);
4275 				continue;
4276 			}
4277 
4278 			idr_remove(&rx_ring->bufs_idr, buf_id);
4279 			spin_unlock_bh(&rx_ring->idr_lock);
4280 
4281 			rxcb = ATH11K_SKB_RXCB(skb);
4282 			dma_unmap_single(ab->dev, rxcb->paddr,
4283 					 skb->len + skb_tailroom(skb),
4284 					 DMA_FROM_DEVICE);
4285 			dev_kfree_skb_any(skb);
4286 
4287 			num_buf_freed++;
4288 		}
4289 
4290 		ath11k_dp_rx_link_desc_return(ab, desc,
4291 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4292 	}
4293 
4294 	ath11k_hal_srng_access_end(ab, srng);
4295 
4296 	spin_unlock_bh(&srng->lock);
4297 
4298 	if (num_buf_freed)
4299 		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4300 					   ab->hw_params.hal_params->rx_buf_rbm);
4301 
4302 	return budget - quota;
4303 }
4304 
4305 void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4306 {
4307 	struct ath11k_dp *dp = &ab->dp;
4308 	struct hal_srng *srng;
4309 	struct dp_reo_cmd *cmd, *tmp;
4310 	bool found = false;
4311 	u32 *reo_desc;
4312 	u16 tag;
4313 	struct hal_reo_status reo_status;
4314 
4315 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4316 
4317 	memset(&reo_status, 0, sizeof(reo_status));
4318 
4319 	spin_lock_bh(&srng->lock);
4320 
4321 	ath11k_hal_srng_access_begin(ab, srng);
4322 
4323 	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4324 		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4325 
4326 		switch (tag) {
4327 		case HAL_REO_GET_QUEUE_STATS_STATUS:
4328 			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4329 							  &reo_status);
4330 			break;
4331 		case HAL_REO_FLUSH_QUEUE_STATUS:
4332 			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4333 							  &reo_status);
4334 			break;
4335 		case HAL_REO_FLUSH_CACHE_STATUS:
4336 			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4337 							  &reo_status);
4338 			break;
4339 		case HAL_REO_UNBLOCK_CACHE_STATUS:
4340 			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4341 							  &reo_status);
4342 			break;
4343 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4344 			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4345 								 &reo_status);
4346 			break;
4347 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4348 			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4349 								  &reo_status);
4350 			break;
4351 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4352 			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4353 								  &reo_status);
4354 			break;
4355 		default:
4356 			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4357 			continue;
4358 		}
4359 
4360 		spin_lock_bh(&dp->reo_cmd_lock);
4361 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4362 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4363 				found = true;
4364 				list_del(&cmd->list);
4365 				break;
4366 			}
4367 		}
4368 		spin_unlock_bh(&dp->reo_cmd_lock);
4369 
4370 		if (found) {
4371 			cmd->handler(dp, (void *)&cmd->data,
4372 				     reo_status.uniform_hdr.cmd_status);
4373 			kfree(cmd);
4374 		}
4375 
4376 		found = false;
4377 	}
4378 
4379 	ath11k_hal_srng_access_end(ab, srng);
4380 
4381 	spin_unlock_bh(&srng->lock);
4382 }
4383 
4384 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4385 {
4386 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4387 
4388 	ath11k_dp_rx_pdev_srng_free(ar);
4389 	ath11k_dp_rxdma_pdev_buf_free(ar);
4390 }
4391 
4392 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4393 {
4394 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4395 	struct ath11k_pdev_dp *dp = &ar->dp;
4396 	u32 ring_id;
4397 	int i;
4398 	int ret;
4399 
4400 	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4401 	if (ret) {
4402 		ath11k_warn(ab, "failed to setup rx srngs\n");
4403 		return ret;
4404 	}
4405 
4406 	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4407 	if (ret) {
4408 		ath11k_warn(ab, "failed to setup rxdma ring\n");
4409 		return ret;
4410 	}
4411 
4412 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4413 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4414 	if (ret) {
4415 		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4416 			    ret);
4417 		return ret;
4418 	}
4419 
4420 	if (ab->hw_params.rx_mac_buf_ring) {
4421 		for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4422 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4423 			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4424 							  mac_id + i, HAL_RXDMA_BUF);
4425 			if (ret) {
4426 				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4427 					    i, ret);
4428 				return ret;
4429 			}
4430 		}
4431 	}
4432 
4433 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4434 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4435 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4436 						  mac_id + i, HAL_RXDMA_DST);
4437 		if (ret) {
4438 			ath11k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
4439 				    i, ret);
4440 			return ret;
4441 		}
4442 	}
4443 
4444 	if (!ab->hw_params.rxdma1_enable)
4445 		goto config_refill_ring;
4446 
4447 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4448 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4449 					  mac_id, HAL_RXDMA_MONITOR_BUF);
4450 	if (ret) {
4451 		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4452 			    ret);
4453 		return ret;
4454 	}
4455 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4456 					  dp->rxdma_mon_dst_ring.ring_id,
4457 					  mac_id, HAL_RXDMA_MONITOR_DST);
4458 	if (ret) {
4459 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4460 			    ret);
4461 		return ret;
4462 	}
4463 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4464 					  dp->rxdma_mon_desc_ring.ring_id,
4465 					  mac_id, HAL_RXDMA_MONITOR_DESC);
4466 	if (ret) {
4467 		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4468 			    ret);
4469 		return ret;
4470 	}
4471 
4472 config_refill_ring:
4473 	for (i = 0; i < ab->hw_params.num_rxmda_per_pdev; i++) {
4474 		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4475 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4476 						  HAL_RXDMA_MONITOR_STATUS);
4477 		if (ret) {
4478 			ath11k_warn(ab,
4479 				    "failed to configure mon_status_refill_ring%d %d\n",
4480 				    i, ret);
4481 			return ret;
4482 		}
4483 	}
4484 
4485 	return 0;
4486 }
4487 
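/* Split the remaining MPDU length into per-buffer chunks; each monitor rx
 * buffer carries at most DP_RX_BUFFER_SIZE minus the hal_rx_desc header.
 */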
4488 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4489 {
4490 	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4491 		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4492 		*total_len -= *frag_len;
4493 	} else {
4494 		*frag_len = *total_len;
4495 		*total_len = 0;
4496 	}
4497 }
4498 
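/* Return a used monitor link descriptor to the rxdma monitor descriptor ring,
 * or to the common WBM descriptor release ring when rxdma1 is not present.
 */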
4499 static
4500 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4501 					  void *p_last_buf_addr_info,
4502 					  u8 mac_id)
4503 {
4504 	struct ath11k_pdev_dp *dp = &ar->dp;
4505 	struct dp_srng *dp_srng;
4506 	void *hal_srng;
4507 	void *src_srng_desc;
4508 	int ret = 0;
4509 
4510 	if (ar->ab->hw_params.rxdma1_enable) {
4511 		dp_srng = &dp->rxdma_mon_desc_ring;
4512 		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4513 	} else {
4514 		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4515 		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4516 	}
4517 
4518 	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4519 
4520 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4521 
4522 	if (src_srng_desc) {
4523 		struct ath11k_buffer_addr *src_desc =
4524 				(struct ath11k_buffer_addr *)src_srng_desc;
4525 
4526 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4527 	} else {
4528 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4529 			   "Monitor Link Desc Ring %d Full", mac_id);
4530 		ret = -ENOMEM;
4531 	}
4532 
4533 	ath11k_hal_srng_access_end(ar->ab, hal_srng);
4534 	return ret;
4535 }
4536 
4537 static
4538 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4539 					 dma_addr_t *paddr, u32 *sw_cookie,
4540 					 u8 *rbm,
4541 					 void **pp_buf_addr_info)
4542 {
4543 	struct hal_rx_msdu_link *msdu_link =
4544 			(struct hal_rx_msdu_link *)rx_msdu_link_desc;
4545 	struct ath11k_buffer_addr *buf_addr_info;
4546 
4547 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4548 
4549 	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4550 
4551 	*pp_buf_addr_info = (void *)buf_addr_info;
4552 }
4553 
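/* Force the skb to exactly @len bytes: trim when it is longer, otherwise grow
 * the tail, expanding the head if there is not enough tailroom.
 */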
4554 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4555 {
4556 	if (skb->len > len) {
4557 		skb_trim(skb, len);
4558 	} else {
4559 		if (skb_tailroom(skb) < len - skb->len) {
4560 			if ((pskb_expand_head(skb, 0,
4561 					      len - skb->len - skb_tailroom(skb),
4562 					      GFP_ATOMIC))) {
4563 				dev_kfree_skb_any(skb);
4564 				return -ENOMEM;
4565 			}
4566 		}
4567 		skb_put(skb, (len - skb->len));
4568 	}
4569 	return 0;
4570 }
4571 
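/* Walk the MSDU descriptors of one link descriptor and copy their flags,
 * length, sw cookie and return buffer manager into @msdu_list; a zero buffer
 * address terminates the walk and marks the previous entry as the last MSDU.
 */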
4572 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4573 					void *msdu_link_desc,
4574 					struct hal_rx_msdu_list *msdu_list,
4575 					u16 *num_msdus)
4576 {
4577 	struct hal_rx_msdu_details *msdu_details = NULL;
4578 	struct rx_msdu_desc *msdu_desc_info = NULL;
4579 	struct hal_rx_msdu_link *msdu_link = NULL;
4580 	int i;
4581 	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4582 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4583 	u8 tmp = 0;
4584 
4585 	msdu_link = (struct hal_rx_msdu_link *)msdu_link_desc;
4586 	msdu_details = &msdu_link->msdu_link[0];
4587 
4588 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4589 		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4590 			      msdu_details[i].buf_addr_info.info0) == 0) {
4591 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4592 			msdu_desc_info->info0 |= last;
4594 			break;
4595 		}
4596 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
4597 
4598 		if (!i)
4599 			msdu_desc_info->info0 |= first;
4600 		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4601 			msdu_desc_info->info0 |= last;
4602 		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4603 		msdu_list->msdu_info[i].msdu_len =
4604 			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4605 		msdu_list->sw_cookie[i] =
4606 			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4607 				  msdu_details[i].buf_addr_info.info1);
4608 		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4609 				msdu_details[i].buf_addr_info.info1);
4610 		msdu_list->rbm[i] = tmp;
4611 	}
4612 	*num_msdus = i;
4613 }
4614 
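/* Reconcile the PPDU id seen in the destination ring MSDU with the one
 * tracked from the status ring, accounting for wrap-around; a non-zero
 * return means the MSDU belongs to a different PPDU than the tracked one.
 */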
4615 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4616 					u32 *rx_bufs_used)
4617 {
4618 	u32 ret = 0;
4619 
4620 	if ((*ppdu_id < msdu_ppdu_id) &&
4621 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4622 		*ppdu_id = msdu_ppdu_id;
4623 		ret = msdu_ppdu_id;
4624 	} else if ((*ppdu_id > msdu_ppdu_id) &&
4625 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4626 		/* mon_dst is behind mon_status;
4627 		 * skip this dst_ring entry and free it
4628 		 */
4629 		*rx_bufs_used += 1;
4630 		*ppdu_id = msdu_ppdu_id;
4631 		ret = msdu_ppdu_id;
4632 	}
4633 	return ret;
4634 }
4635 
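/* Work out how many bytes of this buffer belong to the current MSDU:
 * continuation buffers are accumulated fragment by fragment, while a
 * non-continuation buffer completes the MSDU and consumes one @msdu_cnt.
 */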
4636 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4637 				      bool *is_frag, u32 *total_len,
4638 				      u32 *frag_len, u32 *msdu_cnt)
4639 {
4640 	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4641 		if (!*is_frag) {
4642 			*total_len = info->msdu_len;
4643 			*is_frag = true;
4644 		}
4645 		ath11k_dp_mon_set_frag_len(total_len,
4646 					   frag_len);
4647 	} else {
4648 		if (*is_frag) {
4649 			ath11k_dp_mon_set_frag_len(total_len,
4650 						   frag_len);
4651 		} else {
4652 			*frag_len = info->msdu_len;
4653 		}
4654 		*is_frag = false;
4655 		*msdu_cnt -= 1;
4656 	}
4657 }
4658 
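/* Pop the buffers of one MPDU from a monitor destination ring entry, chain
 * the MSDUs into a head/tail list and return the consumed link descriptors;
 * returns the number of rx buffers used.
 */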
4659 static u32
4660 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4661 			  void *ring_entry, struct sk_buff **head_msdu,
4662 			  struct sk_buff **tail_msdu, u32 *npackets,
4663 			  u32 *ppdu_id)
4664 {
4665 	struct ath11k_pdev_dp *dp = &ar->dp;
4666 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4667 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4668 	struct sk_buff *msdu = NULL, *last = NULL;
4669 	struct hal_rx_msdu_list msdu_list;
4670 	void *p_buf_addr_info, *p_last_buf_addr_info;
4671 	struct hal_rx_desc *rx_desc;
4672 	void *rx_msdu_link_desc;
4673 	dma_addr_t paddr;
4674 	u16 num_msdus = 0;
4675 	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4676 	u32 rx_bufs_used = 0, i = 0;
4677 	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4678 	u32 total_len = 0, frag_len = 0;
4679 	bool is_frag, is_first_msdu;
4680 	bool drop_mpdu = false;
4681 	struct ath11k_skb_rxcb *rxcb;
4682 	struct hal_reo_entrance_ring *ent_desc =
4683 			(struct hal_reo_entrance_ring *)ring_entry;
4684 	int buf_id;
4685 	u32 rx_link_buf_info[2];
4686 	u8 rbm;
4687 
4688 	if (!ar->ab->hw_params.rxdma1_enable)
4689 		rx_ring = &dp->rx_refill_buf_ring;
4690 
4691 	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4692 					    &sw_cookie,
4693 					    &p_last_buf_addr_info, &rbm,
4694 					    &msdu_cnt);
4695 
4696 	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4697 		      ent_desc->info1) ==
4698 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4699 		u8 rxdma_err =
4700 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4701 				  ent_desc->info1);
4702 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4703 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4704 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4705 			drop_mpdu = true;
4706 			pmon->rx_mon_stats.dest_mpdu_drop++;
4707 		}
4708 	}
4709 
4710 	is_frag = false;
4711 	is_first_msdu = true;
4712 
4713 	do {
4714 		if (pmon->mon_last_linkdesc_paddr == paddr) {
4715 			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4716 			return rx_bufs_used;
4717 		}
4718 
4719 		if (ar->ab->hw_params.rxdma1_enable)
4720 			rx_msdu_link_desc =
4721 #if defined(__linux__)
4722 				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
4723 #elif defined(__FreeBSD__)
4724 				(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
4725 #endif
4726 				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
4727 		else
4728 			rx_msdu_link_desc =
4729 #if defined(__linux__)
4730 				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4731 #elif defined(__FreeBSD__)
4732 				(u8 *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4733 #endif
4734 				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4735 
4736 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4737 					    &num_msdus);
4738 
4739 		for (i = 0; i < num_msdus; i++) {
4740 			u32 l2_hdr_offset;
4741 
4742 			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4743 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4744 					   "i %d last_cookie %d is same\n",
4745 					   i, pmon->mon_last_buf_cookie);
4746 				drop_mpdu = true;
4747 				pmon->rx_mon_stats.dup_mon_buf_cnt++;
4748 				continue;
4749 			}
4750 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4751 					   msdu_list.sw_cookie[i]);
4752 
4753 			spin_lock_bh(&rx_ring->idr_lock);
4754 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4755 			spin_unlock_bh(&rx_ring->idr_lock);
4756 			if (!msdu) {
4757 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4758 					   "msdu_pop: invalid buf_id %d\n", buf_id);
4759 				break;
4760 			}
4761 			rxcb = ATH11K_SKB_RXCB(msdu);
4762 			if (!rxcb->unmapped) {
4763 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
4764 						 msdu->len +
4765 						 skb_tailroom(msdu),
4766 						 DMA_FROM_DEVICE);
4767 				rxcb->unmapped = 1;
4768 			}
4769 			if (drop_mpdu) {
4770 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4771 					   "i %d drop msdu %p *ppdu_id %x\n",
4772 					   i, msdu, *ppdu_id);
4773 				dev_kfree_skb_any(msdu);
4774 				msdu = NULL;
4775 				goto next_msdu;
4776 			}
4777 
4778 			rx_desc = (struct hal_rx_desc *)msdu->data;
4779 
4780 			rx_pkt_offset = sizeof(struct hal_rx_desc);
4781 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4782 
4783 			if (is_first_msdu) {
4784 				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4785 					drop_mpdu = true;
4786 					dev_kfree_skb_any(msdu);
4787 					msdu = NULL;
4788 					pmon->mon_last_linkdesc_paddr = paddr;
4789 					goto next_msdu;
4790 				}
4791 
4792 				msdu_ppdu_id =
4793 					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4794 
4795 				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4796 								 ppdu_id,
4797 								 &rx_bufs_used)) {
4798 					if (rx_bufs_used) {
4799 						drop_mpdu = true;
4800 						dev_kfree_skb_any(msdu);
4801 						msdu = NULL;
4802 						goto next_msdu;
4803 					}
4804 					return rx_bufs_used;
4805 				}
4806 				pmon->mon_last_linkdesc_paddr = paddr;
4807 				is_first_msdu = false;
4808 			}
4809 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4810 						  &is_frag, &total_len,
4811 						  &frag_len, &msdu_cnt);
4812 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4813 
4814 			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
4815 
4816 			if (!(*head_msdu))
4817 				*head_msdu = msdu;
4818 			else if (last)
4819 				last->next = msdu;
4820 
4821 			last = msdu;
4822 next_msdu:
4823 			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4824 			rx_bufs_used++;
4825 			spin_lock_bh(&rx_ring->idr_lock);
4826 			idr_remove(&rx_ring->bufs_idr, buf_id);
4827 			spin_unlock_bh(&rx_ring->idr_lock);
4828 		}
4829 
4830 		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4831 
4832 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4833 						    &sw_cookie, &rbm,
4834 						    &p_buf_addr_info);
4835 
4836 		if (ar->ab->hw_params.rxdma1_enable) {
4837 			if (ath11k_dp_rx_monitor_link_desc_return(ar,
4838 								  p_last_buf_addr_info,
4839 								  dp->mac_id))
4840 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4841 					   "dp_rx_monitor_link_desc_return failed");
4842 		} else {
4843 			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4844 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4845 		}
4846 
4847 		p_last_buf_addr_info = p_buf_addr_info;
4848 
4849 	} while (paddr && msdu_cnt);
4850 
4851 	if (last)
4852 		last->next = NULL;
4853 
4854 	*tail_msdu = msdu;
4855 
4856 	if (msdu_cnt == 0)
4857 		*npackets = 1;
4858 
4859 	return rx_bufs_used;
4860 }
4861 
4862 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4863 {
4864 	u32 rx_pkt_offset, l2_hdr_offset;
4865 
4866 	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4867 	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4868 						      (struct hal_rx_desc *)msdu->data);
4869 	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4870 }
4871 
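/* Merge a popped MSDU chain into a frame mac80211 can consume: strip the hal
 * rx descriptors and, for native wifi decap, rebuild the 802.11 header
 * (including the QoS control field) in front of the payload.
 */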
4872 static struct sk_buff *
4873 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4874 			    u32 mac_id, struct sk_buff *head_msdu,
4875 			    struct sk_buff *last_msdu,
4876 			    struct ieee80211_rx_status *rxs, bool *fcs_err)
4877 {
4878 	struct ath11k_base *ab = ar->ab;
4879 	struct sk_buff *msdu, *prev_buf;
4880 	u32 wifi_hdr_len;
4881 	struct hal_rx_desc *rx_desc;
4882 	char *hdr_desc;
4883 	u8 *dest, decap_format;
4884 	struct ieee80211_hdr_3addr *wh;
4885 	struct rx_attention *rx_attention;
4886 	u32 err_bitmap;
4887 
4888 	if (!head_msdu)
4889 		goto err_merge_fail;
4890 
4891 	rx_desc = (struct hal_rx_desc *)head_msdu->data;
4892 	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4893 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4894 
4895 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4896 		*fcs_err = true;
4897 
4898 	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4899 		return NULL;
4900 
4901 	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4902 
4903 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4904 
4905 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4906 		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4907 
4908 		prev_buf = head_msdu;
4909 		msdu = head_msdu->next;
4910 
4911 		while (msdu) {
4912 			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4913 
4914 			prev_buf = msdu;
4915 			msdu = msdu->next;
4916 		}
4917 
4918 		prev_buf->next = NULL;
4919 
4920 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4921 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4922 		__le16 qos_field;
4923 		u8 qos_pkt = 0;
4924 
4925 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
4926 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4927 
4928 		/* Base size */
4929 		wifi_hdr_len = sizeof(struct ieee80211_hdr_3addr);
4930 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4931 
4932 		if (ieee80211_is_data_qos(wh->frame_control)) {
4933 			struct ieee80211_qos_hdr *qwh =
4934 					(struct ieee80211_qos_hdr *)hdr_desc;
4935 
4936 			qos_field = qwh->qos_ctrl;
4937 			qos_pkt = 1;
4938 		}
4939 		msdu = head_msdu;
4940 
4941 		while (msdu) {
4942 			rx_desc = (struct hal_rx_desc *)msdu->data;
4943 			hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4944 
4945 			if (qos_pkt) {
4946 				dest = skb_push(msdu, sizeof(__le16));
4947 				if (!dest)
4948 					goto err_merge_fail;
4949 				memcpy(dest, hdr_desc, wifi_hdr_len);
4950 				memcpy(dest + wifi_hdr_len,
4951 				       (u8 *)&qos_field, sizeof(__le16));
4952 			}
4953 			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4954 			prev_buf = msdu;
4955 			msdu = msdu->next;
4956 		}
4957 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4958 		if (!dest)
4959 			goto err_merge_fail;
4960 
4961 		ath11k_dbg(ab, ATH11K_DBG_DATA,
4962 			   "mpdu_buf %pK mpdu_buf->len %u",
4963 			   prev_buf, prev_buf->len);
4964 	} else {
4965 		ath11k_dbg(ab, ATH11K_DBG_DATA,
4966 			   "decap format %d is not supported!\n",
4967 			   decap_format);
4968 		goto err_merge_fail;
4969 	}
4970 
4971 	return head_msdu;
4972 
4973 err_merge_fail:
4974 	return NULL;
4975 }
4976 
4977 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
4978 				    struct sk_buff *head_msdu,
4979 				    struct sk_buff *tail_msdu,
4980 				    struct napi_struct *napi)
4981 {
4982 	struct ath11k_pdev_dp *dp = &ar->dp;
4983 	struct sk_buff *mon_skb, *skb_next, *header;
4984 	struct ieee80211_rx_status *rxs = &dp->rx_status;
4985 	bool fcs_err = false;
4986 
4987 	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
4988 					      tail_msdu, rxs, &fcs_err);
4989 
4990 	if (!mon_skb)
4991 		goto mon_deliver_fail;
4992 
4993 	header = mon_skb;
4994 
4995 	rxs->flag = 0;
4996 
4997 	if (fcs_err)
4998 		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
4999 
5000 	do {
5001 		skb_next = mon_skb->next;
5002 		if (!skb_next)
5003 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5004 		else
5005 			rxs->flag |= RX_FLAG_AMSDU_MORE;
5006 
5007 		if (mon_skb == header) {
5008 			header = NULL;
5009 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5010 		} else {
5011 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5012 		}
5013 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
5014 
5015 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5016 		mon_skb = skb_next;
5017 	} while (mon_skb);
5018 	rxs->flag = 0;
5019 
5020 	return 0;
5021 
5022 mon_deliver_fail:
5023 	mon_skb = head_msdu;
5024 	while (mon_skb) {
5025 		skb_next = mon_skb->next;
5026 		dev_kfree_skb_any(mon_skb);
5027 		mon_skb = skb_next;
5028 	}
5029 	return -EINVAL;
5030 }
5031 
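/* Reap the monitor destination ring for the PPDU currently tracked by the
 * status ring, deliver complete MPDUs to mac80211 and replenish the rx
 * buffers that were consumed.
 */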
5032 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5033 					  u32 quota, struct napi_struct *napi)
5034 {
5035 	struct ath11k_pdev_dp *dp = &ar->dp;
5036 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5037 	const struct ath11k_hw_hal_params *hal_params;
5038 	void *ring_entry;
5039 	void *mon_dst_srng;
5040 	u32 ppdu_id;
5041 	u32 rx_bufs_used;
5042 	u32 ring_id;
5043 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5044 	u32 npackets = 0;
5045 
5046 	if (ar->ab->hw_params.rxdma1_enable)
5047 		ring_id = dp->rxdma_mon_dst_ring.ring_id;
5048 	else
5049 		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5050 
5051 	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5052 
5053 	if (!mon_dst_srng) {
5054 		ath11k_warn(ar->ab,
5055 			    "HAL Monitor Destination Ring Init Failed -- %pK",
5056 			    mon_dst_srng);
5057 		return;
5058 	}
5059 
5060 	spin_lock_bh(&pmon->mon_lock);
5061 
5062 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5063 
5064 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5065 	rx_bufs_used = 0;
5066 	rx_mon_stats = &pmon->rx_mon_stats;
5067 
5068 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5069 		struct sk_buff *head_msdu, *tail_msdu;
5070 
5071 		head_msdu = NULL;
5072 		tail_msdu = NULL;
5073 
5074 		rx_bufs_used += ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5075 							  &head_msdu,
5076 							  &tail_msdu,
5077 							  &npackets, &ppdu_id);
5078 
5079 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5080 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5081 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5082 				   "dest_rx: new ppdu_id %x != status ppdu_id %x",
5083 				   ppdu_id, pmon->mon_ppdu_info.ppdu_id);
5084 			break;
5085 		}
5086 		if (head_msdu && tail_msdu) {
5087 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5088 						 tail_msdu, napi);
5089 			rx_mon_stats->dest_mpdu_done++;
5090 		}
5091 
5092 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5093 								mon_dst_srng);
5094 	}
5095 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5096 
5097 	spin_unlock_bh(&pmon->mon_lock);
5098 
5099 	if (rx_bufs_used) {
5100 		rx_mon_stats->dest_ppdu_done++;
5101 		hal_params = ar->ab->hw_params.hal_params;
5102 
5103 		if (ar->ab->hw_params.rxdma1_enable)
5104 			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5105 						   &dp->rxdma_mon_buf_ring,
5106 						   rx_bufs_used,
5107 						   hal_params->rx_buf_rbm);
5108 		else
5109 			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5110 						   &dp->rx_refill_buf_ring,
5111 						   rx_bufs_used,
5112 						   hal_params->rx_buf_rbm);
5113 	}
5114 }
5115 
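/* Process the monitor status ring: parse status TLVs, kick destination ring
 * processing once a PPDU is complete and update per-peer rx statistics.
 */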
5116 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5117 				    struct napi_struct *napi, int budget)
5118 {
5119 	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5120 	enum hal_rx_mon_status hal_status;
5121 	struct sk_buff *skb;
5122 	struct sk_buff_head skb_list;
5123 	struct ath11k_peer *peer;
5124 	struct ath11k_sta *arsta;
5125 	int num_buffs_reaped = 0;
5126 	u32 rx_buf_sz;
5127 	u16 log_type = 0;
5128 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5129 	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5130 	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5131 
5132 	__skb_queue_head_init(&skb_list);
5133 
5134 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5135 							     &skb_list);
5136 	if (!num_buffs_reaped)
5137 		goto exit;
5138 
5139 	memset(ppdu_info, 0, sizeof(*ppdu_info));
5140 	ppdu_info->peer_id = HAL_INVALID_PEERID;
5141 
5142 	while ((skb = __skb_dequeue(&skb_list))) {
5143 		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5144 			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5145 			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5146 		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5147 			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5148 			rx_buf_sz = DP_RX_BUFFER_SIZE;
5149 		}
5150 
5151 		if (log_type)
5152 			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5153 
5154 		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5155 
5156 		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5157 		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5158 		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5159 			rx_mon_stats->status_ppdu_done++;
5160 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5161 			ath11k_dp_rx_mon_dest_process(ar, mac_id, budget, napi);
5162 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5163 		}
5164 
5165 		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5166 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5167 			dev_kfree_skb_any(skb);
5168 			continue;
5169 		}
5170 
5171 		rcu_read_lock();
5172 		spin_lock_bh(&ab->base_lock);
5173 		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5174 
5175 		if (!peer || !peer->sta) {
5176 			ath11k_dbg(ab, ATH11K_DBG_DATA,
5177 				   "failed to find the peer with peer_id %d\n",
5178 				   ppdu_info->peer_id);
5179 			goto next_skb;
5180 		}
5181 
5182 		arsta = (struct ath11k_sta *)peer->sta->drv_priv;
5183 		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5184 
5185 		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5186 			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5187 
5188 next_skb:
5189 		spin_unlock_bh(&ab->base_lock);
5190 		rcu_read_unlock();
5191 
5192 		dev_kfree_skb_any(skb);
5193 		memset(ppdu_info, 0, sizeof(*ppdu_info));
5194 		ppdu_info->peer_id = HAL_INVALID_PEERID;
5195 	}
5196 exit:
5197 	return num_buffs_reaped;
5198 }
5199 
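/* Full monitor mode variant of the MPDU pop; buffers are described by
 * software monitor ring entries rather than REO entrance ring descriptors.
 */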
5200 static u32
5201 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5202 			       void *ring_entry, struct sk_buff **head_msdu,
5203 			       struct sk_buff **tail_msdu,
5204 			       struct hal_sw_mon_ring_entries *sw_mon_entries)
5205 {
5206 	struct ath11k_pdev_dp *dp = &ar->dp;
5207 	struct ath11k_mon_data *pmon = &dp->mon_data;
5208 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5209 	struct sk_buff *msdu = NULL, *last = NULL;
5210 	struct hal_sw_monitor_ring *sw_desc = ring_entry;
5211 	struct hal_rx_msdu_list msdu_list;
5212 	struct hal_rx_desc *rx_desc;
5213 	struct ath11k_skb_rxcb *rxcb;
5214 	void *rx_msdu_link_desc;
5215 	void *p_buf_addr_info, *p_last_buf_addr_info;
5216 	int buf_id, i = 0;
5217 	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5218 	u32 rx_bufs_used = 0, msdu_cnt = 0;
5219 	u32 total_len = 0, frag_len = 0, sw_cookie;
5220 	u16 num_msdus = 0;
5221 	u8 rxdma_err, rbm;
5222 	bool is_frag, is_first_msdu;
5223 	bool drop_mpdu = false;
5224 
5225 	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5226 
5227 	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5228 	sw_mon_entries->end_of_ppdu = false;
5229 	sw_mon_entries->drop_ppdu = false;
5230 	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5231 	msdu_cnt = sw_mon_entries->msdu_cnt;
5232 
5233 	sw_mon_entries->end_of_ppdu =
5234 		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5235 	if (sw_mon_entries->end_of_ppdu)
5236 		return rx_bufs_used;
5237 
5238 	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5239 		      sw_desc->info0) ==
5240 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5241 		rxdma_err =
5242 			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5243 				  sw_desc->info0);
5244 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5245 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5246 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5247 			pmon->rx_mon_stats.dest_mpdu_drop++;
5248 			drop_mpdu = true;
5249 		}
5250 	}
5251 
5252 	is_frag = false;
5253 	is_first_msdu = true;
5254 
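	/* Walk the chain of MSDU link descriptors for this entry, popping
	 * each buffer from the rxdma monitor buffer idr and linking it into
	 * the head/tail list unless the MPDU is being dropped.
	 */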
5255 	do {
5256 		rx_msdu_link_desc =
5257 			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5258 			(sw_mon_entries->mon_dst_paddr -
5259 			 pmon->link_desc_banks[sw_cookie].paddr);
5260 
5261 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5262 					    &num_msdus);
5263 
5264 		for (i = 0; i < num_msdus; i++) {
5265 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5266 					   msdu_list.sw_cookie[i]);
5267 
5268 			spin_lock_bh(&rx_ring->idr_lock);
5269 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5270 			if (!msdu) {
5271 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5272 					   "full mon msdu_pop: invalid buf_id %d\n",
5273 					    buf_id);
5274 				spin_unlock_bh(&rx_ring->idr_lock);
5275 				break;
5276 			}
5277 			idr_remove(&rx_ring->bufs_idr, buf_id);
5278 			spin_unlock_bh(&rx_ring->idr_lock);
5279 
5280 			rxcb = ATH11K_SKB_RXCB(msdu);
5281 			if (!rxcb->unmapped) {
5282 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
5283 						 msdu->len +
5284 						 skb_tailroom(msdu),
5285 						 DMA_FROM_DEVICE);
5286 				rxcb->unmapped = 1;
5287 			}
5288 			if (drop_mpdu) {
5289 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5290 					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
5291 					   i, msdu, sw_mon_entries->ppdu_id);
5292 				dev_kfree_skb_any(msdu);
5293 				msdu_cnt--;
5294 				goto next_msdu;
5295 			}
5296 
5297 			rx_desc = (struct hal_rx_desc *)msdu->data;
5298 
5299 			rx_pkt_offset = sizeof(struct hal_rx_desc);
5300 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5301 
5302 			if (is_first_msdu) {
5303 				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5304 					drop_mpdu = true;
5305 					dev_kfree_skb_any(msdu);
5306 					msdu = NULL;
5307 					goto next_msdu;
5308 				}
5309 				is_first_msdu = false;
5310 			}
5311 
5312 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5313 						  &is_frag, &total_len,
5314 						  &frag_len, &msdu_cnt);
5315 
5316 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5317 
5318 			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5319 
5320 			if (!(*head_msdu))
5321 				*head_msdu = msdu;
5322 			else if (last)
5323 				last->next = msdu;
5324 
5325 			last = msdu;
5326 next_msdu:
5327 			rx_bufs_used++;
5328 		}
5329 
5330 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5331 						    &sw_mon_entries->mon_dst_paddr,
5332 						    &sw_mon_entries->mon_dst_sw_cookie,
5333 						    &rbm,
5334 						    &p_buf_addr_info);
5335 
5336 		if (ath11k_dp_rx_monitor_link_desc_return(ar,
5337 							  p_last_buf_addr_info,
5338 							  dp->mac_id))
5339 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5340 				   "full mon: dp_rx_monitor_link_desc_return failed\n");
5341 
5342 		p_last_buf_addr_info = p_buf_addr_info;
5343 
5344 	} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5345 
5346 	if (last)
5347 		last->next = NULL;
5348 
5349 	*tail_msdu = msdu;
5350 
5351 	return rx_bufs_used;
5352 }
5353 
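/* Queue a completed head/tail MSDU chain on the full monitor MPDU list for
 * delivery once the matching status ring data has been reaped. Note that
 * the mon_mpdu argument passed in is not used here; a fresh node is always
 * allocated.
 */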
5354 static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5355 					      struct dp_full_mon_mpdu *mon_mpdu,
5356 					      struct sk_buff *head,
5357 					      struct sk_buff *tail)
5358 {
5359 	mon_mpdu = kzalloc(sizeof(*mon_mpdu), GFP_ATOMIC);
5360 	if (!mon_mpdu)
5361 		return -ENOMEM;
5362 
5363 	list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5364 	mon_mpdu->head = head;
5365 	mon_mpdu->tail = tail;
5366 
5367 	return 0;
5368 }
5369 
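/* Free every MPDU (and all of its MSDUs) queued on the full monitor list
 * when the PPDU cannot be delivered, e.g. when no matching status buffer
 * was found.
 */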
5370 static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5371 					    struct dp_full_mon_mpdu *mon_mpdu)
5372 {
5373 	struct dp_full_mon_mpdu *tmp;
5374 	struct sk_buff *tmp_msdu, *skb_next;
5375 
5376 	if (list_empty(&dp->dp_full_mon_mpdu_list))
5377 		return;
5378 
5379 	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5380 		list_del(&mon_mpdu->list);
5381 
5382 		tmp_msdu = mon_mpdu->head;
5383 		while (tmp_msdu) {
5384 			skb_next = tmp_msdu->next;
5385 			dev_kfree_skb_any(tmp_msdu);
5386 			tmp_msdu = skb_next;
5387 		}
5388 
5389 		kfree(mon_mpdu);
5390 	}
5391 }
5392 
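/* Deliver every MPDU queued on the full monitor list via
 * ath11k_dp_rx_mon_deliver() and release the list entries.
 */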
5393 static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5394 					      int mac_id,
5395 					      struct ath11k_mon_data *pmon,
5396 					      struct napi_struct *napi)
5397 {
5398 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5399 	struct dp_full_mon_mpdu *tmp;
5400 	struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5401 	struct sk_buff *head_msdu, *tail_msdu;
5402 	struct ath11k_base *ab = ar->ab;
5403 	struct ath11k_dp *dp = &ab->dp;
5404 	int ret = 0;
5405 
5406 	rx_mon_stats = &pmon->rx_mon_stats;
5407 
5408 	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5409 		list_del(&mon_mpdu->list);
5410 		head_msdu = mon_mpdu->head;
5411 		tail_msdu = mon_mpdu->tail;
5412 		if (head_msdu && tail_msdu) {
5413 			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5414 						       tail_msdu, napi);
5415 			rx_mon_stats->dest_mpdu_done++;
5416 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5417 		}
5418 		kfree(mon_mpdu);
5419 	}
5420 
5421 	return ret;
5422 }
5423 
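/* Reap the monitor status ring until the status buffer matching the
 * currently held destination ring entry is found (or the PPDU has to be
 * dropped), then deliver or drop the queued MPDUs accordingly.
 */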
5424 static int
5425 ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5426 					  struct napi_struct *napi, int budget)
5427 {
5428 	struct ath11k *ar = ab->pdevs[mac_id].ar;
5429 	struct ath11k_pdev_dp *dp = &ar->dp;
5430 	struct ath11k_mon_data *pmon = &dp->mon_data;
5431 	struct hal_sw_mon_ring_entries *sw_mon_entries;
5432 	int quota = 0, work = 0, count;
5433 
5434 	sw_mon_entries = &pmon->sw_mon_entries;
5435 
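	/* Keep reaping status buffers while the destination ring is held:
	 * a MATCH delivers the queued PPDU, while a missing or leading
	 * status buffer marks the PPDU to be dropped.
	 */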
5436 	while (pmon->hold_mon_dst_ring) {
5437 		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5438 							napi, 1);
5439 		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5440 			count = sw_mon_entries->status_buf_count;
5441 			if (count > 1) {
5442 				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5443 									 napi, count);
5444 			}
5445 
5446 			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5447 							   pmon, napi);
5448 			pmon->hold_mon_dst_ring = false;
5449 		} else if (!pmon->mon_status_paddr ||
5450 			   pmon->buf_state == DP_MON_STATUS_LEAD) {
5451 			sw_mon_entries->drop_ppdu = true;
5452 			pmon->hold_mon_dst_ring = false;
5453 		}
5454 
5455 		if (!quota)
5456 			break;
5457 
5458 		work += quota;
5459 	}
5460 
5461 	if (sw_mon_entries->drop_ppdu)
5462 		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5463 
5464 	return work;
5465 }
5466 
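/* NAPI entry point for full monitor mode: reap the rxdma monitor
 * destination ring, queue the popped MPDUs per PPDU, replenish the used
 * buffers and then process the matching status ring entries.
 */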
5467 static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5468 					 struct napi_struct *napi, int budget)
5469 {
5470 	struct ath11k *ar = ab->pdevs[mac_id].ar;
5471 	struct ath11k_pdev_dp *dp = &ar->dp;
5472 	struct ath11k_mon_data *pmon = &dp->mon_data;
5473 	struct hal_sw_mon_ring_entries *sw_mon_entries;
5474 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5475 	struct sk_buff *head_msdu, *tail_msdu;
5476 	void *mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5477 	void *ring_entry;
5478 	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5479 	int quota = 0, ret;
5480 	bool break_dst_ring = false;
5481 
5482 	spin_lock_bh(&pmon->mon_lock);
5483 
5484 	sw_mon_entries = &pmon->sw_mon_entries;
5485 	rx_mon_stats = &pmon->rx_mon_stats;
5486 
5487 	if (pmon->hold_mon_dst_ring) {
5488 		spin_unlock_bh(&pmon->mon_lock);
5489 		goto reap_status_ring;
5490 	}
5491 
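	/* Pop MSDU chains from the monitor destination ring and queue them
	 * per MPDU until the ring is empty or we have to stop early.
	 */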
5492 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5493 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5494 		head_msdu = NULL;
5495 		tail_msdu = NULL;
5496 
5497 		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5498 								   &head_msdu,
5499 								   &tail_msdu,
5500 								   sw_mon_entries);
5501 		rx_bufs_used += mpdu_rx_bufs_used;
5502 
5503 		if (!sw_mon_entries->end_of_ppdu) {
5504 			if (head_msdu) {
5505 				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5506 									 pmon->mon_mpdu,
5507 									 head_msdu,
5508 									 tail_msdu);
5509 				if (ret)
5510 					break_dst_ring = true;
5511 			}
5512 
5513 			goto next_entry;
5514 		} else {
5515 			if (!sw_mon_entries->ppdu_id &&
5516 			    !sw_mon_entries->mon_status_paddr) {
5517 				break_dst_ring = true;
5518 				goto next_entry;
5519 			}
5520 		}
5521 
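		/* End of PPDU: remember the status buffer address and hold
		 * the destination ring so the status ring can be reaped to
		 * find the matching buffer.
		 */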
5522 		rx_mon_stats->dest_ppdu_done++;
5523 		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5524 		pmon->buf_state = DP_MON_STATUS_LAG;
5525 		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5526 		pmon->hold_mon_dst_ring = true;
5527 next_entry:
5528 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5529 								mon_dst_srng);
5530 		if (break_dst_ring)
5531 			break;
5532 	}
5533 
5534 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5535 	spin_unlock_bh(&pmon->mon_lock);
5536 
5537 	if (rx_bufs_used) {
5538 		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5539 					   &dp->rxdma_mon_buf_ring,
5540 					   rx_bufs_used,
5541 					   HAL_RX_BUF_RBM_SW3_BM);
5542 	}
5543 
5544 reap_status_ring:
5545 	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5546 							  napi, budget);
5547 
5548 	return quota;
5549 }
5550 
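/* Monitor ring NAPI handler: dispatch to the full monitor path when the
 * hardware supports it and the monitor interface has been started,
 * otherwise fall back to plain status ring processing.
 */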
5551 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5552 				   struct napi_struct *napi, int budget)
5553 {
5554 	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5555 	int ret = 0;
5556 
5557 	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5558 	    ab->hw_params.full_monitor_mode)
5559 		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5560 	else
5561 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5562 
5563 	return ret;
5564 }
5565 
5566 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5567 {
5568 	struct ath11k_pdev_dp *dp = &ar->dp;
5569 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5570 
5571 	skb_queue_head_init(&pmon->rx_status_q);
5572 
5573 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5574 
5575 	memset(&pmon->rx_mon_stats, 0,
5576 	       sizeof(pmon->rx_mon_stats));
5577 	return 0;
5578 }
5579 
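/* Per-pdev monitor init: set up the status reaper state and, when rxdma1
 * is available, the monitor link descriptor banks used by the destination
 * ring.
 */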
5580 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5581 {
5582 	struct ath11k_pdev_dp *dp = &ar->dp;
5583 	struct ath11k_mon_data *pmon = &dp->mon_data;
5584 	struct hal_srng *mon_desc_srng = NULL;
5585 	struct dp_srng *dp_srng;
5586 	int ret = 0;
5587 	u32 n_link_desc = 0;
5588 
5589 	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5590 	if (ret) {
5591 		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
5592 		return ret;
5593 	}
5594 
5595 	/* if rxdma1_enable is false, no need to setup
5596 	 * rxdma_mon_desc_ring.
5597 	 */
5598 	if (!ar->ab->hw_params.rxdma1_enable)
5599 		return 0;
5600 
5601 	dp_srng = &dp->rxdma_mon_desc_ring;
5602 	n_link_desc = dp_srng->size /
5603 		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5604 	mon_desc_srng =
5605 		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5606 
5607 	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5608 					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5609 					n_link_desc);
5610 	if (ret) {
5611 		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed\n");
5612 		return ret;
5613 	}
5614 	pmon->mon_last_linkdesc_paddr = 0;
5615 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5616 	spin_lock_init(&pmon->mon_lock);
5617 
5618 	return 0;
5619 }
5620 
5621 static int ath11k_dp_mon_link_free(struct ath11k *ar)
5622 {
5623 	struct ath11k_pdev_dp *dp = &ar->dp;
5624 	struct ath11k_mon_data *pmon = &dp->mon_data;
5625 
5626 	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5627 				    HAL_RXDMA_MONITOR_DESC,
5628 				    &dp->rxdma_mon_desc_ring);
5629 	return 0;
5630 }
5631 
5632 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
5633 {
5634 	ath11k_dp_mon_link_free(ar);
5635 	return 0;
5636 }
5637 
5638 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5639 {
5640 	/* start reap timer */
5641 	mod_timer(&ab->mon_reap_timer,
5642 		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5643 
5644 	return 0;
5645 }
5646 
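/* Stop pktlog: optionally stop the reap timer and drain whatever is left
 * on the monitor rings.
 */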
5647 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5648 {
5649 	int ret;
5650 
5651 	if (stop_timer)
5652 		del_timer_sync(&ab->mon_reap_timer);
5653 
5654 	/* reap all the monitor related rings */
5655 	ret = ath11k_dp_purge_mon_ring(ab);
5656 	if (ret) {
5657 		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5658 		return ret;
5659 	}
5660 
5661 	return 0;
5662 }
5663