xref: /linux/drivers/net/wireless/ath/ath11k/dp_rx.c (revision e225b36f83d7926c1f2035923bb0359d851fdb73)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/ieee80211.h>
8 #include <linux/kernel.h>
9 #include <linux/skbuff.h>
10 #include <crypto/hash.h>
11 #include "core.h"
12 #include "debug.h"
13 #include "debugfs_htt_stats.h"
14 #include "debugfs_sta.h"
15 #include "hal_desc.h"
16 #include "hw.h"
17 #include "dp_rx.h"
18 #include "hal_rx.h"
19 #include "dp_tx.h"
20 #include "peer.h"
21 
22 #define ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS (2 * HZ)
23 
24 static inline
25 u8 *ath11k_dp_rx_h_80211_hdr(struct ath11k_base *ab, struct hal_rx_desc *desc)
26 {
27 	return ab->hw_params.hw_ops->rx_desc_get_hdr_status(desc);
28 }
29 
30 static inline
31 enum hal_encrypt_type ath11k_dp_rx_h_mpdu_start_enctype(struct ath11k_base *ab,
32 							struct hal_rx_desc *desc)
33 {
34 	if (!ab->hw_params.hw_ops->rx_desc_encrypt_valid(desc))
35 		return HAL_ENCRYPT_TYPE_OPEN;
36 
37 	return ab->hw_params.hw_ops->rx_desc_get_encrypt_type(desc);
38 }
39 
40 static inline u8 ath11k_dp_rx_h_msdu_start_decap_type(struct ath11k_base *ab,
41 						      struct hal_rx_desc *desc)
42 {
43 	return ab->hw_params.hw_ops->rx_desc_get_decap_type(desc);
44 }
45 
46 static inline
47 bool ath11k_dp_rx_h_msdu_start_ldpc_support(struct ath11k_base *ab,
48 					    struct hal_rx_desc *desc)
49 {
50 	return ab->hw_params.hw_ops->rx_desc_get_ldpc_support(desc);
51 }
52 
53 static inline
54 u8 ath11k_dp_rx_h_msdu_start_mesh_ctl_present(struct ath11k_base *ab,
55 					      struct hal_rx_desc *desc)
56 {
57 	return ab->hw_params.hw_ops->rx_desc_get_mesh_ctl(desc);
58 }
59 
60 static inline
61 bool ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(struct ath11k_base *ab,
62 					      struct hal_rx_desc *desc)
63 {
64 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_seq_ctl_vld(desc);
65 }
66 
67 static inline bool ath11k_dp_rx_h_mpdu_start_fc_valid(struct ath11k_base *ab,
68 						      struct hal_rx_desc *desc)
69 {
70 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_fc_valid(desc);
71 }
72 
73 static inline bool ath11k_dp_rx_h_mpdu_start_more_frags(struct ath11k_base *ab,
74 							struct sk_buff *skb)
75 {
76 	struct ieee80211_hdr *hdr;
77 
78 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
79 	return ieee80211_has_morefrags(hdr->frame_control);
80 }
81 
82 static inline u16 ath11k_dp_rx_h_mpdu_start_frag_no(struct ath11k_base *ab,
83 						    struct sk_buff *skb)
84 {
85 	struct ieee80211_hdr *hdr;
86 
87 	hdr = (struct ieee80211_hdr *)(skb->data + ab->hw_params.hal_desc_sz);
88 	return le16_to_cpu(hdr->seq_ctrl) & IEEE80211_SCTL_FRAG;
89 }
90 
91 static inline u16 ath11k_dp_rx_h_mpdu_start_seq_no(struct ath11k_base *ab,
92 						   struct hal_rx_desc *desc)
93 {
94 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_start_seq_no(desc);
95 }
96 
97 static inline void *ath11k_dp_rx_get_attention(struct ath11k_base *ab,
98 					       struct hal_rx_desc *desc)
99 {
100 	return ab->hw_params.hw_ops->rx_desc_get_attention(desc);
101 }
102 
103 static inline bool ath11k_dp_rx_h_attn_msdu_done(struct rx_attention *attn)
104 {
105 	return !!FIELD_GET(RX_ATTENTION_INFO2_MSDU_DONE,
106 			   __le32_to_cpu(attn->info2));
107 }
108 
109 static inline bool ath11k_dp_rx_h_attn_l4_cksum_fail(struct rx_attention *attn)
110 {
111 	return !!FIELD_GET(RX_ATTENTION_INFO1_TCP_UDP_CKSUM_FAIL,
112 			   __le32_to_cpu(attn->info1));
113 }
114 
115 static inline bool ath11k_dp_rx_h_attn_ip_cksum_fail(struct rx_attention *attn)
116 {
117 	return !!FIELD_GET(RX_ATTENTION_INFO1_IP_CKSUM_FAIL,
118 			   __le32_to_cpu(attn->info1));
119 }
120 
121 static inline bool ath11k_dp_rx_h_attn_is_decrypted(struct rx_attention *attn)
122 {
123 	return (FIELD_GET(RX_ATTENTION_INFO2_DCRYPT_STATUS_CODE,
124 			  __le32_to_cpu(attn->info2)) ==
125 		RX_DESC_DECRYPT_STATUS_CODE_OK);
126 }
127 
128 static u32 ath11k_dp_rx_h_attn_mpdu_err(struct rx_attention *attn)
129 {
130 	u32 info = __le32_to_cpu(attn->info1);
131 	u32 errmap = 0;
132 
133 	if (info & RX_ATTENTION_INFO1_FCS_ERR)
134 		errmap |= DP_RX_MPDU_ERR_FCS;
135 
136 	if (info & RX_ATTENTION_INFO1_DECRYPT_ERR)
137 		errmap |= DP_RX_MPDU_ERR_DECRYPT;
138 
139 	if (info & RX_ATTENTION_INFO1_TKIP_MIC_ERR)
140 		errmap |= DP_RX_MPDU_ERR_TKIP_MIC;
141 
142 	if (info & RX_ATTENTION_INFO1_A_MSDU_ERROR)
143 		errmap |= DP_RX_MPDU_ERR_AMSDU_ERR;
144 
145 	if (info & RX_ATTENTION_INFO1_OVERFLOW_ERR)
146 		errmap |= DP_RX_MPDU_ERR_OVERFLOW;
147 
148 	if (info & RX_ATTENTION_INFO1_MSDU_LEN_ERR)
149 		errmap |= DP_RX_MPDU_ERR_MSDU_LEN;
150 
151 	if (info & RX_ATTENTION_INFO1_MPDU_LEN_ERR)
152 		errmap |= DP_RX_MPDU_ERR_MPDU_LEN;
153 
154 	return errmap;
155 }
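/* The attention helpers above all follow the same pattern: a little-endian
 * info word is converted to host order and a field is extracted with
 * FIELD_GET() from <linux/bitfield.h>. Below is a minimal standalone
 * userspace sketch of those get/prep semantics; the SAMPLE_* masks are
 * hypothetical, not the real RX_ATTENTION_INFO2 layout, and the kernel
 * macros additionally do compile-time mask checking.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_GENMASK(h, l)	(((~0u) << (l)) & (~0u >> (31 - (h))))
#define SKETCH_FIELD_GET(mask, reg)	(((reg) & (mask)) >> __builtin_ctz(mask))
#define SKETCH_FIELD_PREP(mask, val)	(((val) << __builtin_ctz(mask)) & (mask))

#define SAMPLE_MSDU_DONE	SKETCH_GENMASK(31, 31)	/* hypothetical bits */
#define SAMPLE_DCRYPT_CODE	SKETCH_GENMASK(12, 10)

int main(void)
{
	uint32_t info2 = SKETCH_FIELD_PREP(SAMPLE_MSDU_DONE, 1) |
			 SKETCH_FIELD_PREP(SAMPLE_DCRYPT_CODE, 0x2);

	/* prints "msdu_done=1 decrypt_code=0x2" */
	printf("msdu_done=%u decrypt_code=0x%x\n",
	       SKETCH_FIELD_GET(SAMPLE_MSDU_DONE, info2),
	       SKETCH_FIELD_GET(SAMPLE_DCRYPT_CODE, info2));
	return 0;
}
#endif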
156 
157 static bool ath11k_dp_rx_h_attn_msdu_len_err(struct ath11k_base *ab,
158 					     struct hal_rx_desc *desc)
159 {
160 	struct rx_attention *rx_attention;
161 	u32 errmap;
162 
163 	rx_attention = ath11k_dp_rx_get_attention(ab, desc);
164 	errmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
165 
166 	return errmap & DP_RX_MPDU_ERR_MSDU_LEN;
167 }
168 
169 static inline u16 ath11k_dp_rx_h_msdu_start_msdu_len(struct ath11k_base *ab,
170 						     struct hal_rx_desc *desc)
171 {
172 	return ab->hw_params.hw_ops->rx_desc_get_msdu_len(desc);
173 }
174 
175 static inline u8 ath11k_dp_rx_h_msdu_start_sgi(struct ath11k_base *ab,
176 					       struct hal_rx_desc *desc)
177 {
178 	return ab->hw_params.hw_ops->rx_desc_get_msdu_sgi(desc);
179 }
180 
181 static inline u8 ath11k_dp_rx_h_msdu_start_rate_mcs(struct ath11k_base *ab,
182 						    struct hal_rx_desc *desc)
183 {
184 	return ab->hw_params.hw_ops->rx_desc_get_msdu_rate_mcs(desc);
185 }
186 
187 static inline u8 ath11k_dp_rx_h_msdu_start_rx_bw(struct ath11k_base *ab,
188 						 struct hal_rx_desc *desc)
189 {
190 	return ab->hw_params.hw_ops->rx_desc_get_msdu_rx_bw(desc);
191 }
192 
193 static inline u32 ath11k_dp_rx_h_msdu_start_freq(struct ath11k_base *ab,
194 						 struct hal_rx_desc *desc)
195 {
196 	return ab->hw_params.hw_ops->rx_desc_get_msdu_freq(desc);
197 }
198 
199 static inline u8 ath11k_dp_rx_h_msdu_start_pkt_type(struct ath11k_base *ab,
200 						    struct hal_rx_desc *desc)
201 {
202 	return ab->hw_params.hw_ops->rx_desc_get_msdu_pkt_type(desc);
203 }
204 
205 static inline u8 ath11k_dp_rx_h_msdu_start_nss(struct ath11k_base *ab,
206 					       struct hal_rx_desc *desc)
207 {
208 	return hweight8(ab->hw_params.hw_ops->rx_desc_get_msdu_nss(desc));
209 }
210 
211 static inline u8 ath11k_dp_rx_h_mpdu_start_tid(struct ath11k_base *ab,
212 					       struct hal_rx_desc *desc)
213 {
214 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_tid(desc);
215 }
216 
217 static inline u16 ath11k_dp_rx_h_mpdu_start_peer_id(struct ath11k_base *ab,
218 						    struct hal_rx_desc *desc)
219 {
220 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_peer_id(desc);
221 }
222 
223 static inline u8 ath11k_dp_rx_h_msdu_end_l3pad(struct ath11k_base *ab,
224 					       struct hal_rx_desc *desc)
225 {
226 	return ab->hw_params.hw_ops->rx_desc_get_l3_pad_bytes(desc);
227 }
228 
229 static inline bool ath11k_dp_rx_h_msdu_end_first_msdu(struct ath11k_base *ab,
230 						      struct hal_rx_desc *desc)
231 {
232 	return ab->hw_params.hw_ops->rx_desc_get_first_msdu(desc);
233 }
234 
235 static bool ath11k_dp_rx_h_msdu_end_last_msdu(struct ath11k_base *ab,
236 					      struct hal_rx_desc *desc)
237 {
238 	return ab->hw_params.hw_ops->rx_desc_get_last_msdu(desc);
239 }
240 
241 static void ath11k_dp_rx_desc_end_tlv_copy(struct ath11k_base *ab,
242 					   struct hal_rx_desc *fdesc,
243 					   struct hal_rx_desc *ldesc)
244 {
245 	ab->hw_params.hw_ops->rx_desc_copy_attn_end_tlv(fdesc, ldesc);
246 }
247 
248 static inline u32 ath11k_dp_rxdesc_get_mpdulen_err(struct rx_attention *attn)
249 {
250 	return FIELD_GET(RX_ATTENTION_INFO1_MPDU_LEN_ERR,
251 			 __le32_to_cpu(attn->info1));
252 }
253 
254 static inline u8 *ath11k_dp_rxdesc_get_80211hdr(struct ath11k_base *ab,
255 						struct hal_rx_desc *rx_desc)
256 {
257 	u8 *rx_pkt_hdr;
258 
259 	rx_pkt_hdr = ab->hw_params.hw_ops->rx_desc_get_msdu_payload(rx_desc);
260 
261 	return rx_pkt_hdr;
262 }
263 
264 static inline bool ath11k_dp_rxdesc_mpdu_valid(struct ath11k_base *ab,
265 					       struct hal_rx_desc *rx_desc)
266 {
267 	u32 tlv_tag;
268 
269 	tlv_tag = ab->hw_params.hw_ops->rx_desc_get_mpdu_start_tag(rx_desc);
270 
271 	return tlv_tag == HAL_RX_MPDU_START;
272 }
273 
274 static inline u32 ath11k_dp_rxdesc_get_ppduid(struct ath11k_base *ab,
275 					      struct hal_rx_desc *rx_desc)
276 {
277 	return ab->hw_params.hw_ops->rx_desc_get_mpdu_ppdu_id(rx_desc);
278 }
279 
280 static inline void ath11k_dp_rxdesc_set_msdu_len(struct ath11k_base *ab,
281 						 struct hal_rx_desc *desc,
282 						 u16 len)
283 {
284 	ab->hw_params.hw_ops->rx_desc_set_msdu_len(desc, len);
285 }
286 
287 static bool ath11k_dp_rx_h_attn_is_mcbc(struct ath11k_base *ab,
288 					struct hal_rx_desc *desc)
289 {
290 	struct rx_attention *attn = ath11k_dp_rx_get_attention(ab, desc);
291 
292 	return ath11k_dp_rx_h_msdu_end_first_msdu(ab, desc) &&
293 		(!!FIELD_GET(RX_ATTENTION_INFO1_MCAST_BCAST,
294 		 __le32_to_cpu(attn->info1)));
295 }
296 
297 static bool ath11k_dp_rxdesc_mac_addr2_valid(struct ath11k_base *ab,
298 					     struct hal_rx_desc *desc)
299 {
300 	return ab->hw_params.hw_ops->rx_desc_mac_addr2_valid(desc);
301 }
302 
303 static u8 *ath11k_dp_rxdesc_mpdu_start_addr2(struct ath11k_base *ab,
304 					     struct hal_rx_desc *desc)
305 {
306 	return ab->hw_params.hw_ops->rx_desc_mpdu_start_addr2(desc);
307 }
308 
309 static void ath11k_dp_service_mon_ring(struct timer_list *t)
310 {
311 	struct ath11k_base *ab = timer_container_of(ab, t, mon_reap_timer);
312 	int i;
313 
314 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
315 		ath11k_dp_rx_process_mon_rings(ab, i, NULL, DP_MON_SERVICE_BUDGET);
316 
317 	mod_timer(&ab->mon_reap_timer, jiffies +
318 		  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
319 }
320 
321 static int ath11k_dp_purge_mon_ring(struct ath11k_base *ab)
322 {
323 	int i, reaped = 0;
324 	unsigned long timeout = jiffies + msecs_to_jiffies(DP_MON_PURGE_TIMEOUT_MS);
325 
326 	do {
327 		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++)
328 			reaped += ath11k_dp_rx_process_mon_rings(ab, i,
329 								 NULL,
330 								 DP_MON_SERVICE_BUDGET);
331 
332 		/* nothing more to reap */
333 		if (reaped < DP_MON_SERVICE_BUDGET)
334 			return 0;
335 
336 	} while (time_before(jiffies, timeout));
337 
338 	ath11k_warn(ab, "dp mon ring purge timeout\n");
339 
340 	return -ETIMEDOUT;
341 }
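/* The purge loop above is a common "reap until quiet or timeout" shape:
 * keep invoking a budgeted reap and stop early once a pass returns fewer
 * entries than the budget. A minimal standalone sketch of that control
 * flow; reap_once(), the pending count and the 2 s deadline are stand-ins,
 * not driver APIs.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>
#include <time.h>

#define SKETCH_BUDGET 8

static int pending = 19;	/* pretend 19 entries are queued */

static int reap_once(int budget)
{
	int n = pending < budget ? pending : budget;

	pending -= n;
	return n;
}

int main(void)
{
	time_t deadline = time(NULL) + 2;

	do {
		/* a short pass means the ring is (momentarily) empty */
		if (reap_once(SKETCH_BUDGET) < SKETCH_BUDGET) {
			puts("ring drained");
			return 0;
		}
	} while (time(NULL) < deadline);

	puts("purge timeout");
	return 1;
}
#endif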
342 
343 /* Returns number of Rx buffers replenished */
344 int ath11k_dp_rxbufs_replenish(struct ath11k_base *ab, int mac_id,
345 			       struct dp_rxdma_ring *rx_ring,
346 			       int req_entries,
347 			       enum hal_rx_buf_return_buf_manager mgr)
348 {
349 	struct hal_srng *srng;
350 	u32 *desc;
351 	struct sk_buff *skb;
352 	int num_free;
353 	int num_remain;
354 	int buf_id;
355 	u32 cookie;
356 	dma_addr_t paddr;
357 
358 	req_entries = min(req_entries, rx_ring->bufs_max);
359 
360 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
361 
362 	spin_lock_bh(&srng->lock);
363 
364 	ath11k_hal_srng_access_begin(ab, srng);
365 
366 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
367 	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
368 		req_entries = num_free;
369 
370 	req_entries = min(num_free, req_entries);
371 	num_remain = req_entries;
372 
373 	while (num_remain > 0) {
374 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
375 				    DP_RX_BUFFER_ALIGN_SIZE);
376 		if (!skb)
377 			break;
378 
379 		if (!IS_ALIGNED((unsigned long)skb->data,
380 				DP_RX_BUFFER_ALIGN_SIZE)) {
381 			skb_pull(skb,
382 				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
383 				 skb->data);
384 		}
385 
386 		paddr = dma_map_single(ab->dev, skb->data,
387 				       skb->len + skb_tailroom(skb),
388 				       DMA_FROM_DEVICE);
389 		if (dma_mapping_error(ab->dev, paddr))
390 			goto fail_free_skb;
391 
392 		spin_lock_bh(&rx_ring->idr_lock);
393 		buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 1,
394 				   (rx_ring->bufs_max * 3) + 1, GFP_ATOMIC);
395 		spin_unlock_bh(&rx_ring->idr_lock);
396 		if (buf_id <= 0)
397 			goto fail_dma_unmap;
398 
399 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
400 		if (!desc)
401 			goto fail_idr_remove;
402 
403 		ATH11K_SKB_RXCB(skb)->paddr = paddr;
404 
405 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
406 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
407 
408 		num_remain--;
409 
410 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
411 	}
412 
413 	ath11k_hal_srng_access_end(ab, srng);
414 
415 	spin_unlock_bh(&srng->lock);
416 
417 	return req_entries - num_remain;
418 
419 fail_idr_remove:
420 	spin_lock_bh(&rx_ring->idr_lock);
421 	idr_remove(&rx_ring->bufs_idr, buf_id);
422 	spin_unlock_bh(&rx_ring->idr_lock);
423 fail_dma_unmap:
424 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
425 			 DMA_FROM_DEVICE);
426 fail_free_skb:
427 	dev_kfree_skb_any(skb);
428 
429 	ath11k_hal_srng_access_end(ab, srng);
430 
431 	spin_unlock_bh(&srng->lock);
432 
433 	return req_entries - num_remain;
434 }
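/* Two details of the replenish loop are worth calling out: the payload
 * pointer is advanced to the next DP_RX_BUFFER_ALIGN_SIZE boundary before
 * DMA mapping, and each ring entry carries a cookie packing the pdev id and
 * idr buf_id so the completion path can find the skb again. A minimal
 * standalone sketch of the alignment step; malloc() stands in for
 * dev_alloc_skb() and the 128-byte alignment is assumed for illustration.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

#define SKETCH_ALIGN 128

int main(void)
{
	/* over-allocate by ALIGN so the aligned pointer still has room */
	unsigned char *raw = malloc(2048 + SKETCH_ALIGN);
	unsigned char *data;
	uintptr_t addr;

	if (!raw)
		return 1;

	/* round up to the next SKETCH_ALIGN boundary, like PTR_ALIGN() */
	addr = (uintptr_t)raw;
	data = (unsigned char *)((addr + SKETCH_ALIGN - 1) &
				 ~(uintptr_t)(SKETCH_ALIGN - 1));

	printf("raw=%p aligned=%p (skipped %zu bytes)\n",
	       (void *)raw, (void *)data, (size_t)(data - raw));
	free(raw);
	return 0;
}
#endif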
435 
436 static int ath11k_dp_rxdma_buf_ring_free(struct ath11k *ar,
437 					 struct dp_rxdma_ring *rx_ring)
438 {
439 	struct sk_buff *skb;
440 	int buf_id;
441 
442 	spin_lock_bh(&rx_ring->idr_lock);
443 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
444 		idr_remove(&rx_ring->bufs_idr, buf_id);
445 		/* TODO: Understand where the internal driver does this
446 		 * dma_unmap of the rxdma buffer.
447 		 */
448 		dma_unmap_single(ar->ab->dev, ATH11K_SKB_RXCB(skb)->paddr,
449 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
450 		dev_kfree_skb_any(skb);
451 	}
452 
453 	idr_destroy(&rx_ring->bufs_idr);
454 	spin_unlock_bh(&rx_ring->idr_lock);
455 
456 	return 0;
457 }
458 
459 static int ath11k_dp_rxdma_pdev_buf_free(struct ath11k *ar)
460 {
461 	struct ath11k_pdev_dp *dp = &ar->dp;
462 	struct ath11k_base *ab = ar->ab;
463 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
464 	int i;
465 
466 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
467 
468 	rx_ring = &dp->rxdma_mon_buf_ring;
469 	ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
470 
471 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
472 		rx_ring = &dp->rx_mon_status_refill_ring[i];
473 		ath11k_dp_rxdma_buf_ring_free(ar, rx_ring);
474 	}
475 
476 	return 0;
477 }
478 
479 static int ath11k_dp_rxdma_ring_buf_setup(struct ath11k *ar,
480 					  struct dp_rxdma_ring *rx_ring,
481 					  u32 ringtype)
482 {
483 	struct ath11k_pdev_dp *dp = &ar->dp;
484 	int num_entries;
485 
486 	num_entries = rx_ring->refill_buf_ring.size /
487 		ath11k_hal_srng_get_entrysize(ar->ab, ringtype);
488 
489 	rx_ring->bufs_max = num_entries;
490 	ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id, rx_ring, num_entries,
491 				   ar->ab->hw_params.hal_params->rx_buf_rbm);
492 	return 0;
493 }
494 
495 static int ath11k_dp_rxdma_pdev_buf_setup(struct ath11k *ar)
496 {
497 	struct ath11k_pdev_dp *dp = &ar->dp;
498 	struct ath11k_base *ab = ar->ab;
499 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
500 	int i;
501 
502 	ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_BUF);
503 
504 	if (ar->ab->hw_params.rxdma1_enable) {
505 		rx_ring = &dp->rxdma_mon_buf_ring;
506 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_BUF);
507 	}
508 
509 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
510 		rx_ring = &dp->rx_mon_status_refill_ring[i];
511 		ath11k_dp_rxdma_ring_buf_setup(ar, rx_ring, HAL_RXDMA_MONITOR_STATUS);
512 	}
513 
514 	return 0;
515 }
516 
517 static void ath11k_dp_rx_pdev_srng_free(struct ath11k *ar)
518 {
519 	struct ath11k_pdev_dp *dp = &ar->dp;
520 	struct ath11k_base *ab = ar->ab;
521 	int i;
522 
523 	ath11k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
524 
525 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
526 		if (ab->hw_params.rx_mac_buf_ring)
527 			ath11k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
528 
529 		ath11k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
530 		ath11k_dp_srng_cleanup(ab,
531 				       &dp->rx_mon_status_refill_ring[i].refill_buf_ring);
532 	}
533 
534 	ath11k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
535 }
536 
537 void ath11k_dp_pdev_reo_cleanup(struct ath11k_base *ab)
538 {
539 	struct ath11k_dp *dp = &ab->dp;
540 	int i;
541 
542 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
543 		ath11k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
544 }
545 
546 int ath11k_dp_pdev_reo_setup(struct ath11k_base *ab)
547 {
548 	struct ath11k_dp *dp = &ab->dp;
549 	int ret;
550 	int i;
551 
552 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
553 		ret = ath11k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
554 					   HAL_REO_DST, i, 0,
555 					   DP_REO_DST_RING_SIZE);
556 		if (ret) {
557 			ath11k_warn(ab, "failed to setup reo_dst_ring\n");
558 			goto err_reo_cleanup;
559 		}
560 	}
561 
562 	return 0;
563 
564 err_reo_cleanup:
565 	ath11k_dp_pdev_reo_cleanup(ab);
566 
567 	return ret;
568 }
569 
570 static int ath11k_dp_rx_pdev_srng_alloc(struct ath11k *ar)
571 {
572 	struct ath11k_pdev_dp *dp = &ar->dp;
573 	struct ath11k_base *ab = ar->ab;
574 	struct dp_srng *srng = NULL;
575 	int i;
576 	int ret;
577 
578 	ret = ath11k_dp_srng_setup(ar->ab,
579 				   &dp->rx_refill_buf_ring.refill_buf_ring,
580 				   HAL_RXDMA_BUF, 0,
581 				   dp->mac_id, DP_RXDMA_BUF_RING_SIZE);
582 	if (ret) {
583 		ath11k_warn(ar->ab, "failed to setup rx_refill_buf_ring\n");
584 		return ret;
585 	}
586 
587 	if (ar->ab->hw_params.rx_mac_buf_ring) {
588 		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
589 			ret = ath11k_dp_srng_setup(ar->ab,
590 						   &dp->rx_mac_buf_ring[i],
591 						   HAL_RXDMA_BUF, 1,
592 						   dp->mac_id + i, 1024);
593 			if (ret) {
594 				ath11k_warn(ar->ab, "failed to setup rx_mac_buf_ring %d\n",
595 					    i);
596 				return ret;
597 			}
598 		}
599 	}
600 
601 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
602 		ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_err_dst_ring[i],
603 					   HAL_RXDMA_DST, 0, dp->mac_id + i,
604 					   DP_RXDMA_ERR_DST_RING_SIZE);
605 		if (ret) {
606 			ath11k_warn(ar->ab, "failed to setup rxdma_err_dst_ring %d\n", i);
607 			return ret;
608 		}
609 	}
610 
611 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
612 		srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
613 		ret = ath11k_dp_srng_setup(ar->ab,
614 					   srng,
615 					   HAL_RXDMA_MONITOR_STATUS, 0, dp->mac_id + i,
616 					   DP_RXDMA_MON_STATUS_RING_SIZE);
617 		if (ret) {
618 			ath11k_warn(ar->ab,
619 				    "failed to setup rx_mon_status_refill_ring %d\n", i);
620 			return ret;
621 		}
622 	}
623 
624 	/* If rxdma1_enable is false, there is no need to set up
625 	 * rxdma_mon_buf_ring, rxdma_mon_dst_ring and
626 	 * rxdma_mon_desc_ring; only the mon status reap timer is
627 	 * initialized (for QCA6390).
628 	 */
629 	if (!ar->ab->hw_params.rxdma1_enable) {
630 		/* init mon status buffer reap timer */
631 		timer_setup(&ar->ab->mon_reap_timer,
632 			    ath11k_dp_service_mon_ring, 0);
633 		return 0;
634 	}
635 
636 	ret = ath11k_dp_srng_setup(ar->ab,
637 				   &dp->rxdma_mon_buf_ring.refill_buf_ring,
638 				   HAL_RXDMA_MONITOR_BUF, 0, dp->mac_id,
639 				   DP_RXDMA_MONITOR_BUF_RING_SIZE);
640 	if (ret) {
641 		ath11k_warn(ar->ab,
642 			    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
643 		return ret;
644 	}
645 
646 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_dst_ring,
647 				   HAL_RXDMA_MONITOR_DST, 0, dp->mac_id,
648 				   DP_RXDMA_MONITOR_DST_RING_SIZE);
649 	if (ret) {
650 		ath11k_warn(ar->ab,
651 			    "failed to setup HAL_RXDMA_MONITOR_DST\n");
652 		return ret;
653 	}
654 
655 	ret = ath11k_dp_srng_setup(ar->ab, &dp->rxdma_mon_desc_ring,
656 				   HAL_RXDMA_MONITOR_DESC, 0, dp->mac_id,
657 				   DP_RXDMA_MONITOR_DESC_RING_SIZE);
658 	if (ret) {
659 		ath11k_warn(ar->ab,
660 			    "failed to setup HAL_RXDMA_MONITOR_DESC\n");
661 		return ret;
662 	}
663 
664 	return 0;
665 }
666 
667 void ath11k_dp_reo_cmd_list_cleanup(struct ath11k_base *ab)
668 {
669 	struct ath11k_dp *dp = &ab->dp;
670 	struct dp_reo_cmd *cmd, *tmp;
671 	struct dp_reo_cache_flush_elem *cmd_cache, *tmp_cache;
672 	struct dp_rx_tid *rx_tid;
673 
674 	spin_lock_bh(&dp->reo_cmd_lock);
675 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
676 		list_del(&cmd->list);
677 		rx_tid = &cmd->data;
678 		if (rx_tid->vaddr_unaligned) {
679 			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
680 					     rx_tid->vaddr_unaligned,
681 					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
682 			rx_tid->vaddr_unaligned = NULL;
683 		}
684 		kfree(cmd);
685 	}
686 
687 	list_for_each_entry_safe(cmd_cache, tmp_cache,
688 				 &dp->reo_cmd_cache_flush_list, list) {
689 		list_del(&cmd_cache->list);
690 		dp->reo_cmd_cache_flush_count--;
691 		rx_tid = &cmd_cache->data;
692 		if (rx_tid->vaddr_unaligned) {
693 			dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
694 					     rx_tid->vaddr_unaligned,
695 					     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
696 			rx_tid->vaddr_unaligned = NULL;
697 		}
698 		kfree(cmd_cache);
699 	}
700 	spin_unlock_bh(&dp->reo_cmd_lock);
701 }
702 
703 static void ath11k_dp_reo_cmd_free(struct ath11k_dp *dp, void *ctx,
704 				   enum hal_reo_cmd_status status)
705 {
706 	struct dp_rx_tid *rx_tid = ctx;
707 
708 	if (status != HAL_REO_CMD_SUCCESS)
709 		ath11k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
710 			    rx_tid->tid, status);
711 	if (rx_tid->vaddr_unaligned) {
712 		dma_free_noncoherent(dp->ab->dev, rx_tid->unaligned_size,
713 				     rx_tid->vaddr_unaligned,
714 				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
715 		rx_tid->vaddr_unaligned = NULL;
716 	}
717 }
718 
719 static void ath11k_dp_reo_cache_flush(struct ath11k_base *ab,
720 				      struct dp_rx_tid *rx_tid)
721 {
722 	struct ath11k_hal_reo_cmd cmd = {};
723 	unsigned long tot_desc_sz, desc_sz;
724 	int ret;
725 
726 	tot_desc_sz = rx_tid->size;
727 	desc_sz = ath11k_hal_reo_qdesc_size(0, HAL_DESC_REO_NON_QOS_TID);
728 
729 	while (tot_desc_sz > desc_sz) {
730 		tot_desc_sz -= desc_sz;
731 		cmd.addr_lo = lower_32_bits(rx_tid->paddr + tot_desc_sz);
732 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
733 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
734 						HAL_REO_CMD_FLUSH_CACHE, &cmd,
735 						NULL);
736 		if (ret)
737 			ath11k_warn(ab,
738 				    "failed to send HAL_REO_CMD_FLUSH_CACHE, tid %d (%d)\n",
739 				    rx_tid->tid, ret);
740 	}
741 
742 	memset(&cmd, 0, sizeof(cmd));
743 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
744 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
745 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
746 	ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
747 					HAL_REO_CMD_FLUSH_CACHE,
748 					&cmd, ath11k_dp_reo_cmd_free);
749 	if (ret) {
750 		ath11k_err(ab, "failed to send HAL_REO_CMD_FLUSH_CACHE cmd, tid %d (%d)\n",
751 			   rx_tid->tid, ret);
752 		dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
753 				     rx_tid->vaddr_unaligned,
754 				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
755 		rx_tid->vaddr_unaligned = NULL;
756 	}
757 }
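/* The flush above walks the queue descriptor from the tail down in desc_sz
 * chunks, and only the final command for the base address requests status
 * so the completion callback can free the memory. A minimal standalone
 * sketch of the chunk arithmetic; the sizes and addresses are made up and
 * flush() just prints what the driver would send.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

static void flush(uint64_t paddr, int need_status)
{
	printf("flush 0x%llx%s\n", (unsigned long long)paddr,
	       need_status ? " (need status)" : "");
}

int main(void)
{
	uint64_t base = 0x10000;
	unsigned long tot = 1000, chunk = 320;	/* hypothetical sizes */

	while (tot > chunk) {
		tot -= chunk;
		flush(base + tot, 0);	/* tail chunks, fire and forget */
	}
	flush(base, 1);			/* final flush covers the head */
	return 0;
}
#endif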
758 
759 static void ath11k_dp_rx_tid_del_func(struct ath11k_dp *dp, void *ctx,
760 				      enum hal_reo_cmd_status status)
761 {
762 	struct ath11k_base *ab = dp->ab;
763 	struct dp_rx_tid *rx_tid = ctx;
764 	struct dp_reo_cache_flush_elem *elem, *tmp;
765 
766 	if (status == HAL_REO_CMD_DRAIN) {
767 		goto free_desc;
768 	} else if (status != HAL_REO_CMD_SUCCESS) {
769 		/* Shouldn't happen! Cleanup in case of other failure? */
770 		ath11k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
771 			    rx_tid->tid, status);
772 		return;
773 	}
774 
775 	elem = kzalloc_obj(*elem, GFP_ATOMIC);
776 	if (!elem)
777 		goto free_desc;
778 
779 	elem->ts = jiffies;
780 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
781 
782 	spin_lock_bh(&dp->reo_cmd_lock);
783 	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
784 	dp->reo_cmd_cache_flush_count++;
785 
786 	/* Flush and invalidate aged REO desc from HW cache */
787 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
788 				 list) {
789 		if (dp->reo_cmd_cache_flush_count > DP_REO_DESC_FREE_THRESHOLD ||
790 		    time_after(jiffies, elem->ts +
791 			       msecs_to_jiffies(DP_REO_DESC_FREE_TIMEOUT_MS))) {
792 			list_del(&elem->list);
793 			dp->reo_cmd_cache_flush_count--;
794 			spin_unlock_bh(&dp->reo_cmd_lock);
795 
796 			ath11k_dp_reo_cache_flush(ab, &elem->data);
797 			kfree(elem);
798 			spin_lock_bh(&dp->reo_cmd_lock);
799 		}
800 	}
801 	spin_unlock_bh(&dp->reo_cmd_lock);
802 
803 	return;
804 free_desc:
805 	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size,
806 			     rx_tid->vaddr_unaligned,
807 			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
808 	rx_tid->vaddr_unaligned = NULL;
809 }
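/* The descriptor free above is deferred through an aging list: entries are
 * timestamped on insert and flushed once the list grows past a threshold or
 * an entry exceeds the timeout, mirroring DP_REO_DESC_FREE_THRESHOLD and
 * DP_REO_DESC_FREE_TIMEOUT_MS. A minimal standalone sketch of that policy;
 * the threshold, timeout and a plain array replace the kernel list.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>
#include <time.h>

#define SKETCH_THRESHOLD 4
#define SKETCH_TIMEOUT_S 60

struct elem {
	time_t ts;
	int live;
};

static void age_out(struct elem *list, int n, int count, time_t now)
{
	for (int i = 0; i < n; i++) {
		if (!list[i].live)
			continue;
		/* flush when over the depth threshold or simply too old */
		if (count > SKETCH_THRESHOLD ||
		    now > list[i].ts + SKETCH_TIMEOUT_S) {
			list[i].live = 0;
			count--;
			printf("flushed entry %d\n", i);
		}
	}
}

int main(void)
{
	struct elem list[6];
	time_t now = time(NULL);

	for (int i = 0; i < 6; i++) {
		list[i].live = 1;
		/* make the first two entries already stale */
		list[i].ts = (i < 2) ? now - 2 * SKETCH_TIMEOUT_S : now;
	}
	age_out(list, 6, 6, now);
	return 0;
}
#endif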
810 
811 void ath11k_peer_rx_tid_delete(struct ath11k *ar,
812 			       struct ath11k_peer *peer, u8 tid)
813 {
814 	struct ath11k_hal_reo_cmd cmd = {};
815 	struct dp_rx_tid *rx_tid = &peer->rx_tid[tid];
816 	int ret;
817 
818 	if (!rx_tid->active)
819 		return;
820 
821 	rx_tid->active = false;
822 
823 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
824 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
825 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
826 	cmd.upd0 |= HAL_REO_CMD_UPD0_VLD;
827 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
828 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
829 					ath11k_dp_rx_tid_del_func);
830 	if (ret) {
831 		if (ret != -ESHUTDOWN)
832 			ath11k_err(ar->ab, "failed to send HAL_REO_CMD_UPDATE_RX_QUEUE cmd, tid %d (%d)\n",
833 				   tid, ret);
834 		dma_free_noncoherent(ar->ab->dev, rx_tid->unaligned_size,
835 				     rx_tid->vaddr_unaligned,
836 				     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
837 		rx_tid->vaddr_unaligned = NULL;
838 	}
839 
840 	rx_tid->paddr = 0;
841 	rx_tid->paddr_unaligned = 0;
842 	rx_tid->size = 0;
843 	rx_tid->unaligned_size = 0;
844 }
845 
846 static int ath11k_dp_rx_link_desc_return(struct ath11k_base *ab,
847 					 u32 *link_desc,
848 					 enum hal_wbm_rel_bm_act action)
849 {
850 	struct ath11k_dp *dp = &ab->dp;
851 	struct hal_srng *srng;
852 	u32 *desc;
853 	int ret = 0;
854 
855 	srng = &ab->hal.srng_list[dp->wbm_desc_rel_ring.ring_id];
856 
857 	spin_lock_bh(&srng->lock);
858 
859 	ath11k_hal_srng_access_begin(ab, srng);
860 
861 	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
862 	if (!desc) {
863 		ret = -ENOBUFS;
864 		goto exit;
865 	}
866 
867 	ath11k_hal_rx_msdu_link_desc_set(ab, (void *)desc, (void *)link_desc,
868 					 action);
869 
870 exit:
871 	ath11k_hal_srng_access_end(ab, srng);
872 
873 	spin_unlock_bh(&srng->lock);
874 
875 	return ret;
876 }
877 
878 static void ath11k_dp_rx_frags_cleanup(struct dp_rx_tid *rx_tid, bool rel_link_desc)
879 {
880 	struct ath11k_base *ab = rx_tid->ab;
881 
882 	lockdep_assert_held(&ab->base_lock);
883 
884 	if (rx_tid->dst_ring_desc) {
885 		if (rel_link_desc)
886 			ath11k_dp_rx_link_desc_return(ab, (u32 *)rx_tid->dst_ring_desc,
887 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
888 		kfree(rx_tid->dst_ring_desc);
889 		rx_tid->dst_ring_desc = NULL;
890 	}
891 
892 	rx_tid->cur_sn = 0;
893 	rx_tid->last_frag_no = 0;
894 	rx_tid->rx_frag_bitmap = 0;
895 	__skb_queue_purge(&rx_tid->rx_frags);
896 }
897 
898 void ath11k_peer_frags_flush(struct ath11k *ar, struct ath11k_peer *peer)
899 {
900 	struct dp_rx_tid *rx_tid;
901 	int i;
902 
903 	lockdep_assert_held(&ar->ab->base_lock);
904 
905 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
906 		rx_tid = &peer->rx_tid[i];
907 
908 		spin_unlock_bh(&ar->ab->base_lock);
909 		timer_delete_sync(&rx_tid->frag_timer);
910 		spin_lock_bh(&ar->ab->base_lock);
911 
912 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
913 	}
914 }
915 
916 void ath11k_peer_rx_tid_cleanup(struct ath11k *ar, struct ath11k_peer *peer)
917 {
918 	struct dp_rx_tid *rx_tid;
919 	int i;
920 
921 	lockdep_assert_held(&ar->ab->base_lock);
922 
923 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
924 		rx_tid = &peer->rx_tid[i];
925 
926 		ath11k_peer_rx_tid_delete(ar, peer, i);
927 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
928 
929 		spin_unlock_bh(&ar->ab->base_lock);
930 		timer_delete_sync(&rx_tid->frag_timer);
931 		spin_lock_bh(&ar->ab->base_lock);
932 	}
933 }
934 
935 static int ath11k_peer_rx_tid_reo_update(struct ath11k *ar,
936 					 struct ath11k_peer *peer,
937 					 struct dp_rx_tid *rx_tid,
938 					 u32 ba_win_sz, u16 ssn,
939 					 bool update_ssn)
940 {
941 	struct ath11k_hal_reo_cmd cmd = {};
942 	int ret;
943 
944 	cmd.addr_lo = lower_32_bits(rx_tid->paddr);
945 	cmd.addr_hi = upper_32_bits(rx_tid->paddr);
946 	cmd.flag = HAL_REO_CMD_FLG_NEED_STATUS;
947 	cmd.upd0 = HAL_REO_CMD_UPD0_BA_WINDOW_SIZE;
948 	cmd.ba_window_size = ba_win_sz;
949 
950 	if (update_ssn) {
951 		cmd.upd0 |= HAL_REO_CMD_UPD0_SSN;
952 		cmd.upd2 = FIELD_PREP(HAL_REO_CMD_UPD2_SSN, ssn);
953 	}
954 
955 	ret = ath11k_dp_tx_send_reo_cmd(ar->ab, rx_tid,
956 					HAL_REO_CMD_UPDATE_RX_QUEUE, &cmd,
957 					NULL);
958 	if (ret) {
959 		ath11k_warn(ar->ab, "failed to update rx tid queue, tid %d (%d)\n",
960 			    rx_tid->tid, ret);
961 		return ret;
962 	}
963 
964 	rx_tid->ba_win_sz = ba_win_sz;
965 
966 	return 0;
967 }
968 
969 static void ath11k_dp_rx_tid_mem_free(struct ath11k_base *ab,
970 				      const u8 *peer_mac, int vdev_id, u8 tid)
971 {
972 	struct ath11k_peer *peer;
973 	struct dp_rx_tid *rx_tid;
974 
975 	spin_lock_bh(&ab->base_lock);
976 
977 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
978 	if (!peer) {
979 		ath11k_warn(ab, "failed to find the peer to free up rx tid mem\n");
980 		goto unlock_exit;
981 	}
982 
983 	rx_tid = &peer->rx_tid[tid];
984 	if (!rx_tid->active)
985 		goto unlock_exit;
986 
987 	dma_free_noncoherent(ab->dev, rx_tid->unaligned_size, rx_tid->vaddr_unaligned,
988 			     rx_tid->paddr_unaligned, DMA_BIDIRECTIONAL);
989 	rx_tid->vaddr_unaligned = NULL;
990 
991 	rx_tid->active = false;
992 
993 unlock_exit:
994 	spin_unlock_bh(&ab->base_lock);
995 }
996 
997 int ath11k_peer_rx_tid_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id,
998 			     u8 tid, u32 ba_win_sz, u16 ssn,
999 			     enum hal_pn_type pn_type)
1000 {
1001 	struct ath11k_base *ab = ar->ab;
1002 	struct ath11k_peer *peer;
1003 	struct dp_rx_tid *rx_tid;
1004 	u32 hw_desc_sz, *vaddr;
1005 	void *vaddr_unaligned;
1006 	dma_addr_t paddr;
1007 	int ret;
1008 
1009 	spin_lock_bh(&ab->base_lock);
1010 
1011 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
1012 	if (!peer) {
1013 		ath11k_warn(ab, "failed to find the peer %pM to set up rx tid\n",
1014 			    peer_mac);
1015 		spin_unlock_bh(&ab->base_lock);
1016 		return -ENOENT;
1017 	}
1018 
1019 	rx_tid = &peer->rx_tid[tid];
1020 	/* Update the tid queue if it is already setup */
1021 	if (rx_tid->active) {
1022 		paddr = rx_tid->paddr;
1023 		ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid,
1024 						    ba_win_sz, ssn, true);
1025 		spin_unlock_bh(&ab->base_lock);
1026 		if (ret) {
1027 			ath11k_warn(ab, "failed to update reo for peer %pM rx tid %d\n: %d",
1028 				    peer_mac, tid, ret);
1029 			return ret;
1030 		}
1031 
1032 		ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1033 							     peer_mac, paddr,
1034 							     tid, 1, ba_win_sz);
1035 		if (ret)
1036 			ath11k_warn(ab, "failed to send wmi rx reorder queue for peer %pM tid %d: %d\n",
1037 				    peer_mac, tid, ret);
1038 		return ret;
1039 	}
1040 
1041 	rx_tid->tid = tid;
1042 
1043 	rx_tid->ba_win_sz = ba_win_sz;
1044 
1045 	/* TODO: Optimize the memory allocation for qos tid based on
1046 	 * the actual BA window size in REO tid update path.
1047 	 */
1048 	if (tid == HAL_DESC_REO_NON_QOS_TID)
1049 		hw_desc_sz = ath11k_hal_reo_qdesc_size(ba_win_sz, tid);
1050 	else
1051 		hw_desc_sz = ath11k_hal_reo_qdesc_size(DP_BA_WIN_SZ_MAX, tid);
1052 
1053 	rx_tid->unaligned_size = hw_desc_sz + HAL_LINK_DESC_ALIGN - 1;
1054 	vaddr_unaligned = dma_alloc_noncoherent(ab->dev, rx_tid->unaligned_size, &paddr,
1055 						DMA_BIDIRECTIONAL, GFP_ATOMIC);
1056 	if (!vaddr_unaligned) {
1057 		spin_unlock_bh(&ab->base_lock);
1058 		return -ENOMEM;
1059 	}
1060 
1061 	rx_tid->vaddr_unaligned = vaddr_unaligned;
1062 	vaddr = PTR_ALIGN(vaddr_unaligned, HAL_LINK_DESC_ALIGN);
1063 	rx_tid->paddr_unaligned = paddr;
1064 	rx_tid->paddr = rx_tid->paddr_unaligned + ((unsigned long)vaddr -
1065 			(unsigned long)rx_tid->vaddr_unaligned);
1066 	ath11k_hal_reo_qdesc_setup(vaddr, tid, ba_win_sz, ssn, pn_type);
1067 	rx_tid->size = hw_desc_sz;
1068 	rx_tid->active = true;
1069 
1070 	/* After dma_alloc_noncoherent(), vaddr is modified by the reo qdesc
1071 	 * setup. Since these CPU writes are not yet visible to the device,
1072 	 * the driver must explicitly call dma_sync_single_for_device().
1073 	 */
1074 	dma_sync_single_for_device(ab->dev, rx_tid->paddr,
1075 				   rx_tid->size,
1076 				   DMA_TO_DEVICE);
1077 	spin_unlock_bh(&ab->base_lock);
1078 
1079 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac, rx_tid->paddr,
1080 						     tid, 1, ba_win_sz);
1081 	if (ret) {
1082 		ath11k_warn(ar->ab, "failed to setup rx reorder queue for peer %pM tid %d: %d\n",
1083 			    peer_mac, tid, ret);
1084 		ath11k_dp_rx_tid_mem_free(ab, peer_mac, vdev_id, tid);
1085 	}
1086 
1087 	return ret;
1088 }
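/* The qdesc allocation above over-allocates by HAL_LINK_DESC_ALIGN - 1,
 * aligns the CPU pointer with PTR_ALIGN(), and applies the same byte offset
 * to the DMA address so vaddr and paddr stay in sync. A minimal standalone
 * sketch of that offset math; the 128-byte alignment and both addresses are
 * hypothetical values for illustration.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

#define SKETCH_ALIGN 128

int main(void)
{
	uintptr_t vaddr_unaligned = 0x10001010;	/* pretend CPU addr */
	uint64_t paddr_unaligned = 0x80001010;	/* pretend DMA addr */
	uintptr_t vaddr;
	uint64_t paddr;

	/* round the CPU pointer up, then shift the DMA address equally */
	vaddr = (vaddr_unaligned + SKETCH_ALIGN - 1) &
		~(uintptr_t)(SKETCH_ALIGN - 1);
	paddr = paddr_unaligned + (vaddr - vaddr_unaligned);

	printf("offset=%lu vaddr=0x%lx paddr=0x%llx\n",
	       (unsigned long)(vaddr - vaddr_unaligned),
	       (unsigned long)vaddr, (unsigned long long)paddr);
	return 0;
}
#endif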
1089 
1090 int ath11k_dp_rx_ampdu_start(struct ath11k *ar,
1091 			     struct ieee80211_ampdu_params *params)
1092 {
1093 	struct ath11k_base *ab = ar->ab;
1094 	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
1095 	int vdev_id = arsta->arvif->vdev_id;
1096 	int ret;
1097 
1098 	ret = ath11k_peer_rx_tid_setup(ar, params->sta->addr, vdev_id,
1099 				       params->tid, params->buf_size,
1100 				       params->ssn, arsta->pn_type);
1101 	if (ret)
1102 		ath11k_warn(ab, "failed to setup rx tid %d\n", ret);
1103 
1104 	return ret;
1105 }
1106 
1107 int ath11k_dp_rx_ampdu_stop(struct ath11k *ar,
1108 			    struct ieee80211_ampdu_params *params)
1109 {
1110 	struct ath11k_base *ab = ar->ab;
1111 	struct ath11k_peer *peer;
1112 	struct ath11k_sta *arsta = ath11k_sta_to_arsta(params->sta);
1113 	struct dp_rx_tid *rx_tid;
1114 	int vdev_id = arsta->arvif->vdev_id;
1115 	int ret;
1116 
1117 	spin_lock_bh(&ab->base_lock);
1118 
1119 	peer = ath11k_peer_find(ab, vdev_id, params->sta->addr);
1120 	if (!peer) {
1121 		ath11k_warn(ab, "failed to find the peer to stop rx aggregation\n");
1122 		spin_unlock_bh(&ab->base_lock);
1123 		return -ENOENT;
1124 	}
1125 
1126 	rx_tid = &peer->rx_tid[params->tid];
1127 
1128 	if (!rx_tid->active) {
1129 		spin_unlock_bh(&ab->base_lock);
1130 		return 0;
1131 	}
1132 
1133 	ret = ath11k_peer_rx_tid_reo_update(ar, peer, rx_tid, 1, 0, false);
1134 	spin_unlock_bh(&ab->base_lock);
1135 	if (ret) {
1136 		ath11k_warn(ab, "failed to update reo for rx tid %d: %d\n",
1137 			    params->tid, ret);
1138 		return ret;
1139 	}
1140 
1141 	ret = ath11k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
1142 						     params->sta->addr,
1143 						     rx_tid->paddr,
1144 						     params->tid, 1, 1);
1145 	if (ret)
1146 		ath11k_warn(ab, "failed to send wmi to delete rx tid %d\n",
1147 			    ret);
1148 
1149 	return ret;
1150 }
1151 
1152 int ath11k_dp_peer_rx_pn_replay_config(struct ath11k_vif *arvif,
1153 				       const u8 *peer_addr,
1154 				       enum set_key_cmd key_cmd,
1155 				       struct ieee80211_key_conf *key)
1156 {
1157 	struct ath11k *ar = arvif->ar;
1158 	struct ath11k_base *ab = ar->ab;
1159 	struct ath11k_hal_reo_cmd cmd = {};
1160 	struct ath11k_peer *peer;
1161 	struct dp_rx_tid *rx_tid;
1162 	u8 tid;
1163 	int ret = 0;
1164 
1165 	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
1166 	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
1167 	 * for now.
1168 	 */
1169 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
1170 		return 0;
1171 
1172 	cmd.flag |= HAL_REO_CMD_FLG_NEED_STATUS;
1173 	cmd.upd0 |= HAL_REO_CMD_UPD0_PN |
1174 		    HAL_REO_CMD_UPD0_PN_SIZE |
1175 		    HAL_REO_CMD_UPD0_PN_VALID |
1176 		    HAL_REO_CMD_UPD0_PN_CHECK |
1177 		    HAL_REO_CMD_UPD0_SVLD;
1178 
1179 	switch (key->cipher) {
1180 	case WLAN_CIPHER_SUITE_TKIP:
1181 	case WLAN_CIPHER_SUITE_CCMP:
1182 	case WLAN_CIPHER_SUITE_CCMP_256:
1183 	case WLAN_CIPHER_SUITE_GCMP:
1184 	case WLAN_CIPHER_SUITE_GCMP_256:
1185 		if (key_cmd == SET_KEY) {
1186 			cmd.upd1 |= HAL_REO_CMD_UPD1_PN_CHECK;
1187 			cmd.pn_size = 48;
1188 		}
1189 		break;
1190 	default:
1191 		break;
1192 	}
1193 
1194 	spin_lock_bh(&ab->base_lock);
1195 
1196 	peer = ath11k_peer_find(ab, arvif->vdev_id, peer_addr);
1197 	if (!peer) {
1198 		ath11k_warn(ab, "failed to find the peer to configure pn replay detection\n");
1199 		spin_unlock_bh(&ab->base_lock);
1200 		return -ENOENT;
1201 	}
1202 
1203 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
1204 		rx_tid = &peer->rx_tid[tid];
1205 		if (!rx_tid->active)
1206 			continue;
1207 		cmd.addr_lo = lower_32_bits(rx_tid->paddr);
1208 		cmd.addr_hi = upper_32_bits(rx_tid->paddr);
1209 		ret = ath11k_dp_tx_send_reo_cmd(ab, rx_tid,
1210 						HAL_REO_CMD_UPDATE_RX_QUEUE,
1211 						&cmd, NULL);
1212 		if (ret) {
1213 			ath11k_warn(ab, "failed to configure rx tid %d queue for pn replay detection %d\n",
1214 				    tid, ret);
1215 			break;
1216 		}
1217 	}
1218 
1219 	spin_unlock_bh(&ab->base_lock);
1220 
1221 	return ret;
1222 }
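/* cmd.pn_size is 48 above because the CCMP/GCMP packet number is 48 bits:
 * six bytes PN0..PN5 spread across the security header, PN0 least
 * significant. A minimal standalone sketch of assembling that 48-bit value
 * from a CCMP header; the sample PN bytes are arbitrary.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
	/* CCMP header layout: PN0 PN1 rsvd keyid PN2 PN3 PN4 PN5 */
	uint8_t hdr[8] = { 0x01, 0x02, 0x00, 0x20, 0x03, 0x04, 0x05, 0x06 };

	uint64_t pn = (uint64_t)hdr[0] |
		      (uint64_t)hdr[1] << 8 |
		      (uint64_t)hdr[4] << 16 |
		      (uint64_t)hdr[5] << 24 |
		      (uint64_t)hdr[6] << 32 |
		      (uint64_t)hdr[7] << 40;

	/* prints "48-bit PN = 0x060504030201" */
	printf("48-bit PN = 0x%012llx\n", (unsigned long long)pn);
	return 0;
}
#endif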
1223 
1224 static inline int ath11k_get_ppdu_user_index(struct htt_ppdu_stats *ppdu_stats,
1225 					     u16 peer_id)
1226 {
1227 	int i;
1228 
1229 	for (i = 0; i < HTT_PPDU_STATS_MAX_USERS - 1; i++) {
1230 		if (ppdu_stats->user_stats[i].is_valid_peer_id) {
1231 			if (peer_id == ppdu_stats->user_stats[i].peer_id)
1232 				return i;
1233 		} else {
1234 			return i;
1235 		}
1236 	}
1237 
1238 	return -EINVAL;
1239 }
1240 
1241 static int ath11k_htt_tlv_ppdu_stats_parse(struct ath11k_base *ab,
1242 					   u16 tag, u16 len, const void *ptr,
1243 					   void *data)
1244 {
1245 	struct htt_ppdu_stats_info *ppdu_info;
1246 	struct htt_ppdu_user_stats *user_stats;
1247 	int cur_user;
1248 	u16 peer_id;
1249 
1250 	ppdu_info = data;
1251 
1252 	switch (tag) {
1253 	case HTT_PPDU_STATS_TAG_COMMON:
1254 		if (len < sizeof(struct htt_ppdu_stats_common)) {
1255 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1256 				    len, tag);
1257 			return -EINVAL;
1258 		}
1259 		memcpy((void *)&ppdu_info->ppdu_stats.common, ptr,
1260 		       sizeof(struct htt_ppdu_stats_common));
1261 		break;
1262 	case HTT_PPDU_STATS_TAG_USR_RATE:
1263 		if (len < sizeof(struct htt_ppdu_stats_user_rate)) {
1264 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1265 				    len, tag);
1266 			return -EINVAL;
1267 		}
1268 
1269 		peer_id = ((struct htt_ppdu_stats_user_rate *)ptr)->sw_peer_id;
1270 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1271 						      peer_id);
1272 		if (cur_user < 0)
1273 			return -EINVAL;
1274 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1275 		user_stats->peer_id = peer_id;
1276 		user_stats->is_valid_peer_id = true;
1277 		memcpy((void *)&user_stats->rate, ptr,
1278 		       sizeof(struct htt_ppdu_stats_user_rate));
1279 		user_stats->tlv_flags |= BIT(tag);
1280 		break;
1281 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON:
1282 		if (len < sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn)) {
1283 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1284 				    len, tag);
1285 			return -EINVAL;
1286 		}
1287 
1288 		peer_id = ((struct htt_ppdu_stats_usr_cmpltn_cmn *)ptr)->sw_peer_id;
1289 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1290 						      peer_id);
1291 		if (cur_user < 0)
1292 			return -EINVAL;
1293 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1294 		user_stats->peer_id = peer_id;
1295 		user_stats->is_valid_peer_id = true;
1296 		memcpy((void *)&user_stats->cmpltn_cmn, ptr,
1297 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_cmn));
1298 		user_stats->tlv_flags |= BIT(tag);
1299 		break;
1300 	case HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS:
1301 		if (len <
1302 		    sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status)) {
1303 			ath11k_warn(ab, "Invalid len %d for the tag 0x%x\n",
1304 				    len, tag);
1305 			return -EINVAL;
1306 		}
1307 
1308 		peer_id =
1309 		((struct htt_ppdu_stats_usr_cmpltn_ack_ba_status *)ptr)->sw_peer_id;
1310 		cur_user = ath11k_get_ppdu_user_index(&ppdu_info->ppdu_stats,
1311 						      peer_id);
1312 		if (cur_user < 0)
1313 			return -EINVAL;
1314 		user_stats = &ppdu_info->ppdu_stats.user_stats[cur_user];
1315 		user_stats->peer_id = peer_id;
1316 		user_stats->is_valid_peer_id = true;
1317 		memcpy((void *)&user_stats->ack_ba, ptr,
1318 		       sizeof(struct htt_ppdu_stats_usr_cmpltn_ack_ba_status));
1319 		user_stats->tlv_flags |= BIT(tag);
1320 		break;
1321 	}
1322 	return 0;
1323 }
1324 
1325 int ath11k_dp_htt_tlv_iter(struct ath11k_base *ab, const void *ptr, size_t len,
1326 			   int (*iter)(struct ath11k_base *ar, u16 tag, u16 len,
1327 				       const void *ptr, void *data),
1328 			   void *data)
1329 {
1330 	const struct htt_tlv *tlv;
1331 	const void *begin = ptr;
1332 	u16 tlv_tag, tlv_len;
1333 	int ret = -EINVAL;
1334 
1335 	while (len > 0) {
1336 		if (len < sizeof(*tlv)) {
1337 			ath11k_err(ab, "htt tlv parse failure at byte %zd (%zu bytes left, %zu expected)\n",
1338 				   ptr - begin, len, sizeof(*tlv));
1339 			return -EINVAL;
1340 		}
1341 		tlv = (struct htt_tlv *)ptr;
1342 		tlv_tag = FIELD_GET(HTT_TLV_TAG, tlv->header);
1343 		tlv_len = FIELD_GET(HTT_TLV_LEN, tlv->header);
1344 		ptr += sizeof(*tlv);
1345 		len -= sizeof(*tlv);
1346 
1347 		if (tlv_len > len) {
1348 			ath11k_err(ab, "htt tlv parse failure of tag %u at byte %zd (%zu bytes left, %u expected)\n",
1349 				   tlv_tag, ptr - begin, len, tlv_len);
1350 			return -EINVAL;
1351 		}
1352 		ret = iter(ab, tlv_tag, tlv_len, ptr, data);
1353 		if (ret == -ENOMEM)
1354 			return ret;
1355 
1356 		ptr += tlv_len;
1357 		len -= tlv_len;
1358 	}
1359 	return 0;
1360 }
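/* The iterator above is a classic guarded TLV walk: verify a header fits,
 * decode tag/len, verify the payload fits, hand it to the callback, then
 * advance. A minimal standalone sketch of the same walk over a byte buffer;
 * the 4-byte little-endian tag/len header is a simplification of the real
 * HTT header, which packs both fields into one 32-bit word.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static int walk_tlvs(const uint8_t *ptr, size_t len)
{
	while (len > 0) {
		uint16_t tag, tlen;

		if (len < 4)
			return -1;	/* truncated header */
		memcpy(&tag, ptr, 2);	/* assumes little-endian host */
		memcpy(&tlen, ptr + 2, 2);
		ptr += 4;
		len -= 4;

		if (tlen > len)
			return -1;	/* payload overruns the buffer */

		printf("tag %u, %u byte payload\n",
		       (unsigned int)tag, (unsigned int)tlen);
		ptr += tlen;
		len -= tlen;
	}
	return 0;
}

int main(void)
{
	/* two TLVs: tag 1 with 2 bytes, then tag 7 with 1 byte */
	uint8_t buf[] = { 1, 0, 2, 0, 0xaa, 0xbb, 7, 0, 1, 0, 0xcc };

	return walk_tlvs(buf, sizeof(buf));
}
#endif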
1361 
1362 static void
1363 ath11k_update_per_peer_tx_stats(struct ath11k *ar,
1364 				struct htt_ppdu_stats *ppdu_stats, u8 user)
1365 {
1366 	struct ath11k_base *ab = ar->ab;
1367 	struct ath11k_peer *peer;
1368 	struct ieee80211_sta *sta;
1369 	struct ath11k_sta *arsta;
1370 	struct htt_ppdu_stats_user_rate *user_rate;
1371 	struct ath11k_per_peer_tx_stats *peer_stats = &ar->peer_tx_stats;
1372 	struct htt_ppdu_user_stats *usr_stats = &ppdu_stats->user_stats[user];
1373 	struct htt_ppdu_stats_common *common = &ppdu_stats->common;
1374 	int ret;
1375 	u8 flags, mcs, nss, bw, sgi, dcm, rate_idx = 0;
1376 	u32 succ_bytes = 0;
1377 	u16 rate = 0, succ_pkts = 0;
1378 	u32 tx_duration = 0;
1379 	u8 tid = HTT_PPDU_STATS_NON_QOS_TID;
1380 	bool is_ampdu = false;
1381 
1382 	if (!(usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_RATE)))
1383 		return;
1384 
1385 	if (usr_stats->tlv_flags & BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_COMMON))
1386 		is_ampdu =
1387 			HTT_USR_CMPLTN_IS_AMPDU(usr_stats->cmpltn_cmn.flags);
1388 
1389 	if (usr_stats->tlv_flags &
1390 	    BIT(HTT_PPDU_STATS_TAG_USR_COMPLTN_ACK_BA_STATUS)) {
1391 		succ_bytes = usr_stats->ack_ba.success_bytes;
1392 		succ_pkts = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_NUM_MSDU_M,
1393 				      usr_stats->ack_ba.info);
1394 		tid = FIELD_GET(HTT_PPDU_STATS_ACK_BA_INFO_TID_NUM,
1395 				usr_stats->ack_ba.info);
1396 	}
1397 
1398 	if (common->fes_duration_us)
1399 		tx_duration = common->fes_duration_us;
1400 
1401 	user_rate = &usr_stats->rate;
1402 	flags = HTT_USR_RATE_PREAMBLE(user_rate->rate_flags);
1403 	bw = HTT_USR_RATE_BW(user_rate->rate_flags) - 2;
1404 	nss = HTT_USR_RATE_NSS(user_rate->rate_flags) + 1;
1405 	mcs = HTT_USR_RATE_MCS(user_rate->rate_flags);
1406 	sgi = HTT_USR_RATE_GI(user_rate->rate_flags);
1407 	dcm = HTT_USR_RATE_DCM(user_rate->rate_flags);
1408 
1409 	/* Note: If the host configured fixed rates, and in some other
1410 	 * special cases, broadcast/management frames are sent at different
1411 	 * rates. Should firmware rate control be skipped for these?
1412 	 */
1413 
1414 	if (flags == WMI_RATE_PREAMBLE_HE && mcs > ATH11K_HE_MCS_MAX) {
1415 		ath11k_warn(ab, "Invalid HE mcs %d peer stats",  mcs);
1416 		return;
1417 	}
1418 
1419 	if (flags == WMI_RATE_PREAMBLE_VHT && mcs > ATH11K_VHT_MCS_MAX) {
1420 		ath11k_warn(ab, "Invalid VHT mcs %d peer stats",  mcs);
1421 		return;
1422 	}
1423 
1424 	if (flags == WMI_RATE_PREAMBLE_HT && (mcs > ATH11K_HT_MCS_MAX || nss < 1)) {
1425 		ath11k_warn(ab, "Invalid HT mcs %d nss %d peer stats",
1426 			    mcs, nss);
1427 		return;
1428 	}
1429 
1430 	if (flags == WMI_RATE_PREAMBLE_CCK || flags == WMI_RATE_PREAMBLE_OFDM) {
1431 		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
1432 							    flags,
1433 							    &rate_idx,
1434 							    &rate);
1435 		if (ret < 0)
1436 			return;
1437 	}
1438 
1439 	rcu_read_lock();
1440 	spin_lock_bh(&ab->base_lock);
1441 	peer = ath11k_peer_find_by_id(ab, usr_stats->peer_id);
1442 
1443 	if (!peer || !peer->sta) {
1444 		spin_unlock_bh(&ab->base_lock);
1445 		rcu_read_unlock();
1446 		return;
1447 	}
1448 
1449 	sta = peer->sta;
1450 	arsta = ath11k_sta_to_arsta(sta);
1451 
1452 	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
1453 
1454 	switch (flags) {
1455 	case WMI_RATE_PREAMBLE_OFDM:
1456 		arsta->txrate.legacy = rate;
1457 		break;
1458 	case WMI_RATE_PREAMBLE_CCK:
1459 		arsta->txrate.legacy = rate;
1460 		break;
1461 	case WMI_RATE_PREAMBLE_HT:
1462 		arsta->txrate.mcs = mcs + 8 * (nss - 1);
1463 		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
1464 		if (sgi)
1465 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1466 		break;
1467 	case WMI_RATE_PREAMBLE_VHT:
1468 		arsta->txrate.mcs = mcs;
1469 		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
1470 		if (sgi)
1471 			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
1472 		break;
1473 	case WMI_RATE_PREAMBLE_HE:
1474 		arsta->txrate.mcs = mcs;
1475 		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
1476 		arsta->txrate.he_dcm = dcm;
1477 		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
1478 		arsta->txrate.he_ru_alloc = ath11k_mac_phy_he_ru_to_nl80211_he_ru_alloc
1479 						((user_rate->ru_end -
1480 						 user_rate->ru_start) + 1);
1481 		break;
1482 	}
1483 
1484 	arsta->txrate.nss = nss;
1485 
1486 	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
1487 	arsta->tx_duration += tx_duration;
1488 	memcpy(&arsta->last_txrate, &arsta->txrate, sizeof(struct rate_info));
1489 
1490 	/* PPDU stats reported for mgmt packet doesn't have valid tx bytes.
1491 	 * So skip peer stats update for mgmt packets.
1492 	 */
1493 	if (tid < HTT_PPDU_STATS_NON_QOS_TID) {
1494 		memset(peer_stats, 0, sizeof(*peer_stats));
1495 		peer_stats->succ_pkts = succ_pkts;
1496 		peer_stats->succ_bytes = succ_bytes;
1497 		peer_stats->is_ampdu = is_ampdu;
1498 		peer_stats->duration = tx_duration;
1499 		peer_stats->ba_fails =
1500 			HTT_USR_CMPLTN_LONG_RETRY(usr_stats->cmpltn_cmn.flags) +
1501 			HTT_USR_CMPLTN_SHORT_RETRY(usr_stats->cmpltn_cmn.flags);
1502 
1503 		if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
1504 			ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);
1505 	}
1506 
1507 	spin_unlock_bh(&ab->base_lock);
1508 	rcu_read_unlock();
1509 }
1510 
1511 static void ath11k_htt_update_ppdu_stats(struct ath11k *ar,
1512 					 struct htt_ppdu_stats *ppdu_stats)
1513 {
1514 	u8 user;
1515 
1516 	for (user = 0; user < HTT_PPDU_STATS_MAX_USERS - 1; user++)
1517 		ath11k_update_per_peer_tx_stats(ar, ppdu_stats, user);
1518 }
1519 
1520 static
1521 struct htt_ppdu_stats_info *ath11k_dp_htt_get_ppdu_desc(struct ath11k *ar,
1522 							u32 ppdu_id)
1523 {
1524 	struct htt_ppdu_stats_info *ppdu_info;
1525 
1526 	lockdep_assert_held(&ar->data_lock);
1527 
1528 	if (!list_empty(&ar->ppdu_stats_info)) {
1529 		list_for_each_entry(ppdu_info, &ar->ppdu_stats_info, list) {
1530 			if (ppdu_info->ppdu_id == ppdu_id)
1531 				return ppdu_info;
1532 		}
1533 
1534 		if (ar->ppdu_stat_list_depth > HTT_PPDU_DESC_MAX_DEPTH) {
1535 			ppdu_info = list_first_entry(&ar->ppdu_stats_info,
1536 						     typeof(*ppdu_info), list);
1537 			list_del(&ppdu_info->list);
1538 			ar->ppdu_stat_list_depth--;
1539 			ath11k_htt_update_ppdu_stats(ar, &ppdu_info->ppdu_stats);
1540 			kfree(ppdu_info);
1541 		}
1542 	}
1543 
1544 	ppdu_info = kzalloc_obj(*ppdu_info, GFP_ATOMIC);
1545 	if (!ppdu_info)
1546 		return NULL;
1547 
1548 	list_add_tail(&ppdu_info->list, &ar->ppdu_stats_info);
1549 	ar->ppdu_stat_list_depth++;
1550 
1551 	return ppdu_info;
1552 }
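/* The lookup above is a small bounded cache: return an existing entry on a
 * ppdu_id match, flush and free the oldest entry once the list is too deep,
 * then allocate a fresh one at the tail. A minimal standalone sketch of
 * that find-or-evict-or-allocate policy; the depth limit and the id array
 * replacing the kernel list are assumptions for illustration.
 */
#if 0	/* illustrative sketch only, not part of the driver build */
#include <stdio.h>

#define SKETCH_MAX_DEPTH 3

static int ids[SKETCH_MAX_DEPTH];
static int depth;

static int get_desc(int id)
{
	for (int i = 0; i < depth; i++)
		if (ids[i] == id)
			return i;	/* cache hit */

	if (depth > SKETCH_MAX_DEPTH - 1) {
		printf("evicting oldest id %d\n", ids[0]);
		for (int i = 1; i < depth; i++)
			ids[i - 1] = ids[i];	/* drop head, shift up */
		depth--;
	}
	ids[depth++] = id;
	return depth - 1;
}

int main(void)
{
	int seq[] = { 10, 11, 12, 10, 13, 14 };

	for (int i = 0; i < 6; i++)
		printf("id %d -> slot %d\n", seq[i], get_desc(seq[i]));
	return 0;
}
#endif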
1553 
1554 static int ath11k_htt_pull_ppdu_stats(struct ath11k_base *ab,
1555 				      struct sk_buff *skb)
1556 {
1557 	struct ath11k_htt_ppdu_stats_msg *msg;
1558 	struct htt_ppdu_stats_info *ppdu_info;
1559 	struct ath11k *ar;
1560 	int ret;
1561 	u8 pdev_id;
1562 	u32 ppdu_id, len;
1563 
1564 	msg = (struct ath11k_htt_ppdu_stats_msg *)skb->data;
1565 	len = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PAYLOAD_SIZE, msg->info);
1566 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, msg->info);
1567 	ppdu_id = msg->ppdu_id;
1568 
1569 	rcu_read_lock();
1570 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1571 	if (!ar) {
1572 		ret = -EINVAL;
1573 		goto out;
1574 	}
1575 
1576 	if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar))
1577 		trace_ath11k_htt_ppdu_stats(ar, skb->data, len);
1578 
1579 	spin_lock_bh(&ar->data_lock);
1580 	ppdu_info = ath11k_dp_htt_get_ppdu_desc(ar, ppdu_id);
1581 	if (!ppdu_info) {
1582 		ret = -EINVAL;
1583 		goto out_unlock_data;
1584 	}
1585 
1586 	ppdu_info->ppdu_id = ppdu_id;
1587 	ret = ath11k_dp_htt_tlv_iter(ab, msg->data, len,
1588 				     ath11k_htt_tlv_ppdu_stats_parse,
1589 				     (void *)ppdu_info);
1590 	if (ret) {
1591 		ath11k_warn(ab, "Failed to parse tlv %d\n", ret);
1592 		goto out_unlock_data;
1593 	}
1594 
1595 out_unlock_data:
1596 	spin_unlock_bh(&ar->data_lock);
1597 
1598 out:
1599 	rcu_read_unlock();
1600 
1601 	return ret;
1602 }
1603 
1604 static void ath11k_htt_pktlog(struct ath11k_base *ab, struct sk_buff *skb)
1605 {
1606 	struct htt_pktlog_msg *data = (struct htt_pktlog_msg *)skb->data;
1607 	struct ath_pktlog_hdr *hdr = (struct ath_pktlog_hdr *)data;
1608 	struct ath11k *ar;
1609 	u8 pdev_id;
1610 
1611 	pdev_id = FIELD_GET(HTT_T2H_PPDU_STATS_INFO_PDEV_ID, data->hdr);
1612 
1613 	rcu_read_lock();
1614 
1615 	ar = ath11k_mac_get_ar_by_pdev_id(ab, pdev_id);
1616 	if (!ar) {
1617 		ath11k_warn(ab, "invalid pdev id %d on htt pktlog\n", pdev_id);
1618 		goto out;
1619 	}
1620 
1621 	trace_ath11k_htt_pktlog(ar, data->payload, hdr->size,
1622 				ar->ab->pktlog_defs_checksum);
1623 
1624 out:
1625 	rcu_read_unlock();
1626 }
1627 
1628 static void ath11k_htt_backpressure_event_handler(struct ath11k_base *ab,
1629 						  struct sk_buff *skb)
1630 {
1631 	u32 *data = (u32 *)skb->data;
1632 	u8 pdev_id, ring_type, ring_id, pdev_idx;
1633 	u16 hp, tp;
1634 	u32 backpressure_time;
1635 	struct ath11k_bp_stats *bp_stats;
1636 
1637 	pdev_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_PDEV_ID_M, *data);
1638 	ring_type = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_TYPE_M, *data);
1639 	ring_id = FIELD_GET(HTT_BACKPRESSURE_EVENT_RING_ID_M, *data);
1640 	++data;
1641 
1642 	hp = FIELD_GET(HTT_BACKPRESSURE_EVENT_HP_M, *data);
1643 	tp = FIELD_GET(HTT_BACKPRESSURE_EVENT_TP_M, *data);
1644 	++data;
1645 
1646 	backpressure_time = *data;
1647 
1648 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "backpressure event, pdev %d, ring type %d,ring id %d, hp %d tp %d, backpressure time %d\n",
1649 		   pdev_id, ring_type, ring_id, hp, tp, backpressure_time);
1650 
1651 	if (ring_type == HTT_BACKPRESSURE_UMAC_RING_TYPE) {
1652 		if (ring_id >= HTT_SW_UMAC_RING_IDX_MAX)
1653 			return;
1654 
1655 		bp_stats = &ab->soc_stats.bp_stats.umac_ring_bp_stats[ring_id];
1656 	} else if (ring_type == HTT_BACKPRESSURE_LMAC_RING_TYPE) {
1657 		pdev_idx = DP_HW2SW_MACID(pdev_id);
1658 
1659 		if (ring_id >= HTT_SW_LMAC_RING_IDX_MAX || pdev_idx >= MAX_RADIOS)
1660 			return;
1661 
1662 		bp_stats = &ab->soc_stats.bp_stats.lmac_ring_bp_stats[ring_id][pdev_idx];
1663 	} else {
1664 		ath11k_warn(ab, "unknown ring type received in htt bp event %d\n",
1665 			    ring_type);
1666 		return;
1667 	}
1668 
1669 	spin_lock_bh(&ab->base_lock);
1670 	bp_stats->hp = hp;
1671 	bp_stats->tp = tp;
1672 	bp_stats->count++;
1673 	bp_stats->jiffies = jiffies;
1674 	spin_unlock_bh(&ab->base_lock);
1675 }
1676 
1677 void ath11k_dp_htt_htc_t2h_msg_handler(struct ath11k_base *ab,
1678 				       struct sk_buff *skb)
1679 {
1680 	struct ath11k_dp *dp = &ab->dp;
1681 	struct htt_resp_msg *resp = (struct htt_resp_msg *)skb->data;
1682 	enum htt_t2h_msg_type type = FIELD_GET(HTT_T2H_MSG_TYPE, *(u32 *)resp);
1683 	u16 peer_id;
1684 	u8 vdev_id;
1685 	u8 mac_addr[ETH_ALEN];
1686 	u16 peer_mac_h16;
1687 	u16 ast_hash;
1688 	u16 hw_peer_id;
1689 
1690 	ath11k_dbg(ab, ATH11K_DBG_DP_HTT, "dp_htt rx msg type :0x%0x\n", type);
1691 
1692 	switch (type) {
1693 	case HTT_T2H_MSG_TYPE_VERSION_CONF:
1694 		dp->htt_tgt_ver_major = FIELD_GET(HTT_T2H_VERSION_CONF_MAJOR,
1695 						  resp->version_msg.version);
1696 		dp->htt_tgt_ver_minor = FIELD_GET(HTT_T2H_VERSION_CONF_MINOR,
1697 						  resp->version_msg.version);
1698 		complete(&dp->htt_tgt_version_received);
1699 		break;
1700 	case HTT_T2H_MSG_TYPE_PEER_MAP:
1701 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1702 				    resp->peer_map_ev.info);
1703 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1704 				    resp->peer_map_ev.info);
1705 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1706 					 resp->peer_map_ev.info1);
1707 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1708 				       peer_mac_h16, mac_addr);
1709 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, 0, 0);
1710 		break;
1711 	case HTT_T2H_MSG_TYPE_PEER_MAP2:
1712 		vdev_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_VDEV_ID,
1713 				    resp->peer_map_ev.info);
1714 		peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO_PEER_ID,
1715 				    resp->peer_map_ev.info);
1716 		peer_mac_h16 = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_MAC_ADDR_H16,
1717 					 resp->peer_map_ev.info1);
1718 		ath11k_dp_get_mac_addr(resp->peer_map_ev.mac_addr_l32,
1719 				       peer_mac_h16, mac_addr);
1720 		ast_hash = FIELD_GET(HTT_T2H_PEER_MAP_INFO2_AST_HASH_VAL,
1721 				     resp->peer_map_ev.info2);
1722 		hw_peer_id = FIELD_GET(HTT_T2H_PEER_MAP_INFO1_HW_PEER_ID,
1723 				       resp->peer_map_ev.info1);
1724 		ath11k_peer_map_event(ab, vdev_id, peer_id, mac_addr, ast_hash,
1725 				      hw_peer_id);
1726 		break;
1727 	case HTT_T2H_MSG_TYPE_PEER_UNMAP:
1728 	case HTT_T2H_MSG_TYPE_PEER_UNMAP2:
1729 		peer_id = FIELD_GET(HTT_T2H_PEER_UNMAP_INFO_PEER_ID,
1730 				    resp->peer_unmap_ev.info);
1731 		ath11k_peer_unmap_event(ab, peer_id);
1732 		break;
1733 	case HTT_T2H_MSG_TYPE_PPDU_STATS_IND:
1734 		ath11k_htt_pull_ppdu_stats(ab, skb);
1735 		break;
1736 	case HTT_T2H_MSG_TYPE_EXT_STATS_CONF:
1737 		ath11k_debugfs_htt_ext_stats_handler(ab, skb);
1738 		break;
1739 	case HTT_T2H_MSG_TYPE_PKTLOG:
1740 		ath11k_htt_pktlog(ab, skb);
1741 		break;
1742 	case HTT_T2H_MSG_TYPE_BKPRESSURE_EVENT_IND:
1743 		ath11k_htt_backpressure_event_handler(ab, skb);
1744 		break;
1745 	default:
1746 		ath11k_warn(ab, "htt event %d not handled\n", type);
1747 		break;
1748 	}
1749 
1750 	dev_kfree_skb_any(skb);
1751 }
1752 
1753 static int ath11k_dp_rx_msdu_coalesce(struct ath11k *ar,
1754 				      struct sk_buff_head *msdu_list,
1755 				      struct sk_buff *first, struct sk_buff *last,
1756 				      u8 l3pad_bytes, int msdu_len)
1757 {
1758 	struct ath11k_base *ab = ar->ab;
1759 	struct sk_buff *skb;
1760 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1761 	int buf_first_hdr_len, buf_first_len;
1762 	struct hal_rx_desc *ldesc;
1763 	int space_extra, rem_len, buf_len;
1764 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
1765 
1766 	/* As the msdu is spread across multiple rx buffers,
1767 	 * find the offset to the start of msdu for computing
1768 	 * the length of the msdu in the first buffer.
1769 	 */
1770 	buf_first_hdr_len = hal_rx_desc_sz + l3pad_bytes;
1771 	buf_first_len = DP_RX_BUFFER_SIZE - buf_first_hdr_len;
1772 
1773 	if (WARN_ON_ONCE(msdu_len <= buf_first_len)) {
1774 		skb_put(first, buf_first_hdr_len + msdu_len);
1775 		skb_pull(first, buf_first_hdr_len);
1776 		return 0;
1777 	}
1778 
1779 	ldesc = (struct hal_rx_desc *)last->data;
1780 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ab, ldesc);
1781 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ab, ldesc);
1782 
1783 	/* The MSDU spans multiple buffers because the length of the MSDU
1784 	 * exceeds DP_RX_BUFFER_SIZE - hal_rx_desc_sz. So assume the data
1785 	 * in the first buffer is of length DP_RX_BUFFER_SIZE - hal_rx_desc_sz.
1786 	 */
1787 	skb_put(first, DP_RX_BUFFER_SIZE);
1788 	skb_pull(first, buf_first_hdr_len);
1789 
1790 	/* When an MSDU spans multiple buffers, the attention, MSDU_END and
1791 	 * MPDU_END TLVs are valid only in the last buffer. Copy those TLVs.
1792 	 */
1793 	ath11k_dp_rx_desc_end_tlv_copy(ab, rxcb->rx_desc, ldesc);
1794 
1795 	space_extra = msdu_len - (buf_first_len + skb_tailroom(first));
1796 	if (space_extra > 0 &&
1797 	    (pskb_expand_head(first, 0, space_extra, GFP_ATOMIC) < 0)) {
1798 		/* Free up all buffers of the MSDU */
1799 		while ((skb = __skb_dequeue(msdu_list)) != NULL) {
1800 			rxcb = ATH11K_SKB_RXCB(skb);
1801 			if (!rxcb->is_continuation) {
1802 				dev_kfree_skb_any(skb);
1803 				break;
1804 			}
1805 			dev_kfree_skb_any(skb);
1806 		}
1807 		return -ENOMEM;
1808 	}
1809 
1810 	rem_len = msdu_len - buf_first_len;
1811 	while ((skb = __skb_dequeue(msdu_list)) != NULL && rem_len > 0) {
1812 		rxcb = ATH11K_SKB_RXCB(skb);
1813 		if (rxcb->is_continuation)
1814 			buf_len = DP_RX_BUFFER_SIZE - hal_rx_desc_sz;
1815 		else
1816 			buf_len = rem_len;
1817 
1818 		if (buf_len > (DP_RX_BUFFER_SIZE - hal_rx_desc_sz)) {
1819 			WARN_ON_ONCE(1);
1820 			dev_kfree_skb_any(skb);
1821 			return -EINVAL;
1822 		}
1823 
1824 		skb_put(skb, buf_len + hal_rx_desc_sz);
1825 		skb_pull(skb, hal_rx_desc_sz);
1826 		skb_copy_from_linear_data(skb, skb_put(first, buf_len),
1827 					  buf_len);
1828 		dev_kfree_skb_any(skb);
1829 
1830 		rem_len -= buf_len;
1831 		if (!rxcb->is_continuation)
1832 			break;
1833 	}
1834 
1835 	return 0;
1836 }
1837 
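/* Return the rx buffer holding the end of an MSDU: @first itself when it
 * is not marked as a continuation, otherwise the first buffer in
 * @msdu_list without the continuation flag. Only that buffer carries the
 * valid attention/MSDU_END/MPDU_END TLVs.
 */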
1838 static struct sk_buff *ath11k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
1839 						      struct sk_buff *first)
1840 {
1841 	struct sk_buff *skb;
1842 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(first);
1843 
1844 	if (!rxcb->is_continuation)
1845 		return first;
1846 
1847 	skb_queue_walk(msdu_list, skb) {
1848 		rxcb = ATH11K_SKB_RXCB(skb);
1849 		if (!rxcb->is_continuation)
1850 			return skb;
1851 	}
1852 
1853 	return NULL;
1854 }
1855 
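/* Translate the HW IP/L4 checksum verdict from the attention TLV into
 * skb->ip_summed: claim CHECKSUM_UNNECESSARY only if neither check
 * failed.
 */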
1856 static void ath11k_dp_rx_h_csum_offload(struct ath11k *ar, struct sk_buff *msdu)
1857 {
1858 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1859 	struct rx_attention *rx_attention;
1860 	bool ip_csum_fail, l4_csum_fail;
1861 
1862 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rxcb->rx_desc);
1863 	ip_csum_fail = ath11k_dp_rx_h_attn_ip_cksum_fail(rx_attention);
1864 	l4_csum_fail = ath11k_dp_rx_h_attn_l4_cksum_fail(rx_attention);
1865 
1866 	msdu->ip_summed = (ip_csum_fail || l4_csum_fail) ?
1867 			  CHECKSUM_NONE : CHECKSUM_UNNECESSARY;
1868 }
1869 
1870 int ath11k_dp_rx_crypto_mic_len(struct ath11k *ar, enum hal_encrypt_type enctype)
1871 {
1872 	switch (enctype) {
1873 	case HAL_ENCRYPT_TYPE_OPEN:
1874 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1875 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1876 		return 0;
1877 	case HAL_ENCRYPT_TYPE_CCMP_128:
1878 		return IEEE80211_CCMP_MIC_LEN;
1879 	case HAL_ENCRYPT_TYPE_CCMP_256:
1880 		return IEEE80211_CCMP_256_MIC_LEN;
1881 	case HAL_ENCRYPT_TYPE_GCMP_128:
1882 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1883 		return IEEE80211_GCMP_MIC_LEN;
1884 	case HAL_ENCRYPT_TYPE_WEP_40:
1885 	case HAL_ENCRYPT_TYPE_WEP_104:
1886 	case HAL_ENCRYPT_TYPE_WEP_128:
1887 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1888 	case HAL_ENCRYPT_TYPE_WAPI:
1889 		break;
1890 	}
1891 
1892 	ath11k_warn(ar->ab, "unsupported encryption type %d for mic len\n", enctype);
1893 	return 0;
1894 }
1895 
1896 static int ath11k_dp_rx_crypto_param_len(struct ath11k *ar,
1897 					 enum hal_encrypt_type enctype)
1898 {
1899 	switch (enctype) {
1900 	case HAL_ENCRYPT_TYPE_OPEN:
1901 		return 0;
1902 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1903 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1904 		return IEEE80211_TKIP_IV_LEN;
1905 	case HAL_ENCRYPT_TYPE_CCMP_128:
1906 		return IEEE80211_CCMP_HDR_LEN;
1907 	case HAL_ENCRYPT_TYPE_CCMP_256:
1908 		return IEEE80211_CCMP_256_HDR_LEN;
1909 	case HAL_ENCRYPT_TYPE_GCMP_128:
1910 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1911 		return IEEE80211_GCMP_HDR_LEN;
1912 	case HAL_ENCRYPT_TYPE_WEP_40:
1913 	case HAL_ENCRYPT_TYPE_WEP_104:
1914 	case HAL_ENCRYPT_TYPE_WEP_128:
1915 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1916 	case HAL_ENCRYPT_TYPE_WAPI:
1917 		break;
1918 	}
1919 
1920 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1921 	return 0;
1922 }
1923 
1924 static int ath11k_dp_rx_crypto_icv_len(struct ath11k *ar,
1925 				       enum hal_encrypt_type enctype)
1926 {
1927 	switch (enctype) {
1928 	case HAL_ENCRYPT_TYPE_OPEN:
1929 	case HAL_ENCRYPT_TYPE_CCMP_128:
1930 	case HAL_ENCRYPT_TYPE_CCMP_256:
1931 	case HAL_ENCRYPT_TYPE_GCMP_128:
1932 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
1933 		return 0;
1934 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
1935 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
1936 		return IEEE80211_TKIP_ICV_LEN;
1937 	case HAL_ENCRYPT_TYPE_WEP_40:
1938 	case HAL_ENCRYPT_TYPE_WEP_104:
1939 	case HAL_ENCRYPT_TYPE_WEP_128:
1940 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
1941 	case HAL_ENCRYPT_TYPE_WAPI:
1942 		break;
1943 	}
1944 
1945 	ath11k_warn(ar->ab, "unsupported encryption type %d\n", enctype);
1946 	return 0;
1947 }
1948 
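/* Undecap a native-wifi formatted MSDU back into an 802.11 frame. The
 * first MSDU reuses the original 802.11 header from the rx descriptor
 * (with the A-MSDU bit cleared from QoS control); middle/last MSDUs get
 * a QoS header rebuilt from the TID stored in the rx control block.
 */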
1949 static void ath11k_dp_rx_h_undecap_nwifi(struct ath11k *ar,
1950 					 struct sk_buff *msdu,
1951 					 u8 *first_hdr,
1952 					 enum hal_encrypt_type enctype,
1953 					 struct ieee80211_rx_status *status)
1954 {
1955 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
1956 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
1957 	struct ieee80211_hdr *hdr;
1958 	size_t hdr_len;
1959 	u8 da[ETH_ALEN];
1960 	u8 sa[ETH_ALEN];
1961 	u16 qos_ctl = 0;
1962 	u8 *qos;
1963 
1964 	/* copy SA & DA and pull decapped header */
1965 	hdr = (struct ieee80211_hdr *)msdu->data;
1966 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1967 	ether_addr_copy(da, ieee80211_get_DA(hdr));
1968 	ether_addr_copy(sa, ieee80211_get_SA(hdr));
1969 	skb_pull(msdu, hdr_len);
1970 
1971 	if (rxcb->is_first_msdu) {
1972 		/* The original 802.11 header is valid for the first MSDU,
1973 		 * hence we can reuse the same header.
1974 		 */
1975 		hdr = (struct ieee80211_hdr *)first_hdr;
1976 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1977 
1978 		/* Each A-MSDU subframe will be reported as a separate MSDU,
1979 		 * so strip the A-MSDU bit from QoS Ctl.
1980 		 */
1981 		if (ieee80211_is_data_qos(hdr->frame_control)) {
1982 			qos = ieee80211_get_qos_ctl(hdr);
1983 			qos[0] &= ~IEEE80211_QOS_CTL_A_MSDU_PRESENT;
1984 		}
1985 	} else {
1986 		/* Rebuild the QoS header if this is a middle/last MSDU */
1987 		hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
1988 
1989 		/* Reset the order bit as the HT_Control header is stripped */
1990 		hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
1991 
1992 		qos_ctl = rxcb->tid;
1993 
1994 		if (ath11k_dp_rx_h_msdu_start_mesh_ctl_present(ar->ab, rxcb->rx_desc))
1995 			qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
1996 
1997 		/* TODO: Add other QoS ctl fields when required */
1998 
1999 		/* copy the decap header before it is overwritten, for reuse below */
2000 		memcpy(decap_hdr, (u8 *)hdr, hdr_len);
2001 	}
2002 
2003 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2004 		memcpy(skb_push(msdu,
2005 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2006 		       (void *)hdr + hdr_len,
2007 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2008 	}
2009 
2010 	if (!rxcb->is_first_msdu) {
2011 		memcpy(skb_push(msdu,
2012 				IEEE80211_QOS_CTL_LEN), &qos_ctl,
2013 				IEEE80211_QOS_CTL_LEN);
2014 		memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
2015 		return;
2016 	}
2017 
2018 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2019 
2020 	/* The original 802.11 header has a different DA, and in
2021 	 * the 4-address case it may also have a different SA.
2022 	 */
2023 	hdr = (struct ieee80211_hdr *)msdu->data;
2024 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2025 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2026 }
2027 
2028 static void ath11k_dp_rx_h_undecap_raw(struct ath11k *ar, struct sk_buff *msdu,
2029 				       enum hal_encrypt_type enctype,
2030 				       struct ieee80211_rx_status *status,
2031 				       bool decrypted)
2032 {
2033 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2034 	struct ieee80211_hdr *hdr;
2035 	size_t hdr_len;
2036 	size_t crypto_len;
2037 
2038 	/* expect a self-contained MSDU, i.e. not an A-MSDU subframe */
2039 	if (!(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
2040 		WARN_ON_ONCE(1);
2041 		return;
2042 	}
2043 
2044 	skb_trim(msdu, msdu->len - FCS_LEN);
2045 
2046 	if (!decrypted)
2047 		return;
2048 
2049 	hdr = (void *)msdu->data;
2050 
2051 	/* Tail */
2052 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2053 		skb_trim(msdu, msdu->len -
2054 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2055 
2056 		skb_trim(msdu, msdu->len -
2057 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2058 	} else {
2059 		/* MIC */
2060 		if (status->flag & RX_FLAG_MIC_STRIPPED)
2061 			skb_trim(msdu, msdu->len -
2062 				 ath11k_dp_rx_crypto_mic_len(ar, enctype));
2063 
2064 		/* ICV */
2065 		if (status->flag & RX_FLAG_ICV_STRIPPED)
2066 			skb_trim(msdu, msdu->len -
2067 				 ath11k_dp_rx_crypto_icv_len(ar, enctype));
2068 	}
2069 
2070 	/* MMIC: Michael MIC, 8 bytes, same length as IEEE80211_CCMP_MIC_LEN */
2071 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
2072 	    !ieee80211_has_morefrags(hdr->frame_control) &&
2073 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
2074 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
2075 
2076 	/* Head */
2077 	if (status->flag & RX_FLAG_IV_STRIPPED) {
2078 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2079 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2080 
2081 		memmove((void *)msdu->data + crypto_len,
2082 			(void *)msdu->data, hdr_len);
2083 		skb_pull(msdu, crypto_len);
2084 	}
2085 }
2086 
2087 static void *ath11k_dp_rx_h_find_rfc1042(struct ath11k *ar,
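/* Locate the RFC 1042 (LLC/SNAP) header within the 802.11 header status
 * of the rx descriptor: skip the 802.11 header and crypto params for a
 * first MSDU and, for A-MSDU subframes, additionally the A-MSDU subframe
 * header.
 */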
2088 					 struct sk_buff *msdu,
2089 					 enum hal_encrypt_type enctype)
2090 {
2091 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2092 	struct ieee80211_hdr *hdr;
2093 	size_t hdr_len, crypto_len;
2094 	void *rfc1042;
2095 	bool is_amsdu;
2096 
2097 	is_amsdu = !(rxcb->is_first_msdu && rxcb->is_last_msdu);
2098 	hdr = (struct ieee80211_hdr *)ath11k_dp_rx_h_80211_hdr(ar->ab, rxcb->rx_desc);
2099 	rfc1042 = hdr;
2100 
2101 	if (rxcb->is_first_msdu) {
2102 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
2103 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
2104 
2105 		rfc1042 += hdr_len + crypto_len;
2106 	}
2107 
2108 	if (is_amsdu)
2109 		rfc1042 += sizeof(struct ath11k_dp_amsdu_subframe_hdr);
2110 
2111 	return rfc1042;
2112 }
2113 
2114 static void ath11k_dp_rx_h_undecap_eth(struct ath11k *ar,
2115 				       struct sk_buff *msdu,
2116 				       u8 *first_hdr,
2117 				       enum hal_encrypt_type enctype,
2118 				       struct ieee80211_rx_status *status)
2119 {
2120 	struct ieee80211_hdr *hdr;
2121 	struct ethhdr *eth;
2122 	size_t hdr_len;
2123 	u8 da[ETH_ALEN];
2124 	u8 sa[ETH_ALEN];
2125 	void *rfc1042;
2126 
2127 	rfc1042 = ath11k_dp_rx_h_find_rfc1042(ar, msdu, enctype);
2128 	if (WARN_ON_ONCE(!rfc1042))
2129 		return;
2130 
2131 	/* pull decapped header and copy SA & DA */
2132 	eth = (struct ethhdr *)msdu->data;
2133 	ether_addr_copy(da, eth->h_dest);
2134 	ether_addr_copy(sa, eth->h_source);
2135 	skb_pull(msdu, sizeof(struct ethhdr));
2136 
2137 	/* push rfc1042/llc/snap */
2138 	memcpy(skb_push(msdu, sizeof(struct ath11k_dp_rfc1042_hdr)), rfc1042,
2139 	       sizeof(struct ath11k_dp_rfc1042_hdr));
2140 
2141 	/* push original 802.11 header */
2142 	hdr = (struct ieee80211_hdr *)first_hdr;
2143 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
2144 
2145 	if (!(status->flag & RX_FLAG_IV_STRIPPED)) {
2146 		memcpy(skb_push(msdu,
2147 				ath11k_dp_rx_crypto_param_len(ar, enctype)),
2148 		       (void *)hdr + hdr_len,
2149 		       ath11k_dp_rx_crypto_param_len(ar, enctype));
2150 	}
2151 
2152 	memcpy(skb_push(msdu, hdr_len), hdr, hdr_len);
2153 
2154 	/* The original 802.11 header has a different DA, and in
2155 	 * the 4-address case it may also have a different SA.
2156 	 */
2157 	hdr = (struct ieee80211_hdr *)msdu->data;
2158 	ether_addr_copy(ieee80211_get_DA(hdr), da);
2159 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
2160 }
2161 
2162 static void ath11k_dp_rx_h_undecap(struct ath11k *ar, struct sk_buff *msdu,
2163 				   struct hal_rx_desc *rx_desc,
2164 				   enum hal_encrypt_type enctype,
2165 				   struct ieee80211_rx_status *status,
2166 				   bool decrypted)
2167 {
2168 	u8 *first_hdr;
2169 	u8 decap;
2170 	struct ethhdr *ehdr;
2171 
2172 	first_hdr = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
2173 	decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc);
2174 
2175 	switch (decap) {
2176 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
2177 		ath11k_dp_rx_h_undecap_nwifi(ar, msdu, first_hdr,
2178 					     enctype, status);
2179 		break;
2180 	case DP_RX_DECAP_TYPE_RAW:
2181 		ath11k_dp_rx_h_undecap_raw(ar, msdu, enctype, status,
2182 					   decrypted);
2183 		break;
2184 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
2185 		ehdr = (struct ethhdr *)msdu->data;
2186 
2187 		/* mac80211 allows fast path only for authorized STA */
2188 		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
2189 			ATH11K_SKB_RXCB(msdu)->is_eapol = true;
2190 			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2191 						   enctype, status);
2192 			break;
2193 		}
2194 
2195 		/* PN for mcast packets will be validated in mac80211;
2196 		 * remove eth header and add 802.11 header.
2197 		 */
2198 		if (ATH11K_SKB_RXCB(msdu)->is_mcbc && decrypted)
2199 			ath11k_dp_rx_h_undecap_eth(ar, msdu, first_hdr,
2200 						   enctype, status);
2201 		break;
2202 	case DP_RX_DECAP_TYPE_8023:
2203 		/* TODO: Handle undecap for these formats */
2204 		break;
2205 	}
2206 }
2207 
2208 static struct ath11k_peer *
2209 ath11k_dp_rx_h_find_peer(struct ath11k_base *ab, struct sk_buff *msdu)
2210 {
2211 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2212 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
2213 	struct ath11k_peer *peer = NULL;
2214 
2215 	lockdep_assert_held(&ab->base_lock);
2216 
2217 	if (rxcb->peer_id)
2218 		peer = ath11k_peer_find_by_id(ab, rxcb->peer_id);
2219 
2220 	if (peer)
2221 		return peer;
2222 
2223 	if (!rx_desc || !(ath11k_dp_rxdesc_mac_addr2_valid(ab, rx_desc)))
2224 		return NULL;
2225 
2226 	peer = ath11k_peer_find_by_addr(ab,
2227 					ath11k_dp_rxdesc_mpdu_start_addr2(ab, rx_desc));
2228 	return peer;
2229 }
2230 
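/* Per-MPDU rx processing: look up the peer to resolve the cipher in use,
 * map the HW error and decryption status onto mac80211 rx flags, apply
 * the checksum offload verdict and undecap the frame into the format
 * mac80211 expects.
 */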
2231 static void ath11k_dp_rx_h_mpdu(struct ath11k *ar,
2232 				struct sk_buff *msdu,
2233 				struct hal_rx_desc *rx_desc,
2234 				struct ieee80211_rx_status *rx_status)
2235 {
2236 	bool fill_crypto_hdr;
2237 	enum hal_encrypt_type enctype;
2238 	bool is_decrypted = false;
2239 	struct ath11k_skb_rxcb *rxcb;
2240 	struct ieee80211_hdr *hdr;
2241 	struct ath11k_peer *peer;
2242 	struct rx_attention *rx_attention;
2243 	u32 err_bitmap;
2244 
2245 	/* PN for multicast packets will be checked in mac80211 */
2246 	rxcb = ATH11K_SKB_RXCB(msdu);
2247 	fill_crypto_hdr = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
2248 	rxcb->is_mcbc = fill_crypto_hdr;
2249 
2250 	if (rxcb->is_mcbc) {
2251 		rxcb->peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
2252 		rxcb->seq_no = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
2253 	}
2254 
2255 	spin_lock_bh(&ar->ab->base_lock);
2256 	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2257 	if (peer) {
2258 		if (rxcb->is_mcbc)
2259 			enctype = peer->sec_type_grp;
2260 		else
2261 			enctype = peer->sec_type;
2262 	} else {
2263 		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
2264 	}
2265 	spin_unlock_bh(&ar->ab->base_lock);
2266 
2267 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
2268 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
2269 	if (enctype != HAL_ENCRYPT_TYPE_OPEN && !err_bitmap)
2270 		is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
2271 
2272 	/* Clear per-MPDU flags while leaving per-PPDU flags intact */
2273 	rx_status->flag &= ~(RX_FLAG_FAILED_FCS_CRC |
2274 			     RX_FLAG_MMIC_ERROR |
2275 			     RX_FLAG_DECRYPTED |
2276 			     RX_FLAG_IV_STRIPPED |
2277 			     RX_FLAG_MMIC_STRIPPED);
2278 
2279 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
2280 		rx_status->flag |= RX_FLAG_FAILED_FCS_CRC;
2281 	if (err_bitmap & DP_RX_MPDU_ERR_TKIP_MIC)
2282 		rx_status->flag |= RX_FLAG_MMIC_ERROR;
2283 
2284 	if (is_decrypted) {
2285 		rx_status->flag |= RX_FLAG_DECRYPTED | RX_FLAG_MMIC_STRIPPED;
2286 
2287 		if (fill_crypto_hdr)
2288 			rx_status->flag |= RX_FLAG_MIC_STRIPPED |
2289 					   RX_FLAG_ICV_STRIPPED;
2290 		else
2291 			rx_status->flag |= RX_FLAG_IV_STRIPPED |
2292 					   RX_FLAG_PN_VALIDATED;
2293 	}
2294 
2295 	ath11k_dp_rx_h_csum_offload(ar, msdu);
2296 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
2297 			       enctype, rx_status, is_decrypted);
2298 
2299 	if (!is_decrypted || fill_crypto_hdr)
2300 		return;
2301 
2302 	if (ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rx_desc) !=
2303 	    DP_RX_DECAP_TYPE_ETHERNET2_DIX) {
2304 		hdr = (void *)msdu->data;
2305 		hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_PROTECTED);
2306 	}
2307 }
2308 
2309 static void ath11k_dp_rx_h_rate(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2310 				struct ieee80211_rx_status *rx_status)
2311 {
2312 	struct ieee80211_supported_band *sband;
2313 	enum rx_msdu_start_pkt_type pkt_type;
2314 	u8 bw;
2315 	u8 rate_mcs, nss;
2316 	u8 sgi;
2317 	bool is_cck, is_ldpc;
2318 
2319 	pkt_type = ath11k_dp_rx_h_msdu_start_pkt_type(ar->ab, rx_desc);
2320 	bw = ath11k_dp_rx_h_msdu_start_rx_bw(ar->ab, rx_desc);
2321 	rate_mcs = ath11k_dp_rx_h_msdu_start_rate_mcs(ar->ab, rx_desc);
2322 	nss = ath11k_dp_rx_h_msdu_start_nss(ar->ab, rx_desc);
2323 	sgi = ath11k_dp_rx_h_msdu_start_sgi(ar->ab, rx_desc);
2324 
2325 	switch (pkt_type) {
2326 	case RX_MSDU_START_PKT_TYPE_11A:
2327 	case RX_MSDU_START_PKT_TYPE_11B:
2328 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
2329 		sband = &ar->mac.sbands[rx_status->band];
2330 		rx_status->rate_idx = ath11k_mac_hw_rate_to_idx(sband, rate_mcs,
2331 								is_cck);
2332 		break;
2333 	case RX_MSDU_START_PKT_TYPE_11N:
2334 		rx_status->encoding = RX_ENC_HT;
2335 		if (rate_mcs > ATH11K_HT_MCS_MAX) {
2336 			ath11k_warn(ar->ab,
2337 				    "received invalid MCS %d in HT mode\n",
2338 				    rate_mcs);
2339 			break;
2340 		}
2341 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
2342 		if (sgi)
2343 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2344 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2345 		break;
2346 	case RX_MSDU_START_PKT_TYPE_11AC:
2347 		rx_status->encoding = RX_ENC_VHT;
2348 		rx_status->rate_idx = rate_mcs;
2349 		if (rate_mcs > ATH11K_VHT_MCS_MAX) {
2350 			ath11k_warn(ar->ab,
2351 				    "received invalid MCS %d in VHT mode\n",
2352 				    rate_mcs);
2353 			break;
2354 		}
2355 		rx_status->nss = nss;
2356 		if (sgi)
2357 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
2358 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2359 		is_ldpc = ath11k_dp_rx_h_msdu_start_ldpc_support(ar->ab, rx_desc);
2360 		if (is_ldpc)
2361 			rx_status->enc_flags |= RX_ENC_FLAG_LDPC;
2362 		break;
2363 	case RX_MSDU_START_PKT_TYPE_11AX:
2364 		rx_status->rate_idx = rate_mcs;
2365 		if (rate_mcs > ATH11K_HE_MCS_MAX) {
2366 			ath11k_warn(ar->ab,
2367 				    "received invalid MCS %d in HE mode\n",
2368 				    rate_mcs);
2369 			break;
2370 		}
2371 		rx_status->encoding = RX_ENC_HE;
2372 		rx_status->nss = nss;
2373 		rx_status->he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
2374 		rx_status->bw = ath11k_mac_bw_to_mac80211_bw(bw);
2375 		break;
2376 	}
2377 }
2378 
2379 static void ath11k_dp_rx_h_ppdu(struct ath11k *ar, struct hal_rx_desc *rx_desc,
2380 				struct ieee80211_rx_status *rx_status)
2381 {
2382 	u8 channel_num;
2383 	u32 center_freq, meta_data;
2384 	struct ieee80211_channel *channel;
2385 
2386 	rx_status->freq = 0;
2387 	rx_status->rate_idx = 0;
2388 	rx_status->nss = 0;
2389 	rx_status->encoding = RX_ENC_LEGACY;
2390 	rx_status->bw = RATE_INFO_BW_20;
2391 
2392 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
2393 
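	/* The MSDU_START freq field packs the channel number into its lower
	 * half and the center frequency (MHz) into its upper 16 bits.
	 */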
2394 	meta_data = ath11k_dp_rx_h_msdu_start_freq(ar->ab, rx_desc);
2395 	channel_num = meta_data;
2396 	center_freq = meta_data >> 16;
2397 
2398 	if (center_freq >= ATH11K_MIN_6G_FREQ &&
2399 	    center_freq <= ATH11K_MAX_6G_FREQ) {
2400 		rx_status->band = NL80211_BAND_6GHZ;
2401 		rx_status->freq = center_freq;
2402 	} else if (channel_num >= 1 && channel_num <= 14) {
2403 		rx_status->band = NL80211_BAND_2GHZ;
2404 	} else if (channel_num >= 36 && channel_num <= 177) {
2405 		rx_status->band = NL80211_BAND_5GHZ;
2406 	} else {
2407 		spin_lock_bh(&ar->data_lock);
2408 		channel = ar->rx_channel;
2409 		if (channel) {
2410 			rx_status->band = channel->band;
2411 			channel_num =
2412 				ieee80211_frequency_to_channel(channel->center_freq);
2413 		}
2414 		spin_unlock_bh(&ar->data_lock);
2415 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "rx_desc: ",
2416 				rx_desc, sizeof(struct hal_rx_desc));
2417 	}
2418 
2419 	if (rx_status->band != NL80211_BAND_6GHZ)
2420 		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
2421 								 rx_status->band);
2422 
2423 	ath11k_dp_rx_h_rate(ar, rx_desc, rx_status);
2424 }
2425 
2426 static void ath11k_dp_rx_deliver_msdu(struct ath11k *ar, struct napi_struct *napi,
2427 				      struct sk_buff *msdu,
2428 				      struct ieee80211_rx_status *status)
2429 {
2430 	static const struct ieee80211_radiotap_he known = {
2431 		.data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN |
2432 				     IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN),
2433 		.data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN),
2434 	};
2435 	struct ieee80211_rx_status *rx_status;
2436 	struct ieee80211_radiotap_he *he = NULL;
2437 	struct ieee80211_sta *pubsta = NULL;
2438 	struct ath11k_peer *peer;
2439 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
2440 	u8 decap = DP_RX_DECAP_TYPE_RAW;
2441 	bool is_mcbc = rxcb->is_mcbc;
2442 	bool is_eapol = rxcb->is_eapol;
2443 
2444 	if (status->encoding == RX_ENC_HE &&
2445 	    !(status->flag & RX_FLAG_RADIOTAP_HE) &&
2446 	    !(status->flag & RX_FLAG_SKIP_MONITOR)) {
2447 		he = skb_push(msdu, sizeof(known));
2448 		memcpy(he, &known, sizeof(known));
2449 		status->flag |= RX_FLAG_RADIOTAP_HE;
2450 	}
2451 
2452 	if (!(status->flag & RX_FLAG_ONLY_MONITOR))
2453 		decap = ath11k_dp_rx_h_msdu_start_decap_type(ar->ab, rxcb->rx_desc);
2454 
2455 	spin_lock_bh(&ar->ab->base_lock);
2456 	peer = ath11k_dp_rx_h_find_peer(ar->ab, msdu);
2457 	if (peer && peer->sta)
2458 		pubsta = peer->sta;
2459 	spin_unlock_bh(&ar->ab->base_lock);
2460 
2461 	ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
2462 		   "rx skb %p len %u peer %pM tid %d %s sn %u %s%s%s%s%s%s%s %srate_idx %u nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
2463 		   msdu,
2464 		   msdu->len,
2465 		   peer ? peer->addr : NULL,
2466 		   rxcb->tid,
2467 		   is_mcbc ? "mcast" : "ucast",
2468 		   rxcb->seq_no,
2469 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
2470 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
2471 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
2472 		   (status->encoding == RX_ENC_HE) ? "he" : "",
2473 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
2474 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
2475 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
2476 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
2477 		   status->rate_idx,
2478 		   status->nss,
2479 		   status->freq,
2480 		   status->band, status->flag,
2481 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
2482 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
2483 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
2484 
2485 	ath11k_dbg_dump(ar->ab, ATH11K_DBG_DP_RX, NULL, "dp rx msdu: ",
2486 			msdu->data, msdu->len);
2487 
2488 	rx_status = IEEE80211_SKB_RXCB(msdu);
2489 	*rx_status = *status;
2490 
2491 	/* TODO: trace rx packet */
2492 
2493 	/* The PN for multicast packets is not validated in HW,
2494 	 * so skip the 802.3 rx path for them.
2495 	 * Also, fast_rx expects the STA to be authorized, hence
2496 	 * EAPOL packets are sent via the slow path.
2497 	 */
2498 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
2499 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
2500 		rx_status->flag |= RX_FLAG_8023;
2501 
2502 	ieee80211_rx_napi(ar->hw, pubsta, msdu, napi);
2503 }
2504 
2505 static int ath11k_dp_rx_process_msdu(struct ath11k *ar,
2506 				     struct sk_buff *msdu,
2507 				     struct sk_buff_head *msdu_list,
2508 				     struct ieee80211_rx_status *rx_status)
2509 {
2510 	struct ath11k_base *ab = ar->ab;
2511 	struct hal_rx_desc *rx_desc, *lrx_desc;
2512 	struct rx_attention *rx_attention;
2513 	struct ath11k_skb_rxcb *rxcb;
2514 	struct sk_buff *last_buf;
2515 	u8 l3_pad_bytes;
2516 	u8 *hdr_status;
2517 	u16 msdu_len;
2518 	int ret;
2519 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
2520 
2521 	last_buf = ath11k_dp_rx_get_msdu_last_buf(msdu_list, msdu);
2522 	if (!last_buf) {
2523 		ath11k_warn(ab,
2524 			    "No valid Rx buffer to access Atten/MSDU_END/MPDU_END tlvs\n");
2525 		ret = -EIO;
2526 		goto free_out;
2527 	}
2528 
2529 	rx_desc = (struct hal_rx_desc *)msdu->data;
2530 	if (ath11k_dp_rx_h_attn_msdu_len_err(ab, rx_desc)) {
2531 		ath11k_warn(ab, "msdu len not valid\n");
2532 		ret = -EIO;
2533 		goto free_out;
2534 	}
2535 
2536 	lrx_desc = (struct hal_rx_desc *)last_buf->data;
2537 	rx_attention = ath11k_dp_rx_get_attention(ab, lrx_desc);
2538 	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
2539 		ath11k_warn(ab, "msdu_done bit in attention is not set\n");
2540 		ret = -EIO;
2541 		goto free_out;
2542 	}
2543 
2544 	rxcb = ATH11K_SKB_RXCB(msdu);
2545 	rxcb->rx_desc = rx_desc;
2546 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ab, rx_desc);
2547 	l3_pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ab, lrx_desc);
2548 
2549 	if (rxcb->is_frag) {
2550 		skb_pull(msdu, hal_rx_desc_sz);
2551 	} else if (!rxcb->is_continuation) {
2552 		if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
2553 			hdr_status = ath11k_dp_rx_h_80211_hdr(ab, rx_desc);
2554 			ret = -EINVAL;
2555 			ath11k_warn(ab, "invalid msdu len %u\n", msdu_len);
2556 			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
2557 					sizeof(struct ieee80211_hdr));
2558 			ath11k_dbg_dump(ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
2559 					sizeof(struct hal_rx_desc));
2560 			goto free_out;
2561 		}
2562 		skb_put(msdu, hal_rx_desc_sz + l3_pad_bytes + msdu_len);
2563 		skb_pull(msdu, hal_rx_desc_sz + l3_pad_bytes);
2564 	} else {
2565 		ret = ath11k_dp_rx_msdu_coalesce(ar, msdu_list,
2566 						 msdu, last_buf,
2567 						 l3_pad_bytes, msdu_len);
2568 		if (ret) {
2569 			ath11k_warn(ab,
2570 				    "failed to coalesce msdu rx buffer: %d\n", ret);
2571 			goto free_out;
2572 		}
2573 	}
2574 
2575 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rx_status);
2576 	ath11k_dp_rx_h_mpdu(ar, msdu, rx_desc, rx_status);
2577 
2578 	rx_status->flag |= RX_FLAG_SKIP_MONITOR | RX_FLAG_DUP_VALIDATED;
2579 
2580 	return 0;
2581 
2582 free_out:
2583 	return ret;
2584 }
2585 
2586 static void ath11k_dp_rx_process_received_packets(struct ath11k_base *ab,
2587 						  struct napi_struct *napi,
2588 						  struct sk_buff_head *msdu_list,
2589 						  int mac_id)
2590 {
2591 	struct sk_buff *msdu;
2592 	struct ath11k *ar;
2593 	struct ieee80211_rx_status rx_status = {};
2594 	int ret;
2595 
2596 	if (skb_queue_empty(msdu_list))
2597 		return;
2598 
2599 	if (unlikely(!rcu_access_pointer(ab->pdevs_active[mac_id]))) {
2600 		__skb_queue_purge(msdu_list);
2601 		return;
2602 	}
2603 
2604 	ar = ab->pdevs[mac_id].ar;
2605 	if (unlikely(test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags))) {
2606 		__skb_queue_purge(msdu_list);
2607 		return;
2608 	}
2609 
2610 	while ((msdu = __skb_dequeue(msdu_list))) {
2611 		ret = ath11k_dp_rx_process_msdu(ar, msdu, msdu_list, &rx_status);
2612 		if (unlikely(ret)) {
2613 			ath11k_dbg(ab, ATH11K_DBG_DATA,
2614 				   "unable to process msdu: %d\n", ret);
2615 			dev_kfree_skb_any(msdu);
2616 			continue;
2617 		}
2618 
2619 		ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rx_status);
2620 	}
2621 }
2622 
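/* NAPI handler for a REO destination ring: reap up to @budget completed
 * MSDUs, collect them into per-pdev lists, hand them to mac80211 and
 * replenish the rx refill rings with the number of buffers reaped.
 */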
2623 int ath11k_dp_process_rx(struct ath11k_base *ab, int ring_id,
2624 			 struct napi_struct *napi, int budget)
2625 {
2626 	struct ath11k_dp *dp = &ab->dp;
2627 	struct dp_rxdma_ring *rx_ring;
2628 	int num_buffs_reaped[MAX_RADIOS] = {};
2629 	struct sk_buff_head msdu_list[MAX_RADIOS];
2630 	struct ath11k_skb_rxcb *rxcb;
2631 	int total_msdu_reaped = 0;
2632 	struct hal_srng *srng;
2633 	struct sk_buff *msdu;
2634 	bool done = false;
2635 	int buf_id, mac_id;
2636 	struct ath11k *ar;
2637 	struct hal_reo_dest_ring *desc;
2638 	enum hal_reo_dest_ring_push_reason push_reason;
2639 	u32 cookie;
2640 	int i;
2641 
2642 	for (i = 0; i < MAX_RADIOS; i++)
2643 		__skb_queue_head_init(&msdu_list[i]);
2644 
2645 	srng = &ab->hal.srng_list[dp->reo_dst_ring[ring_id].ring_id];
2646 
2647 	spin_lock_bh(&srng->lock);
2648 
2649 try_again:
2650 	ath11k_hal_srng_access_begin(ab, srng);
2651 
2652 	while (likely(desc =
2653 	      (struct hal_reo_dest_ring *)ath11k_hal_srng_dst_get_next_entry(ab,
2654 									     srng))) {
2655 		cookie = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
2656 				   desc->buf_addr_info.info1);
2657 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
2658 				   cookie);
2659 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, cookie);
2660 
2661 		if (unlikely(buf_id == 0))
2662 			continue;
2663 
2664 		ar = ab->pdevs[mac_id].ar;
2665 		rx_ring = &ar->dp.rx_refill_buf_ring;
2666 		spin_lock_bh(&rx_ring->idr_lock);
2667 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
2668 		if (unlikely(!msdu)) {
2669 			ath11k_warn(ab, "frame rx with invalid buf_id %d\n",
2670 				    buf_id);
2671 			spin_unlock_bh(&rx_ring->idr_lock);
2672 			continue;
2673 		}
2674 
2675 		idr_remove(&rx_ring->bufs_idr, buf_id);
2676 		spin_unlock_bh(&rx_ring->idr_lock);
2677 
2678 		rxcb = ATH11K_SKB_RXCB(msdu);
2679 		dma_unmap_single(ab->dev, rxcb->paddr,
2680 				 msdu->len + skb_tailroom(msdu),
2681 				 DMA_FROM_DEVICE);
2682 
2683 		num_buffs_reaped[mac_id]++;
2684 
2685 		push_reason = FIELD_GET(HAL_REO_DEST_RING_INFO0_PUSH_REASON,
2686 					desc->info0);
2687 		if (unlikely(push_reason !=
2688 			     HAL_REO_DEST_RING_PUSH_REASON_ROUTING_INSTRUCTION)) {
2689 			dev_kfree_skb_any(msdu);
2690 			ab->soc_stats.hal_reo_error[ring_id]++;
2691 			continue;
2692 		}
2693 
2694 		rxcb->is_first_msdu = !!(desc->rx_msdu_info.info0 &
2695 					 RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU);
2696 		rxcb->is_last_msdu = !!(desc->rx_msdu_info.info0 &
2697 					RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU);
2698 		rxcb->is_continuation = !!(desc->rx_msdu_info.info0 &
2699 					   RX_MSDU_DESC_INFO0_MSDU_CONTINUATION);
2700 		rxcb->peer_id = FIELD_GET(RX_MPDU_DESC_META_DATA_PEER_ID,
2701 					  desc->rx_mpdu_info.meta_data);
2702 		rxcb->seq_no = FIELD_GET(RX_MPDU_DESC_INFO0_SEQ_NUM,
2703 					 desc->rx_mpdu_info.info0);
2704 		rxcb->tid = FIELD_GET(HAL_REO_DEST_RING_INFO0_RX_QUEUE_NUM,
2705 				      desc->info0);
2706 
2707 		rxcb->mac_id = mac_id;
2708 		__skb_queue_tail(&msdu_list[mac_id], msdu);
2709 
2710 		if (rxcb->is_continuation) {
2711 			done = false;
2712 		} else {
2713 			total_msdu_reaped++;
2714 			done = true;
2715 		}
2716 
2717 		if (total_msdu_reaped >= budget)
2718 			break;
2719 	}
2720 
2721 	/* HW might have updated the head pointer after we cached it.
2722 	 * In this case, even though there are entries in the ring, we'll
2723 	 * get a NULL descriptor. Give the read another try with the updated
2724 	 * cached head pointer so that we can reap complete MPDUs in the
2725 	 * current rx processing.
2726 	 */
2727 	if (unlikely(!done && ath11k_hal_srng_dst_num_free(ab, srng, true))) {
2728 		ath11k_hal_srng_access_end(ab, srng);
2729 		goto try_again;
2730 	}
2731 
2732 	ath11k_hal_srng_access_end(ab, srng);
2733 
2734 	spin_unlock_bh(&srng->lock);
2735 
2736 	if (unlikely(!total_msdu_reaped))
2737 		goto exit;
2738 
2739 	for (i = 0; i < ab->num_radios; i++) {
2740 		if (!num_buffs_reaped[i])
2741 			continue;
2742 
2743 		ath11k_dp_rx_process_received_packets(ab, napi, &msdu_list[i], i);
2744 
2745 		ar = ab->pdevs[i].ar;
2746 		rx_ring = &ar->dp.rx_refill_buf_ring;
2747 
2748 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
2749 					   ab->hw_params.hal_params->rx_buf_rbm);
2750 	}
2751 exit:
2752 	return total_msdu_reaped;
2753 }
2754 
2755 static void ath11k_dp_rx_update_peer_stats(struct ath11k_sta *arsta,
2756 					   struct hal_rx_mon_ppdu_info *ppdu_info)
2757 {
2758 	struct ath11k_rx_peer_stats *rx_stats = arsta->rx_stats;
2759 	u32 num_msdu;
2760 	int i;
2761 
2762 	if (!rx_stats)
2763 		return;
2764 
2765 	arsta->rssi_comb = ppdu_info->rssi_comb;
2766 	ewma_avg_rssi_add(&arsta->avg_rssi, ppdu_info->rssi_comb);
2767 
2768 	num_msdu = ppdu_info->tcp_msdu_count + ppdu_info->tcp_ack_msdu_count +
2769 		   ppdu_info->udp_msdu_count + ppdu_info->other_msdu_count;
2770 
2771 	rx_stats->num_msdu += num_msdu;
2772 	rx_stats->tcp_msdu_count += ppdu_info->tcp_msdu_count +
2773 				    ppdu_info->tcp_ack_msdu_count;
2774 	rx_stats->udp_msdu_count += ppdu_info->udp_msdu_count;
2775 	rx_stats->other_msdu_count += ppdu_info->other_msdu_count;
2776 
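	/* Legacy 11a/11b PPDUs report no HT/VHT rate info; normalize them
	 * to nss 1 and the catch-all mcs/tid buckets before counting.
	 */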
2777 	if (ppdu_info->preamble_type == HAL_RX_PREAMBLE_11A ||
2778 	    ppdu_info->preamble_type == HAL_RX_PREAMBLE_11B) {
2779 		ppdu_info->nss = 1;
2780 		ppdu_info->mcs = HAL_RX_MAX_MCS;
2781 		ppdu_info->tid = IEEE80211_NUM_TIDS;
2782 	}
2783 
2784 	if (ppdu_info->nss > 0 && ppdu_info->nss <= HAL_RX_MAX_NSS)
2785 		rx_stats->nss_count[ppdu_info->nss - 1] += num_msdu;
2786 
2787 	if (ppdu_info->mcs <= HAL_RX_MAX_MCS)
2788 		rx_stats->mcs_count[ppdu_info->mcs] += num_msdu;
2789 
2790 	if (ppdu_info->gi < HAL_RX_GI_MAX)
2791 		rx_stats->gi_count[ppdu_info->gi] += num_msdu;
2792 
2793 	if (ppdu_info->bw < HAL_RX_BW_MAX)
2794 		rx_stats->bw_count[ppdu_info->bw] += num_msdu;
2795 
2796 	if (ppdu_info->ldpc < HAL_RX_SU_MU_CODING_MAX)
2797 		rx_stats->coding_count[ppdu_info->ldpc] += num_msdu;
2798 
2799 	if (ppdu_info->tid <= IEEE80211_NUM_TIDS)
2800 		rx_stats->tid_count[ppdu_info->tid] += num_msdu;
2801 
2802 	if (ppdu_info->preamble_type < HAL_RX_PREAMBLE_MAX)
2803 		rx_stats->pream_cnt[ppdu_info->preamble_type] += num_msdu;
2804 
2805 	if (ppdu_info->reception_type < HAL_RX_RECEPTION_TYPE_MAX)
2806 		rx_stats->reception_type[ppdu_info->reception_type] += num_msdu;
2807 
2808 	if (ppdu_info->is_stbc)
2809 		rx_stats->stbc_count += num_msdu;
2810 
2811 	if (ppdu_info->beamformed)
2812 		rx_stats->beamformed_count += num_msdu;
2813 
2814 	if (ppdu_info->num_mpdu_fcs_ok > 1)
2815 		rx_stats->ampdu_msdu_count += num_msdu;
2816 	else
2817 		rx_stats->non_ampdu_msdu_count += num_msdu;
2818 
2819 	rx_stats->num_mpdu_fcs_ok += ppdu_info->num_mpdu_fcs_ok;
2820 	rx_stats->num_mpdu_fcs_err += ppdu_info->num_mpdu_fcs_err;
2821 	rx_stats->dcm_count += ppdu_info->dcm;
2822 	rx_stats->ru_alloc_cnt[ppdu_info->ru_alloc] += num_msdu;
2823 
2824 	BUILD_BUG_ON(ARRAY_SIZE(arsta->chain_signal) >
2825 			     ARRAY_SIZE(ppdu_info->rssi_chain_pri20));
2826 
2827 	for (i = 0; i < ARRAY_SIZE(arsta->chain_signal); i++)
2828 		arsta->chain_signal[i] = ppdu_info->rssi_chain_pri20[i];
2829 
2830 	rx_stats->rx_duration += ppdu_info->rx_duration;
2831 	arsta->rx_duration = rx_stats->rx_duration;
2832 }
2833 
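/* Allocate a monitor status buffer aligned to DP_RX_BUFFER_ALIGN_SIZE,
 * DMA-map it and register it in the ring's buffer idr. Returns the skb
 * with its physical address stored in the rx control block, or NULL on
 * failure.
 */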
2834 static struct sk_buff *ath11k_dp_rx_alloc_mon_status_buf(struct ath11k_base *ab,
2835 							 struct dp_rxdma_ring *rx_ring,
2836 							 int *buf_id)
2837 {
2838 	struct sk_buff *skb;
2839 	dma_addr_t paddr;
2840 
2841 	skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
2842 			    DP_RX_BUFFER_ALIGN_SIZE);
2843 
2844 	if (!skb)
2845 		goto fail_alloc_skb;
2846 
2847 	if (!IS_ALIGNED((unsigned long)skb->data,
2848 			DP_RX_BUFFER_ALIGN_SIZE)) {
2849 		skb_pull(skb, PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
2850 			 skb->data);
2851 	}
2852 
2853 	paddr = dma_map_single(ab->dev, skb->data,
2854 			       skb->len + skb_tailroom(skb),
2855 			       DMA_FROM_DEVICE);
2856 	if (unlikely(dma_mapping_error(ab->dev, paddr)))
2857 		goto fail_free_skb;
2858 
2859 	spin_lock_bh(&rx_ring->idr_lock);
2860 	*buf_id = idr_alloc(&rx_ring->bufs_idr, skb, 0,
2861 			    rx_ring->bufs_max, GFP_ATOMIC);
2862 	spin_unlock_bh(&rx_ring->idr_lock);
2863 	if (*buf_id < 0)
2864 		goto fail_dma_unmap;
2865 
2866 	ATH11K_SKB_RXCB(skb)->paddr = paddr;
2867 	return skb;
2868 
2869 fail_dma_unmap:
2870 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2871 			 DMA_FROM_DEVICE);
2872 fail_free_skb:
2873 	dev_kfree_skb_any(skb);
2874 fail_alloc_skb:
2875 	return NULL;
2876 }
2877 
2878 int ath11k_dp_rx_mon_status_bufs_replenish(struct ath11k_base *ab, int mac_id,
2879 					   struct dp_rxdma_ring *rx_ring,
2880 					   int req_entries,
2881 					   enum hal_rx_buf_return_buf_manager mgr)
2882 {
2883 	struct hal_srng *srng;
2884 	u32 *desc;
2885 	struct sk_buff *skb;
2886 	int num_free;
2887 	int num_remain;
2888 	int buf_id;
2889 	u32 cookie;
2890 	dma_addr_t paddr;
2891 
2892 	req_entries = min(req_entries, rx_ring->bufs_max);
2893 
2894 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
2895 
2896 	spin_lock_bh(&srng->lock);
2897 
2898 	ath11k_hal_srng_access_begin(ab, srng);
2899 
2900 	num_free = ath11k_hal_srng_src_num_free(ab, srng, true);
2901 
2902 	req_entries = min(num_free, req_entries);
2903 	num_remain = req_entries;
2904 
2905 	while (num_remain > 0) {
2906 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
2907 							&buf_id);
2908 		if (!skb)
2909 			break;
2910 		paddr = ATH11K_SKB_RXCB(skb)->paddr;
2911 
2912 		desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
2913 		if (!desc)
2914 			goto fail_desc_get;
2915 
2916 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
2917 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
2918 
2919 		num_remain--;
2920 
2921 		ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, mgr);
2922 	}
2923 
2924 	ath11k_hal_srng_access_end(ab, srng);
2925 
2926 	spin_unlock_bh(&srng->lock);
2927 
2928 	return req_entries - num_remain;
2929 
2930 fail_desc_get:
2931 	spin_lock_bh(&rx_ring->idr_lock);
2932 	idr_remove(&rx_ring->bufs_idr, buf_id);
2933 	spin_unlock_bh(&rx_ring->idr_lock);
2934 	dma_unmap_single(ab->dev, paddr, skb->len + skb_tailroom(skb),
2935 			 DMA_FROM_DEVICE);
2936 	dev_kfree_skb_any(skb);
2937 	ath11k_hal_srng_access_end(ab, srng);
2938 	spin_unlock_bh(&srng->lock);
2939 
2940 	return req_entries - num_remain;
2941 }
2942 
2943 #define ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP 32535
2944 
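/* Track whether the status ring PPDU id leads or lags the one last seen
 * on the destination ring; a difference larger than the wrap threshold
 * above is assumed to be a wrapped PPDU id counter and flips the verdict.
 */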
2945 static void
2946 ath11k_dp_rx_mon_update_status_buf_state(struct ath11k_mon_data *pmon,
2947 					 struct hal_tlv_hdr *tlv)
2948 {
2949 	struct hal_rx_ppdu_start *ppdu_start;
2950 	u16 ppdu_id_diff, ppdu_id, tlv_len;
2951 	u8 *ptr;
2952 
2953 	/* The PPDU id is part of the second TLV; move ptr to the second TLV */
2954 	tlv_len = FIELD_GET(HAL_TLV_HDR_LEN, tlv->tl);
2955 	ptr = (u8 *)tlv;
2956 	ptr += sizeof(*tlv) + tlv_len;
2957 	tlv = (struct hal_tlv_hdr *)ptr;
2958 
2959 	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_PPDU_START)
2960 		return;
2961 
2962 	ptr += sizeof(*tlv);
2963 	ppdu_start = (struct hal_rx_ppdu_start *)ptr;
2964 	ppdu_id = FIELD_GET(HAL_RX_PPDU_START_INFO0_PPDU_ID,
2965 			    __le32_to_cpu(ppdu_start->info0));
2966 
2967 	if (pmon->sw_mon_entries.ppdu_id < ppdu_id) {
2968 		pmon->buf_state = DP_MON_STATUS_LEAD;
2969 		ppdu_id_diff = ppdu_id - pmon->sw_mon_entries.ppdu_id;
2970 		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2971 			pmon->buf_state = DP_MON_STATUS_LAG;
2972 	} else if (pmon->sw_mon_entries.ppdu_id > ppdu_id) {
2973 		pmon->buf_state = DP_MON_STATUS_LAG;
2974 		ppdu_id_diff = pmon->sw_mon_entries.ppdu_id - ppdu_id;
2975 		if (ppdu_id_diff > ATH11K_DP_RX_FULL_MON_PPDU_ID_WRAP)
2976 			pmon->buf_state = DP_MON_STATUS_LEAD;
2977 	}
2978 }
2979 
2980 static enum dp_mon_status_buf_state
2981 ath11k_dp_rx_mon_buf_done(struct ath11k_base *ab, struct hal_srng *srng,
2982 			  struct dp_rxdma_ring *rx_ring)
2983 {
2984 	struct ath11k_skb_rxcb *rxcb;
2985 	struct hal_tlv_hdr *tlv;
2986 	struct sk_buff *skb;
2987 	void *status_desc;
2988 	dma_addr_t paddr;
2989 	u32 cookie;
2990 	int buf_id;
2991 	u8 rbm;
2992 
2993 	status_desc = ath11k_hal_srng_src_next_peek(ab, srng);
2994 	if (!status_desc)
2995 		return DP_MON_STATUS_NO_DMA;
2996 
2997 	ath11k_hal_rx_buf_addr_info_get(status_desc, &paddr, &cookie, &rbm);
2998 
2999 	buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3000 
3001 	spin_lock_bh(&rx_ring->idr_lock);
3002 	skb = idr_find(&rx_ring->bufs_idr, buf_id);
3003 	spin_unlock_bh(&rx_ring->idr_lock);
3004 
3005 	if (!skb)
3006 		return DP_MON_STATUS_NO_DMA;
3007 
3008 	rxcb = ATH11K_SKB_RXCB(skb);
3009 	dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3010 				skb->len + skb_tailroom(skb),
3011 				DMA_FROM_DEVICE);
3012 
3013 	tlv = (struct hal_tlv_hdr *)skb->data;
3014 	if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) != HAL_RX_STATUS_BUFFER_DONE)
3015 		return DP_MON_STATUS_NO_DMA;
3016 
3017 	return DP_MON_STATUS_REPLINISH;
3018 }
3019 
3020 static int ath11k_dp_rx_reap_mon_status_ring(struct ath11k_base *ab, int mac_id,
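/* Reap completed status buffers from the rx monitor status ring into
 * @skb_list, bounded by *budget. Each consumed (or stuck) ring entry is
 * immediately refilled with a freshly allocated status buffer.
 */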
3021 					     int *budget, struct sk_buff_head *skb_list)
3022 {
3023 	struct ath11k *ar;
3024 	const struct ath11k_hw_hal_params *hal_params;
3025 	enum dp_mon_status_buf_state reap_status;
3026 	struct ath11k_pdev_dp *dp;
3027 	struct dp_rxdma_ring *rx_ring;
3028 	struct ath11k_mon_data *pmon;
3029 	struct hal_srng *srng;
3030 	void *rx_mon_status_desc;
3031 	struct sk_buff *skb;
3032 	struct ath11k_skb_rxcb *rxcb;
3033 	struct hal_tlv_hdr *tlv;
3034 	u32 cookie;
3035 	int buf_id, srng_id;
3036 	dma_addr_t paddr;
3037 	u8 rbm;
3038 	int num_buffs_reaped = 0;
3039 
3040 	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
3041 	dp = &ar->dp;
3042 	pmon = &dp->mon_data;
3043 	srng_id = ath11k_hw_mac_id_to_srng_id(&ab->hw_params, mac_id);
3044 	rx_ring = &dp->rx_mon_status_refill_ring[srng_id];
3045 
3046 	srng = &ab->hal.srng_list[rx_ring->refill_buf_ring.ring_id];
3047 
3048 	spin_lock_bh(&srng->lock);
3049 
3050 	ath11k_hal_srng_access_begin(ab, srng);
3051 	while (*budget) {
3052 		*budget -= 1;
3053 		rx_mon_status_desc =
3054 			ath11k_hal_srng_src_peek(ab, srng);
3055 		if (!rx_mon_status_desc) {
3056 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3057 			break;
3058 		}
3059 
3060 		ath11k_hal_rx_buf_addr_info_get(rx_mon_status_desc, &paddr,
3061 						&cookie, &rbm);
3062 		if (paddr) {
3063 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);
3064 
3065 			spin_lock_bh(&rx_ring->idr_lock);
3066 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
3067 			spin_unlock_bh(&rx_ring->idr_lock);
3068 
3069 			if (!skb) {
3070 				ath11k_warn(ab, "rx monitor status with invalid buf_id %d\n",
3071 					    buf_id);
3072 				pmon->buf_state = DP_MON_STATUS_REPLINISH;
3073 				goto move_next;
3074 			}
3075 
3076 			rxcb = ATH11K_SKB_RXCB(skb);
3077 
3078 			dma_sync_single_for_cpu(ab->dev, rxcb->paddr,
3079 						skb->len + skb_tailroom(skb),
3080 						DMA_FROM_DEVICE);
3081 
3082 			tlv = (struct hal_tlv_hdr *)skb->data;
3083 			if (FIELD_GET(HAL_TLV_HDR_TAG, tlv->tl) !=
3084 					HAL_RX_STATUS_BUFFER_DONE) {
3085 				ath11k_warn(ab, "mon status DONE not set %lx, buf_id %d\n",
3086 					    FIELD_GET(HAL_TLV_HDR_TAG,
3087 						      tlv->tl), buf_id);
3088 				/* The RxDMA status done bit might not be set
3089 				 * even though the tail pointer was moved by HW.
3090 				 */
3091 
3092 				/* If the done status is missing:
3093 				 * 1. As per the MAC team's suggestion, when the
3094 				 *    HP + 1 entry is peeked and its DMA is not
3095 				 *    done but the HP + 2 entry's DMA done is
3096 				 *    set, skip the HP + 1 entry and start
3097 				 *    processing it in the next interrupt.
3098 				 * 2. If the HP + 2 entry's DMA done is not set,
3099 				 *    poll on the HP + 1 entry's DMA done bit,
3100 				 *    i.e. check the same buffer again the next
3101 				 *    time the status ring is reaped.
3102 				 */
3103 
3104 				reap_status = ath11k_dp_rx_mon_buf_done(ab, srng,
3105 									rx_ring);
3106 				if (reap_status == DP_MON_STATUS_NO_DMA)
3107 					continue;
3108 
3109 				spin_lock_bh(&rx_ring->idr_lock);
3110 				idr_remove(&rx_ring->bufs_idr, buf_id);
3111 				spin_unlock_bh(&rx_ring->idr_lock);
3112 
3113 				dma_unmap_single(ab->dev, rxcb->paddr,
3114 						 skb->len + skb_tailroom(skb),
3115 						 DMA_FROM_DEVICE);
3116 
3117 				dev_kfree_skb_any(skb);
3118 				pmon->buf_state = DP_MON_STATUS_REPLINISH;
3119 				goto move_next;
3120 			}
3121 
3122 			spin_lock_bh(&rx_ring->idr_lock);
3123 			idr_remove(&rx_ring->bufs_idr, buf_id);
3124 			spin_unlock_bh(&rx_ring->idr_lock);
3125 			if (ab->hw_params.full_monitor_mode) {
3126 				ath11k_dp_rx_mon_update_status_buf_state(pmon, tlv);
3127 				if (paddr == pmon->mon_status_paddr)
3128 					pmon->buf_state = DP_MON_STATUS_MATCH;
3129 			}
3130 
3131 			dma_unmap_single(ab->dev, rxcb->paddr,
3132 					 skb->len + skb_tailroom(skb),
3133 					 DMA_FROM_DEVICE);
3134 
3135 			__skb_queue_tail(skb_list, skb);
3136 		} else {
3137 			pmon->buf_state = DP_MON_STATUS_REPLINISH;
3138 		}
3139 move_next:
3140 		skb = ath11k_dp_rx_alloc_mon_status_buf(ab, rx_ring,
3141 							&buf_id);
3142 
3143 		if (!skb) {
3144 			hal_params = ab->hw_params.hal_params;
3145 			ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, 0, 0,
3146 							hal_params->rx_buf_rbm);
3147 			num_buffs_reaped++;
3148 			break;
3149 		}
3150 		rxcb = ATH11K_SKB_RXCB(skb);
3151 
3152 		cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, mac_id) |
3153 			 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3154 
3155 		ath11k_hal_rx_buf_addr_info_set(rx_mon_status_desc, rxcb->paddr,
3156 						cookie,
3157 						ab->hw_params.hal_params->rx_buf_rbm);
3158 		ath11k_hal_srng_src_get_next_entry(ab, srng);
3159 		num_buffs_reaped++;
3160 	}
3161 	ath11k_hal_srng_access_end(ab, srng);
3162 	spin_unlock_bh(&srng->lock);
3163 
3164 	return num_buffs_reaped;
3165 }
3166 
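/* Fragment reassembly timeout: unless every fragment up to last_frag_no
 * has already arrived, drop whatever was collected for this TID.
 */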
3167 static void ath11k_dp_rx_frag_timer(struct timer_list *timer)
3168 {
3169 	struct dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
3170 						      frag_timer);
3171 
3172 	spin_lock_bh(&rx_tid->ab->base_lock);
3173 	if (rx_tid->last_frag_no &&
3174 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
3175 		spin_unlock_bh(&rx_tid->ab->base_lock);
3176 		return;
3177 	}
3178 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3179 	spin_unlock_bh(&rx_tid->ab->base_lock);
3180 }
3181 
3182 int ath11k_peer_rx_frag_setup(struct ath11k *ar, const u8 *peer_mac, int vdev_id)
3183 {
3184 	struct ath11k_base *ab = ar->ab;
3185 	struct crypto_shash *tfm;
3186 	struct ath11k_peer *peer;
3187 	struct dp_rx_tid *rx_tid;
3188 	int i;
3189 
3190 	tfm = crypto_alloc_shash("michael_mic", 0, 0);
3191 	if (IS_ERR(tfm)) {
3192 		ath11k_warn(ab, "failed to allocate michael_mic shash: %ld\n",
3193 			    PTR_ERR(tfm));
3194 		return PTR_ERR(tfm);
3195 	}
3196 
3197 	spin_lock_bh(&ab->base_lock);
3198 
3199 	peer = ath11k_peer_find(ab, vdev_id, peer_mac);
3200 	if (!peer) {
3201 		ath11k_warn(ab, "failed to find the peer to set up fragment info\n");
3202 		spin_unlock_bh(&ab->base_lock);
3203 		crypto_free_shash(tfm);
3204 		return -ENOENT;
3205 	}
3206 
3207 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
3208 		rx_tid = &peer->rx_tid[i];
3209 		rx_tid->ab = ab;
3210 		timer_setup(&rx_tid->frag_timer, ath11k_dp_rx_frag_timer, 0);
3211 		skb_queue_head_init(&rx_tid->rx_frags);
3212 	}
3213 
3214 	peer->tfm_mmic = tfm;
3215 	peer->dp_setup_done = true;
3216 	spin_unlock_bh(&ab->base_lock);
3217 
3218 	return 0;
3219 }
3220 
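/* Compute the TKIP Michael MIC over @data using the michael_mic shash.
 * The shash is keyed with the 8-byte rx MIC key and seeded with a
 * 16-byte header built from DA, SA, the TID (priority) and three zero
 * padding bytes.
 */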
3221 static int ath11k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
3222 				      struct ieee80211_hdr *hdr, u8 *data,
3223 				      size_t data_len, u8 *mic)
3224 {
3225 	SHASH_DESC_ON_STACK(desc, tfm);
3226 	u8 mic_hdr[16] = {};
3227 	u8 tid = 0;
3228 	int ret;
3229 
3230 	if (!tfm)
3231 		return -EINVAL;
3232 
3233 	desc->tfm = tfm;
3234 
3235 	ret = crypto_shash_setkey(tfm, key, 8);
3236 	if (ret)
3237 		goto out;
3238 
3239 	ret = crypto_shash_init(desc);
3240 	if (ret)
3241 		goto out;
3242 
3243 	/* TKIP MIC header */
3244 	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
3245 	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
3246 	if (ieee80211_is_data_qos(hdr->frame_control))
3247 		tid = ieee80211_get_tid(hdr);
3248 	mic_hdr[12] = tid;
3249 
3250 	ret = crypto_shash_update(desc, mic_hdr, 16);
3251 	if (ret)
3252 		goto out;
3253 	ret = crypto_shash_update(desc, data, data_len);
3254 	if (ret)
3255 		goto out;
3256 	ret = crypto_shash_final(desc, mic);
3257 out:
3258 	shash_desc_zero(desc);
3259 	return ret;
3260 }
3261 
3262 static int ath11k_dp_rx_h_verify_tkip_mic(struct ath11k *ar, struct ath11k_peer *peer,
3263 					  struct sk_buff *msdu)
3264 {
3265 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)msdu->data;
3266 	struct ieee80211_rx_status *rxs = IEEE80211_SKB_RXCB(msdu);
3267 	struct ieee80211_key_conf *key_conf;
3268 	struct ieee80211_hdr *hdr;
3269 	u8 mic[IEEE80211_CCMP_MIC_LEN];
3270 	int head_len, tail_len, ret;
3271 	size_t data_len;
3272 	u32 hdr_len, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3273 	u8 *key, *data;
3274 	u8 key_idx;
3275 
3276 	if (ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc) !=
3277 	    HAL_ENCRYPT_TYPE_TKIP_MIC)
3278 		return 0;
3279 
3280 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3281 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
3282 	head_len = hdr_len + hal_rx_desc_sz + IEEE80211_TKIP_IV_LEN;
3283 	tail_len = IEEE80211_CCMP_MIC_LEN + IEEE80211_TKIP_ICV_LEN + FCS_LEN;
3284 
3285 	if (!is_multicast_ether_addr(hdr->addr1))
3286 		key_idx = peer->ucast_keyidx;
3287 	else
3288 		key_idx = peer->mcast_keyidx;
3289 
3290 	key_conf = peer->keys[key_idx];
3291 
3292 	data = msdu->data + head_len;
3293 	data_len = msdu->len - head_len - tail_len;
3294 	key = &key_conf->key[NL80211_TKIP_DATA_OFFSET_RX_MIC_KEY];
3295 
3296 	ret = ath11k_dp_rx_h_michael_mic(peer->tfm_mmic, key, hdr, data, data_len, mic);
3297 	if (ret || memcmp(mic, data + data_len, IEEE80211_CCMP_MIC_LEN))
3298 		goto mic_fail;
3299 
3300 	return 0;
3301 
3302 mic_fail:
3303 	ATH11K_SKB_RXCB(msdu)->is_first_msdu = true;
3304 	ATH11K_SKB_RXCB(msdu)->is_last_msdu = true;
3305 
3306 	rxs->flag |= RX_FLAG_MMIC_ERROR | RX_FLAG_MMIC_STRIPPED |
3307 		     RX_FLAG_IV_STRIPPED | RX_FLAG_DECRYPTED;
3308 	skb_pull(msdu, hal_rx_desc_sz);
3309 
3310 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
3311 	ath11k_dp_rx_h_undecap(ar, msdu, rx_desc,
3312 			       HAL_ENCRYPT_TYPE_TKIP_MIC, rxs, true);
3313 	ieee80211_rx(ar->hw, msdu);
3314 	return -EINVAL;
3315 }
3316 
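/* Strip crypto material from a single rx fragment as indicated by
 * @flags: the MIC and ICV trailers are trimmed off and the IV is removed
 * by sliding the 802.11 header over it. The HAL rx descriptor is still
 * in front of the frame at this point.
 */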
3317 static void ath11k_dp_rx_h_undecap_frag(struct ath11k *ar, struct sk_buff *msdu,
3318 					enum hal_encrypt_type enctype, u32 flags)
3319 {
3320 	struct ieee80211_hdr *hdr;
3321 	size_t hdr_len;
3322 	size_t crypto_len;
3323 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3324 
3325 	if (!flags)
3326 		return;
3327 
3328 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
3329 
3330 	if (flags & RX_FLAG_MIC_STRIPPED)
3331 		skb_trim(msdu, msdu->len -
3332 			 ath11k_dp_rx_crypto_mic_len(ar, enctype));
3333 
3334 	if (flags & RX_FLAG_ICV_STRIPPED)
3335 		skb_trim(msdu, msdu->len -
3336 			 ath11k_dp_rx_crypto_icv_len(ar, enctype));
3337 
3338 	if (flags & RX_FLAG_IV_STRIPPED) {
3339 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
3340 		crypto_len = ath11k_dp_rx_crypto_param_len(ar, enctype);
3341 
3342 		memmove((void *)msdu->data + hal_rx_desc_sz + crypto_len,
3343 			(void *)msdu->data + hal_rx_desc_sz, hdr_len);
3344 		skb_pull(msdu, crypto_len);
3345 	}
3346 }
3347 
3348 static int ath11k_dp_rx_h_defrag(struct ath11k *ar,
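/* Reassemble the fragments queued on @rx_tid into one MPDU: strip the
 * per-fragment crypto material and 802.11 headers, linearize the payload
 * into the first fragment and verify the Michael MIC when TKIP is in
 * use. On return *defrag_skb points to the rebuilt MPDU, or is NULL if
 * the MIC check failed.
 */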
3349 				 struct ath11k_peer *peer,
3350 				 struct dp_rx_tid *rx_tid,
3351 				 struct sk_buff **defrag_skb)
3352 {
3353 	struct hal_rx_desc *rx_desc;
3354 	struct sk_buff *skb, *first_frag, *last_frag;
3355 	struct ieee80211_hdr *hdr;
3356 	struct rx_attention *rx_attention;
3357 	enum hal_encrypt_type enctype;
3358 	bool is_decrypted = false;
3359 	int msdu_len = 0;
3360 	int extra_space;
3361 	u32 flags, hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3362 
3363 	first_frag = skb_peek(&rx_tid->rx_frags);
3364 	last_frag = skb_peek_tail(&rx_tid->rx_frags);
3365 
3366 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3367 		flags = 0;
3368 		rx_desc = (struct hal_rx_desc *)skb->data;
3369 		hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3370 
3371 		enctype = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, rx_desc);
3372 		if (enctype != HAL_ENCRYPT_TYPE_OPEN) {
3373 			rx_attention = ath11k_dp_rx_get_attention(ar->ab, rx_desc);
3374 			is_decrypted = ath11k_dp_rx_h_attn_is_decrypted(rx_attention);
3375 		}
3376 
3377 		if (is_decrypted) {
3378 			if (skb != first_frag)
3379 				flags |= RX_FLAG_IV_STRIPPED;
3380 			if (skb != last_frag)
3381 				flags |= RX_FLAG_ICV_STRIPPED |
3382 					 RX_FLAG_MIC_STRIPPED;
3383 		}
3384 
3385 		/* RX fragments are always raw packets */
3386 		if (skb != last_frag)
3387 			skb_trim(skb, skb->len - FCS_LEN);
3388 		ath11k_dp_rx_h_undecap_frag(ar, skb, enctype, flags);
3389 
3390 		if (skb != first_frag)
3391 			skb_pull(skb, hal_rx_desc_sz +
3392 				      ieee80211_hdrlen(hdr->frame_control));
3393 		msdu_len += skb->len;
3394 	}
3395 
3396 	extra_space = msdu_len - (DP_RX_BUFFER_SIZE + skb_tailroom(first_frag));
3397 	if (extra_space > 0 &&
3398 	    (pskb_expand_head(first_frag, 0, extra_space, GFP_ATOMIC) < 0))
3399 		return -ENOMEM;
3400 
3401 	__skb_unlink(first_frag, &rx_tid->rx_frags);
3402 	while ((skb = __skb_dequeue(&rx_tid->rx_frags))) {
3403 		skb_put_data(first_frag, skb->data, skb->len);
3404 		dev_kfree_skb_any(skb);
3405 	}
3406 
3407 	hdr = (struct ieee80211_hdr *)(first_frag->data + hal_rx_desc_sz);
3408 	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_FCTL_MOREFRAGS);
3409 	ATH11K_SKB_RXCB(first_frag)->is_frag = 1;
3410 
3411 	if (ath11k_dp_rx_h_verify_tkip_mic(ar, peer, first_frag))
3412 		first_frag = NULL;
3413 
3414 	*defrag_skb = first_frag;
3415 	return 0;
3416 }
3417 
3418 static int ath11k_dp_rx_h_defrag_reo_reinject(struct ath11k *ar, struct dp_rx_tid *rx_tid,
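/* Hand the reassembled MPDU back to the HW: repoint the saved REO
 * destination descriptor's MSDU link entry at @defrag_skb and queue it
 * on the REO reinject (entrance) ring, so the frame passes through REO
 * again like a regular, unfragmented MPDU.
 */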
3419 					      struct sk_buff *defrag_skb)
3420 {
3421 	struct ath11k_base *ab = ar->ab;
3422 	struct ath11k_pdev_dp *dp = &ar->dp;
3423 	struct dp_rxdma_ring *rx_refill_ring = &dp->rx_refill_buf_ring;
3424 	struct hal_rx_desc *rx_desc = (struct hal_rx_desc *)defrag_skb->data;
3425 	struct hal_reo_entrance_ring *reo_ent_ring;
3426 	struct hal_reo_dest_ring *reo_dest_ring;
3427 	struct dp_link_desc_bank *link_desc_banks;
3428 	struct hal_rx_msdu_link *msdu_link;
3429 	struct hal_rx_msdu_details *msdu0;
3430 	struct hal_srng *srng;
3431 	dma_addr_t paddr;
3432 	u32 desc_bank, msdu_info, mpdu_info;
3433 	u32 dst_idx, cookie, hal_rx_desc_sz;
3434 	int ret, buf_id;
3435 
3436 	hal_rx_desc_sz = ab->hw_params.hal_desc_sz;
3437 	link_desc_banks = ab->dp.link_desc_banks;
3438 	reo_dest_ring = rx_tid->dst_ring_desc;
3439 
3440 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3441 	msdu_link = (struct hal_rx_msdu_link *)(link_desc_banks[desc_bank].vaddr +
3442 			(paddr - link_desc_banks[desc_bank].paddr));
3443 	msdu0 = &msdu_link->msdu_link[0];
3444 	dst_idx = FIELD_GET(RX_MSDU_DESC_INFO0_REO_DEST_IND, msdu0->rx_msdu_info.info0);
3445 	memset(msdu0, 0, sizeof(*msdu0));
3446 
3447 	msdu_info = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1) |
3448 		    FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1) |
3449 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_CONTINUATION, 0) |
3450 		    FIELD_PREP(RX_MSDU_DESC_INFO0_MSDU_LENGTH,
3451 			       defrag_skb->len - hal_rx_desc_sz) |
3452 		    FIELD_PREP(RX_MSDU_DESC_INFO0_REO_DEST_IND, dst_idx) |
3453 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_SA, 1) |
3454 		    FIELD_PREP(RX_MSDU_DESC_INFO0_VALID_DA, 1);
3455 	msdu0->rx_msdu_info.info0 = msdu_info;
3456 
3457 	/* change msdu len in hal rx desc */
3458 	ath11k_dp_rxdesc_set_msdu_len(ab, rx_desc, defrag_skb->len - hal_rx_desc_sz);
3459 
3460 	paddr = dma_map_single(ab->dev, defrag_skb->data,
3461 			       defrag_skb->len + skb_tailroom(defrag_skb),
3462 			       DMA_TO_DEVICE);
3463 	if (dma_mapping_error(ab->dev, paddr))
3464 		return -ENOMEM;
3465 
3466 	spin_lock_bh(&rx_refill_ring->idr_lock);
3467 	buf_id = idr_alloc(&rx_refill_ring->bufs_idr, defrag_skb, 0,
3468 			   rx_refill_ring->bufs_max * 3, GFP_ATOMIC);
3469 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3470 	if (buf_id < 0) {
3471 		ret = -ENOMEM;
3472 		goto err_unmap_dma;
3473 	}
3474 
3475 	ATH11K_SKB_RXCB(defrag_skb)->paddr = paddr;
3476 	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, dp->mac_id) |
3477 		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);
3478 
3479 	ath11k_hal_rx_buf_addr_info_set(msdu0, paddr, cookie,
3480 					ab->hw_params.hal_params->rx_buf_rbm);
3481 
3482 	/* Fill mpdu details into reo entrance ring */
3483 	srng = &ab->hal.srng_list[ab->dp.reo_reinject_ring.ring_id];
3484 
3485 	spin_lock_bh(&srng->lock);
3486 	ath11k_hal_srng_access_begin(ab, srng);
3487 
3488 	reo_ent_ring = (struct hal_reo_entrance_ring *)
3489 			ath11k_hal_srng_src_get_next_entry(ab, srng);
3490 	if (!reo_ent_ring) {
3491 		ath11k_hal_srng_access_end(ab, srng);
3492 		spin_unlock_bh(&srng->lock);
3493 		ret = -ENOSPC;
3494 		goto err_free_idr;
3495 	}
3496 	memset(reo_ent_ring, 0, sizeof(*reo_ent_ring));
3497 
3498 	ath11k_hal_rx_reo_ent_paddr_get(ab, reo_dest_ring, &paddr, &desc_bank);
3499 	ath11k_hal_rx_buf_addr_info_set(reo_ent_ring, paddr, desc_bank,
3500 					HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST);
3501 
3502 	mpdu_info = FIELD_PREP(RX_MPDU_DESC_INFO0_MSDU_COUNT, 1) |
3503 		    FIELD_PREP(RX_MPDU_DESC_INFO0_SEQ_NUM, rx_tid->cur_sn) |
3504 		    FIELD_PREP(RX_MPDU_DESC_INFO0_FRAG_FLAG, 0) |
3505 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_SA, 1) |
3506 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_DA, 1) |
3507 		    FIELD_PREP(RX_MPDU_DESC_INFO0_RAW_MPDU, 1) |
3508 		    FIELD_PREP(RX_MPDU_DESC_INFO0_VALID_PN, 1);
3509 
3510 	reo_ent_ring->rx_mpdu_info.info0 = mpdu_info;
3511 	reo_ent_ring->rx_mpdu_info.meta_data = reo_dest_ring->rx_mpdu_info.meta_data;
3512 	reo_ent_ring->queue_addr_lo = reo_dest_ring->queue_addr_lo;
3513 	reo_ent_ring->info0 = FIELD_PREP(HAL_REO_ENTR_RING_INFO0_QUEUE_ADDR_HI,
3514 					 FIELD_GET(HAL_REO_DEST_RING_INFO0_QUEUE_ADDR_HI,
3515 						   reo_dest_ring->info0)) |
3516 			      FIELD_PREP(HAL_REO_ENTR_RING_INFO0_DEST_IND, dst_idx);
3517 	ath11k_hal_srng_access_end(ab, srng);
3518 	spin_unlock_bh(&srng->lock);
3519 
3520 	return 0;
3521 
3522 err_free_idr:
3523 	spin_lock_bh(&rx_refill_ring->idr_lock);
3524 	idr_remove(&rx_refill_ring->bufs_idr, buf_id);
3525 	spin_unlock_bh(&rx_refill_ring->idr_lock);
3526 err_unmap_dma:
3527 	dma_unmap_single(ab->dev, paddr, defrag_skb->len + skb_tailroom(defrag_skb),
3528 			 DMA_TO_DEVICE);
3529 	return ret;
3530 }
3531 
3532 static int ath11k_dp_rx_h_cmp_frags(struct ath11k *ar,
3533 				    struct sk_buff *a, struct sk_buff *b)
3534 {
3535 	int frag1, frag2;
3536 
3537 	frag1 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, a);
3538 	frag2 = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, b);
3539 
3540 	return frag1 - frag2;
3541 }
3542 
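/* Insert cur_frag into frag_list, keeping the list sorted by fragment
 * number.
 */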
3543 static void ath11k_dp_rx_h_sort_frags(struct ath11k *ar,
3544 				      struct sk_buff_head *frag_list,
3545 				      struct sk_buff *cur_frag)
3546 {
3547 	struct sk_buff *skb;
3548 	int cmp;
3549 
3550 	skb_queue_walk(frag_list, skb) {
3551 		cmp = ath11k_dp_rx_h_cmp_frags(ar, skb, cur_frag);
3552 		if (cmp < 0)
3553 			continue;
3554 		__skb_queue_before(frag_list, skb, cur_frag);
3555 		return;
3556 	}
3557 	__skb_queue_tail(frag_list, cur_frag);
3558 }
3559 
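/* Extract the 48-bit CCMP/GCMP packet number from the crypto header;
 * bytes 2 and 3 (reserved and key ID) are skipped.
 */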
3560 static u64 ath11k_dp_rx_h_get_pn(struct ath11k *ar, struct sk_buff *skb)
3561 {
3562 	struct ieee80211_hdr *hdr;
3563 	u64 pn = 0;
3564 	u8 *ehdr;
3565 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3566 
3567 	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
3568 	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
3569 
3570 	pn = ehdr[0];
3571 	pn |= (u64)ehdr[1] << 8;
3572 	pn |= (u64)ehdr[4] << 16;
3573 	pn |= (u64)ehdr[5] << 24;
3574 	pn |= (u64)ehdr[6] << 32;
3575 	pn |= (u64)ehdr[7] << 40;
3576 
3577 	return pn;
3578 }
3579 
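/* Verify that the PNs of the queued fragments are strictly sequential,
 * which guards against reordered or injected fragments. Ciphers other
 * than CCMP/GCMP carry no PN that can be checked here.
 */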
3580 static bool
3581 ath11k_dp_rx_h_defrag_validate_incr_pn(struct ath11k *ar, struct dp_rx_tid *rx_tid)
3582 {
3583 	enum hal_encrypt_type encrypt_type;
3584 	struct sk_buff *first_frag, *skb;
3585 	struct hal_rx_desc *desc;
3586 	u64 last_pn;
3587 	u64 cur_pn;
3588 
3589 	first_frag = skb_peek(&rx_tid->rx_frags);
3590 	desc = (struct hal_rx_desc *)first_frag->data;
3591 
3592 	encrypt_type = ath11k_dp_rx_h_mpdu_start_enctype(ar->ab, desc);
3593 	if (encrypt_type != HAL_ENCRYPT_TYPE_CCMP_128 &&
3594 	    encrypt_type != HAL_ENCRYPT_TYPE_CCMP_256 &&
3595 	    encrypt_type != HAL_ENCRYPT_TYPE_GCMP_128 &&
3596 	    encrypt_type != HAL_ENCRYPT_TYPE_AES_GCMP_256)
3597 		return true;
3598 
3599 	last_pn = ath11k_dp_rx_h_get_pn(ar, first_frag);
3600 	skb_queue_walk(&rx_tid->rx_frags, skb) {
3601 		if (skb == first_frag)
3602 			continue;
3603 
3604 		cur_pn = ath11k_dp_rx_h_get_pn(ar, skb);
3605 		if (cur_pn != last_pn + 1)
3606 			return false;
3607 		last_pn = cur_pn;
3608 	}
3609 	return true;
3610 }
3611 
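/* Handle a single rx fragment: validate it, queue it on the TID's
 * fragment list in fragment-number order, and once all fragments up to
 * the last one (no more-frags bit) have arrived, verify the PN
 * sequence, reassemble the MPDU and reinject it through the REO
 * entrance ring. An incomplete sequence is held until the fragment
 * timer expires.
 */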
3612 static int ath11k_dp_rx_frag_h_mpdu(struct ath11k *ar,
3613 				    struct sk_buff *msdu,
3614 				    u32 *ring_desc)
3615 {
3616 	struct ath11k_base *ab = ar->ab;
3617 	struct hal_rx_desc *rx_desc;
3618 	struct ath11k_peer *peer;
3619 	struct dp_rx_tid *rx_tid;
3620 	struct sk_buff *defrag_skb = NULL;
3621 	u32 peer_id;
3622 	u16 seqno, frag_no;
3623 	u8 tid;
3624 	int ret = 0;
3625 	bool more_frags;
3626 	bool is_mcbc;
3627 
3628 	rx_desc = (struct hal_rx_desc *)msdu->data;
3629 	peer_id = ath11k_dp_rx_h_mpdu_start_peer_id(ar->ab, rx_desc);
3630 	tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, rx_desc);
3631 	seqno = ath11k_dp_rx_h_mpdu_start_seq_no(ar->ab, rx_desc);
3632 	frag_no = ath11k_dp_rx_h_mpdu_start_frag_no(ar->ab, msdu);
3633 	more_frags = ath11k_dp_rx_h_mpdu_start_more_frags(ar->ab, msdu);
3634 	is_mcbc = ath11k_dp_rx_h_attn_is_mcbc(ar->ab, rx_desc);
3635 
3636 	/* Multicast/Broadcast fragments are not expected */
3637 	if (is_mcbc)
3638 		return -EINVAL;
3639 
3640 	if (!ath11k_dp_rx_h_mpdu_start_seq_ctrl_valid(ar->ab, rx_desc) ||
3641 	    !ath11k_dp_rx_h_mpdu_start_fc_valid(ar->ab, rx_desc) ||
3642 	    tid > IEEE80211_NUM_TIDS)
3643 		return -EINVAL;
3644 
3645 	/* Received an unfragmented packet on the reo
3646 	 * exception ring; this shouldn't happen, as
3647 	 * such packets typically come from the
3648 	 * reo2sw srngs.
3649 	 */
3650 	if (WARN_ON_ONCE(!frag_no && !more_frags))
3651 		return -EINVAL;
3652 
3653 	spin_lock_bh(&ab->base_lock);
3654 	peer = ath11k_peer_find_by_id(ab, peer_id);
3655 	if (!peer) {
3656 		ath11k_warn(ab, "failed to find the peer to de-fragment the received fragment, peer_id %d\n",
3657 			    peer_id);
3658 		ret = -ENOENT;
3659 		goto out_unlock;
3660 	}
3661 	if (!peer->dp_setup_done) {
3662 		ath11k_warn(ab, "peer %pM [%d] has an uninitialized datapath\n",
3663 			    peer->addr, peer_id);
3664 		ret = -ENOENT;
3665 		goto out_unlock;
3666 	}
3667 
3668 	rx_tid = &peer->rx_tid[tid];
3669 
3670 	if ((!skb_queue_empty(&rx_tid->rx_frags) && seqno != rx_tid->cur_sn) ||
3671 	    skb_queue_empty(&rx_tid->rx_frags)) {
3672 		/* Flush stored fragments and start a new sequence */
3673 		ath11k_dp_rx_frags_cleanup(rx_tid, true);
3674 		rx_tid->cur_sn = seqno;
3675 	}
3676 
3677 	if (rx_tid->rx_frag_bitmap & BIT(frag_no)) {
3678 		/* Fragment already present */
3679 		ret = -EINVAL;
3680 		goto out_unlock;
3681 	}
3682 
3683 	if (!rx_tid->rx_frag_bitmap || (frag_no > __fls(rx_tid->rx_frag_bitmap)))
3684 		__skb_queue_tail(&rx_tid->rx_frags, msdu);
3685 	else
3686 		ath11k_dp_rx_h_sort_frags(ar, &rx_tid->rx_frags, msdu);
3687 
3688 	rx_tid->rx_frag_bitmap |= BIT(frag_no);
3689 	if (!more_frags)
3690 		rx_tid->last_frag_no = frag_no;
3691 
3692 	if (frag_no == 0) {
3693 		rx_tid->dst_ring_desc = kmemdup(ring_desc,
3694 						sizeof(*rx_tid->dst_ring_desc),
3695 						GFP_ATOMIC);
3696 		if (!rx_tid->dst_ring_desc) {
3697 			ret = -ENOMEM;
3698 			goto out_unlock;
3699 		}
3700 	} else {
3701 		ath11k_dp_rx_link_desc_return(ab, ring_desc,
3702 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3703 	}
3704 
3705 	if (!rx_tid->last_frag_no ||
3706 	    rx_tid->rx_frag_bitmap != GENMASK(rx_tid->last_frag_no, 0)) {
3707 		mod_timer(&rx_tid->frag_timer, jiffies +
3708 					       ATH11K_DP_RX_FRAGMENT_TIMEOUT_MS);
3709 		goto out_unlock;
3710 	}
3711 
3712 	spin_unlock_bh(&ab->base_lock);
3713 	timer_delete_sync(&rx_tid->frag_timer);
3714 	spin_lock_bh(&ab->base_lock);
3715 
3716 	peer = ath11k_peer_find_by_id(ab, peer_id);
3717 	if (!peer)
3718 		goto err_frags_cleanup;
3719 
3720 	if (!ath11k_dp_rx_h_defrag_validate_incr_pn(ar, rx_tid))
3721 		goto err_frags_cleanup;
3722 
3723 	if (ath11k_dp_rx_h_defrag(ar, peer, rx_tid, &defrag_skb))
3724 		goto err_frags_cleanup;
3725 
3726 	if (!defrag_skb)
3727 		goto err_frags_cleanup;
3728 
3729 	if (ath11k_dp_rx_h_defrag_reo_reinject(ar, rx_tid, defrag_skb))
3730 		goto err_frags_cleanup;
3731 
3732 	ath11k_dp_rx_frags_cleanup(rx_tid, false);
3733 	goto out_unlock;
3734 
3735 err_frags_cleanup:
3736 	dev_kfree_skb_any(defrag_skb);
3737 	ath11k_dp_rx_frags_cleanup(rx_tid, true);
3738 out_unlock:
3739 	spin_unlock_bh(&ab->base_lock);
3740 	return ret;
3741 }
3742 
3743 static int
3744 ath11k_dp_process_rx_err_buf(struct ath11k *ar, u32 *ring_desc, int buf_id, bool drop)
3745 {
3746 	struct ath11k_pdev_dp *dp = &ar->dp;
3747 	struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
3748 	struct sk_buff *msdu;
3749 	struct ath11k_skb_rxcb *rxcb;
3750 	struct hal_rx_desc *rx_desc;
3751 	u8 *hdr_status;
3752 	u16 msdu_len;
3753 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3754 
3755 	spin_lock_bh(&rx_ring->idr_lock);
3756 	msdu = idr_find(&rx_ring->bufs_idr, buf_id);
3757 	if (!msdu) {
3758 		ath11k_warn(ar->ab, "rx err buf with invalid buf_id %d\n",
3759 			    buf_id);
3760 		spin_unlock_bh(&rx_ring->idr_lock);
3761 		return -EINVAL;
3762 	}
3763 
3764 	idr_remove(&rx_ring->bufs_idr, buf_id);
3765 	spin_unlock_bh(&rx_ring->idr_lock);
3766 
3767 	rxcb = ATH11K_SKB_RXCB(msdu);
3768 	dma_unmap_single(ar->ab->dev, rxcb->paddr,
3769 			 msdu->len + skb_tailroom(msdu),
3770 			 DMA_FROM_DEVICE);
3771 
3772 	if (drop) {
3773 		dev_kfree_skb_any(msdu);
3774 		return 0;
3775 	}
3776 
3777 	rcu_read_lock();
3778 	if (!rcu_dereference(ar->ab->pdevs_active[ar->pdev_idx])) {
3779 		dev_kfree_skb_any(msdu);
3780 		goto exit;
3781 	}
3782 
3783 	if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
3784 		dev_kfree_skb_any(msdu);
3785 		goto exit;
3786 	}
3787 
3788 	rx_desc = (struct hal_rx_desc *)msdu->data;
3789 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, rx_desc);
3790 	if ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE) {
3791 		hdr_status = ath11k_dp_rx_h_80211_hdr(ar->ab, rx_desc);
3792 		ath11k_warn(ar->ab, "invalid msdu len %u", msdu_len);
3793 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", hdr_status,
3794 				sizeof(struct ieee80211_hdr));
3795 		ath11k_dbg_dump(ar->ab, ATH11K_DBG_DATA, NULL, "", rx_desc,
3796 				sizeof(struct hal_rx_desc));
3797 		dev_kfree_skb_any(msdu);
3798 		goto exit;
3799 	}
3800 
3801 	skb_put(msdu, hal_rx_desc_sz + msdu_len);
3802 
3803 	if (ath11k_dp_rx_frag_h_mpdu(ar, msdu, ring_desc)) {
3804 		dev_kfree_skb_any(msdu);
3805 		ath11k_dp_rx_link_desc_return(ar->ab, ring_desc,
3806 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3807 	}
3808 exit:
3809 	rcu_read_unlock();
3810 	return 0;
3811 }
3812 
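/* NAPI poll handler for the REO exception ring. Only rx fragments with
 * a single MSDU per link descriptor are processed further; all other
 * MSDUs are dropped and their link descriptors returned to the WBM idle
 * list.
 */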
3813 int ath11k_dp_process_rx_err(struct ath11k_base *ab, struct napi_struct *napi,
3814 			     int budget)
3815 {
3816 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
3817 	struct dp_link_desc_bank *link_desc_banks;
3818 	enum hal_rx_buf_return_buf_manager rbm;
3819 	int tot_n_bufs_reaped, quota, ret, i;
3820 	int n_bufs_reaped[MAX_RADIOS] = {};
3821 	struct dp_rxdma_ring *rx_ring;
3822 	struct dp_srng *reo_except;
3823 	u32 desc_bank, num_msdus;
3824 	struct hal_srng *srng;
3825 	struct ath11k_dp *dp;
3826 	void *link_desc_va;
3827 	int buf_id, mac_id;
3828 	struct ath11k *ar;
3829 	dma_addr_t paddr;
3830 	u32 *desc;
3831 	bool is_frag;
3832 	u8 drop = 0;
3833 
3834 	tot_n_bufs_reaped = 0;
3835 	quota = budget;
3836 
3837 	dp = &ab->dp;
3838 	reo_except = &dp->reo_except_ring;
3839 	link_desc_banks = dp->link_desc_banks;
3840 
3841 	srng = &ab->hal.srng_list[reo_except->ring_id];
3842 
3843 	spin_lock_bh(&srng->lock);
3844 
3845 	ath11k_hal_srng_access_begin(ab, srng);
3846 
3847 	while (budget &&
3848 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
3849 		struct hal_reo_dest_ring *reo_desc = (struct hal_reo_dest_ring *)desc;
3850 
3851 		ab->soc_stats.err_ring_pkts++;
3852 		ret = ath11k_hal_desc_reo_parse_err(ab, desc, &paddr,
3853 						    &desc_bank);
3854 		if (ret) {
3855 			ath11k_warn(ab, "failed to parse error reo desc %d\n",
3856 				    ret);
3857 			continue;
3858 		}
3859 		link_desc_va = link_desc_banks[desc_bank].vaddr +
3860 			       (paddr - link_desc_banks[desc_bank].paddr);
3861 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus, msdu_cookies,
3862 						 &rbm);
3863 		if (rbm != HAL_RX_BUF_RBM_WBM_IDLE_DESC_LIST &&
3864 		    rbm != HAL_RX_BUF_RBM_SW1_BM &&
3865 		    rbm != HAL_RX_BUF_RBM_SW3_BM) {
3866 			ab->soc_stats.invalid_rbm++;
3867 			ath11k_warn(ab, "invalid return buffer manager %d\n", rbm);
3868 			ath11k_dp_rx_link_desc_return(ab, desc,
3869 						      HAL_WBM_REL_BM_ACT_REL_MSDU);
3870 			continue;
3871 		}
3872 
3873 		is_frag = !!(reo_desc->rx_mpdu_info.info0 & RX_MPDU_DESC_INFO0_FRAG_FLAG);
3874 
3875 		/* Process only rx fragments with one msdu per link desc below, and
3876 		 * drop msdus that are indicated with error reasons.
3877 		 */
3878 		if (!is_frag || num_msdus > 1) {
3879 			drop = 1;
3880 			/* Return the link desc back to wbm idle list */
3881 			ath11k_dp_rx_link_desc_return(ab, desc,
3882 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
3883 		}
3884 
3885 		for (i = 0; i < num_msdus; i++) {
3886 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
3887 					   msdu_cookies[i]);
3888 
3889 			mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID,
3890 					   msdu_cookies[i]);
3891 
3892 			ar = ab->pdevs[mac_id].ar;
3893 
3894 			if (!ath11k_dp_process_rx_err_buf(ar, desc, buf_id, drop)) {
3895 				n_bufs_reaped[mac_id]++;
3896 				tot_n_bufs_reaped++;
3897 			}
3898 		}
3899 
3900 		if (tot_n_bufs_reaped >= quota) {
3901 			tot_n_bufs_reaped = quota;
3902 			goto exit;
3903 		}
3904 
3905 		budget = quota - tot_n_bufs_reaped;
3906 	}
3907 
3908 exit:
3909 	ath11k_hal_srng_access_end(ab, srng);
3910 
3911 	spin_unlock_bh(&srng->lock);
3912 
3913 	for (i = 0; i < ab->num_radios; i++) {
3914 		if (!n_bufs_reaped[i])
3915 			continue;
3916 
3917 		ar = ab->pdevs[i].ar;
3918 		rx_ring = &ar->dp.rx_refill_buf_ring;
3919 
3920 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, n_bufs_reaped[i],
3921 					   ab->hw_params.hal_params->rx_buf_rbm);
3922 	}
3923 
3924 	return tot_n_bufs_reaped;
3925 }
3926 
3927 static void ath11k_dp_rx_null_q_desc_sg_drop(struct ath11k *ar,
3928 					     int msdu_len,
3929 					     struct sk_buff_head *msdu_list)
3930 {
3931 	struct sk_buff *skb, *tmp;
3932 	struct ath11k_skb_rxcb *rxcb;
3933 	int n_buffs;
3934 
3935 	n_buffs = DIV_ROUND_UP(msdu_len,
3936 			       (DP_RX_BUFFER_SIZE - ar->ab->hw_params.hal_desc_sz));
3937 
3938 	skb_queue_walk_safe(msdu_list, skb, tmp) {
3939 		rxcb = ATH11K_SKB_RXCB(skb);
3940 		if (rxcb->err_rel_src == HAL_WBM_REL_SRC_MODULE_REO &&
3941 		    rxcb->err_code == HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO) {
3942 			if (!n_buffs)
3943 				break;
3944 			__skb_unlink(skb, msdu_list);
3945 			dev_kfree_skb_any(skb);
3946 			n_buffs--;
3947 		}
3948 	}
3949 }
3950 
3951 static int ath11k_dp_rx_h_null_q_desc(struct ath11k *ar, struct sk_buff *msdu,
3952 				      struct ieee80211_rx_status *status,
3953 				      struct sk_buff_head *msdu_list)
3954 {
3955 	u16 msdu_len;
3956 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
3957 	struct rx_attention *rx_attention;
3958 	u8 l3pad_bytes;
3959 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
3960 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
3961 
3962 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
3963 
3964 	if (!rxcb->is_frag && ((msdu_len + hal_rx_desc_sz) > DP_RX_BUFFER_SIZE)) {
3965 		/* First buffer will be freed by the caller, so deduct its length */
3966 		msdu_len = msdu_len - (DP_RX_BUFFER_SIZE - hal_rx_desc_sz);
3967 		ath11k_dp_rx_null_q_desc_sg_drop(ar, msdu_len, msdu_list);
3968 		return -EINVAL;
3969 	}
3970 
3971 	rx_attention = ath11k_dp_rx_get_attention(ar->ab, desc);
3972 	if (!ath11k_dp_rx_h_attn_msdu_done(rx_attention)) {
3973 		ath11k_warn(ar->ab,
3974 			    "msdu_done bit not set in null_q_des processing\n");
3975 		__skb_queue_purge(msdu_list);
3976 		return -EIO;
3977 	}
3978 
3979 	/* Handle NULL queue descriptor violations arising out of a missing
3980 	 * REO queue for a given peer or TID. This typically happens if a
3981 	 * packet is received on a QoS-enabled TID before the ADDBA
3982 	 * negotiation for that TID completes and sets up the TID queue, or
3983 	 * for MC/BC frames that are not routed to the non-QoS TID queue in
3984 	 * the absence of any other default TID queue. This error can show
3985 	 * up in both the REO destination and WBM release rings.
3986 	 */
3987 
3988 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
3989 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
3990 
3991 	if (rxcb->is_frag) {
3992 		skb_pull(msdu, hal_rx_desc_sz);
3993 	} else {
3994 		l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
3995 
3996 		if ((hal_rx_desc_sz + l3pad_bytes + msdu_len) > DP_RX_BUFFER_SIZE)
3997 			return -EINVAL;
3998 
3999 		skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4000 		skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4001 	}
4002 	ath11k_dp_rx_h_ppdu(ar, desc, status);
4003 
4004 	ath11k_dp_rx_h_mpdu(ar, msdu, desc, status);
4005 
4006 	rxcb->tid = ath11k_dp_rx_h_mpdu_start_tid(ar->ab, desc);
4007 
4008 	/* Note that the caller still has access to the msdu and completes
4009 	 * rx with mac80211, so there is no need to clean up amsdu_list here.
4010 	 */
4011 
4012 	return 0;
4013 }
4014 
4015 static bool ath11k_dp_rx_h_reo_err(struct ath11k *ar, struct sk_buff *msdu,
4016 				   struct ieee80211_rx_status *status,
4017 				   struct sk_buff_head *msdu_list)
4018 {
4019 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4020 	bool drop = false;
4021 
4022 	ar->ab->soc_stats.reo_error[rxcb->err_code]++;
4023 
4024 	switch (rxcb->err_code) {
4025 	case HAL_REO_DEST_RING_ERROR_CODE_DESC_ADDR_ZERO:
4026 		if (ath11k_dp_rx_h_null_q_desc(ar, msdu, status, msdu_list))
4027 			drop = true;
4028 		break;
4029 	case HAL_REO_DEST_RING_ERROR_CODE_PN_CHECK_FAILED:
4030 		/* TODO: Do not drop PN failed packets in the driver;
4031 		 * instead, it is good to drop such packets in mac80211
4032 		 * after incrementing the replay counters.
4033 		 */
4034 		fallthrough;
4035 	default:
4036 		/* TODO: Review other errors and process them to mac80211
4037 		/* TODO: Review other errors and report them to mac80211
4038 		 * as appropriate.
4039 		drop = true;
4040 		break;
4041 	}
4042 
4043 	return drop;
4044 }
4045 
4046 static void ath11k_dp_rx_h_tkip_mic_err(struct ath11k *ar, struct sk_buff *msdu,
4047 					struct ieee80211_rx_status *status)
4048 {
4049 	u16 msdu_len;
4050 	struct hal_rx_desc *desc = (struct hal_rx_desc *)msdu->data;
4051 	u8 l3pad_bytes;
4052 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4053 	u32 hal_rx_desc_sz = ar->ab->hw_params.hal_desc_sz;
4054 
4055 	rxcb->is_first_msdu = ath11k_dp_rx_h_msdu_end_first_msdu(ar->ab, desc);
4056 	rxcb->is_last_msdu = ath11k_dp_rx_h_msdu_end_last_msdu(ar->ab, desc);
4057 
4058 	l3pad_bytes = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, desc);
4059 	msdu_len = ath11k_dp_rx_h_msdu_start_msdu_len(ar->ab, desc);
4060 	skb_put(msdu, hal_rx_desc_sz + l3pad_bytes + msdu_len);
4061 	skb_pull(msdu, hal_rx_desc_sz + l3pad_bytes);
4062 
4063 	ath11k_dp_rx_h_ppdu(ar, desc, status);
4064 
4065 	status->flag |= (RX_FLAG_MMIC_STRIPPED | RX_FLAG_MMIC_ERROR |
4066 			 RX_FLAG_DECRYPTED);
4067 
4068 	ath11k_dp_rx_h_undecap(ar, msdu, desc,
4069 			       HAL_ENCRYPT_TYPE_TKIP_MIC, status, false);
4070 }
4071 
4072 static bool ath11k_dp_rx_h_rxdma_err(struct ath11k *ar,  struct sk_buff *msdu,
4073 				     struct ieee80211_rx_status *status)
4074 {
4075 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4076 	bool drop = false;
4077 
4078 	ar->ab->soc_stats.rxdma_error[rxcb->err_code]++;
4079 
4080 	switch (rxcb->err_code) {
4081 	case HAL_REO_ENTR_RING_RXDMA_ECODE_TKIP_MIC_ERR:
4082 		ath11k_dp_rx_h_tkip_mic_err(ar, msdu, status);
4083 		break;
4084 	default:
4085 		/* TODO: Review other rxdma error code to check if anything is
4086 		/* TODO: Review the other rxdma error codes to check if anything
4087 		 * is worth reporting to mac80211.
4088 		drop = true;
4089 		break;
4090 	}
4091 
4092 	return drop;
4093 }
4094 
4095 static void ath11k_dp_rx_wbm_err(struct ath11k *ar,
4096 				 struct napi_struct *napi,
4097 				 struct sk_buff *msdu,
4098 				 struct sk_buff_head *msdu_list)
4099 {
4100 	struct ath11k_skb_rxcb *rxcb = ATH11K_SKB_RXCB(msdu);
4101 	struct ieee80211_rx_status rxs = {};
4102 	bool drop = true;
4103 
4104 	switch (rxcb->err_rel_src) {
4105 	case HAL_WBM_REL_SRC_MODULE_REO:
4106 		drop = ath11k_dp_rx_h_reo_err(ar, msdu, &rxs, msdu_list);
4107 		break;
4108 	case HAL_WBM_REL_SRC_MODULE_RXDMA:
4109 		drop = ath11k_dp_rx_h_rxdma_err(ar, msdu, &rxs);
4110 		break;
4111 	default:
4112 		/* msdu will get freed */
4113 		break;
4114 	}
4115 
4116 	if (drop) {
4117 		dev_kfree_skb_any(msdu);
4118 		return;
4119 	}
4120 
4121 	ath11k_dp_rx_deliver_msdu(ar, napi, msdu, &rxs);
4122 }
4123 
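/* NAPI poll handler for the WBM error release ring. Reaped MSDUs are
 * collected per pdev, the rx buffers are replenished, and each MSDU is
 * then dropped or fixed up and delivered to mac80211 based on the
 * REO/RXDMA error code it carries.
 */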
4124 int ath11k_dp_rx_process_wbm_err(struct ath11k_base *ab,
4125 				 struct napi_struct *napi, int budget)
4126 {
4127 	struct ath11k *ar;
4128 	struct ath11k_dp *dp = &ab->dp;
4129 	struct dp_rxdma_ring *rx_ring;
4130 	struct hal_rx_wbm_rel_info err_info;
4131 	struct hal_srng *srng;
4132 	struct sk_buff *msdu;
4133 	struct sk_buff_head msdu_list[MAX_RADIOS];
4134 	struct ath11k_skb_rxcb *rxcb;
4135 	u32 *rx_desc;
4136 	int buf_id, mac_id;
4137 	int num_buffs_reaped[MAX_RADIOS] = {};
4138 	int total_num_buffs_reaped = 0;
4139 	int ret, i;
4140 
4141 	for (i = 0; i < ab->num_radios; i++)
4142 		__skb_queue_head_init(&msdu_list[i]);
4143 
4144 	srng = &ab->hal.srng_list[dp->rx_rel_ring.ring_id];
4145 
4146 	spin_lock_bh(&srng->lock);
4147 
4148 	ath11k_hal_srng_access_begin(ab, srng);
4149 
4150 	while (budget) {
4151 		rx_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng);
4152 		if (!rx_desc)
4153 			break;
4154 
4155 		ret = ath11k_hal_wbm_desc_parse_err(ab, rx_desc, &err_info);
4156 		if (ret) {
4157 			ath11k_warn(ab,
4158 				    "failed to parse rx error in wbm_rel ring desc %d\n",
4159 				    ret);
4160 			continue;
4161 		}
4162 
4163 		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, err_info.cookie);
4164 		mac_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_PDEV_ID, err_info.cookie);
4165 
4166 		ar = ab->pdevs[mac_id].ar;
4167 		rx_ring = &ar->dp.rx_refill_buf_ring;
4168 
4169 		spin_lock_bh(&rx_ring->idr_lock);
4170 		msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4171 		if (!msdu) {
4172 			ath11k_warn(ab, "frame rx with invalid buf_id %d pdev %d\n",
4173 				    buf_id, mac_id);
4174 			spin_unlock_bh(&rx_ring->idr_lock);
4175 			continue;
4176 		}
4177 
4178 		idr_remove(&rx_ring->bufs_idr, buf_id);
4179 		spin_unlock_bh(&rx_ring->idr_lock);
4180 
4181 		rxcb = ATH11K_SKB_RXCB(msdu);
4182 		dma_unmap_single(ab->dev, rxcb->paddr,
4183 				 msdu->len + skb_tailroom(msdu),
4184 				 DMA_FROM_DEVICE);
4185 
4186 		num_buffs_reaped[mac_id]++;
4187 		total_num_buffs_reaped++;
4188 		budget--;
4189 
4190 		if (err_info.push_reason !=
4191 		    HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4192 			dev_kfree_skb_any(msdu);
4193 			continue;
4194 		}
4195 
4196 		rxcb->err_rel_src = err_info.err_rel_src;
4197 		rxcb->err_code = err_info.err_code;
4198 		rxcb->rx_desc = (struct hal_rx_desc *)msdu->data;
4199 		__skb_queue_tail(&msdu_list[mac_id], msdu);
4200 	}
4201 
4202 	ath11k_hal_srng_access_end(ab, srng);
4203 
4204 	spin_unlock_bh(&srng->lock);
4205 
4206 	if (!total_num_buffs_reaped)
4207 		goto done;
4208 
4209 	for (i = 0; i < ab->num_radios; i++) {
4210 		if (!num_buffs_reaped[i])
4211 			continue;
4212 
4213 		ar = ab->pdevs[i].ar;
4214 		rx_ring = &ar->dp.rx_refill_buf_ring;
4215 
4216 		ath11k_dp_rxbufs_replenish(ab, i, rx_ring, num_buffs_reaped[i],
4217 					   ab->hw_params.hal_params->rx_buf_rbm);
4218 	}
4219 
4220 	rcu_read_lock();
4221 	for (i = 0; i < ab->num_radios; i++) {
4222 		if (!rcu_dereference(ab->pdevs_active[i])) {
4223 			__skb_queue_purge(&msdu_list[i]);
4224 			continue;
4225 		}
4226 
4227 		ar = ab->pdevs[i].ar;
4228 
4229 		if (test_bit(ATH11K_CAC_RUNNING, &ar->dev_flags)) {
4230 			__skb_queue_purge(&msdu_list[i]);
4231 			continue;
4232 		}
4233 
4234 		while ((msdu = __skb_dequeue(&msdu_list[i])) != NULL)
4235 			ath11k_dp_rx_wbm_err(ar, napi, msdu, &msdu_list[i]);
4236 	}
4237 	rcu_read_unlock();
4238 done:
4239 	return total_num_buffs_reaped;
4240 }
4241 
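/* Drain the per-pdev RXDMA error destination ring: account the error
 * code, free every MSDU referenced by the link descriptor, return the
 * descriptor to the WBM idle list, and replenish the freed rx buffers.
 */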
4242 int ath11k_dp_process_rxdma_err(struct ath11k_base *ab, int mac_id, int budget)
4243 {
4244 	struct ath11k *ar;
4245 	struct dp_srng *err_ring;
4246 	struct dp_rxdma_ring *rx_ring;
4247 	struct dp_link_desc_bank *link_desc_banks = ab->dp.link_desc_banks;
4248 	struct hal_srng *srng;
4249 	u32 msdu_cookies[HAL_NUM_RX_MSDUS_PER_LINK_DESC];
4250 	enum hal_rx_buf_return_buf_manager rbm;
4251 	enum hal_reo_entr_rxdma_ecode rxdma_err_code;
4252 	struct ath11k_skb_rxcb *rxcb;
4253 	struct sk_buff *skb;
4254 	struct hal_reo_entrance_ring *entr_ring;
4255 	void *desc;
4256 	int num_buf_freed = 0;
4257 	int quota = budget;
4258 	dma_addr_t paddr;
4259 	u32 desc_bank;
4260 	void *link_desc_va;
4261 	int num_msdus;
4262 	int i;
4263 	int buf_id;
4264 
4265 	ar = ab->pdevs[ath11k_hw_mac_id_to_pdev_id(&ab->hw_params, mac_id)].ar;
4266 	err_ring = &ar->dp.rxdma_err_dst_ring[ath11k_hw_mac_id_to_srng_id(&ab->hw_params,
4267 									  mac_id)];
4268 	rx_ring = &ar->dp.rx_refill_buf_ring;
4269 
4270 	srng = &ab->hal.srng_list[err_ring->ring_id];
4271 
4272 	spin_lock_bh(&srng->lock);
4273 
4274 	ath11k_hal_srng_access_begin(ab, srng);
4275 
4276 	while (quota-- &&
4277 	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4278 		ath11k_hal_rx_reo_ent_paddr_get(ab, desc, &paddr, &desc_bank);
4279 
4280 		entr_ring = (struct hal_reo_entrance_ring *)desc;
4281 		rxdma_err_code =
4282 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4283 				  entr_ring->info1);
4284 		ab->soc_stats.rxdma_error[rxdma_err_code]++;
4285 
4286 		link_desc_va = link_desc_banks[desc_bank].vaddr +
4287 			       (paddr - link_desc_banks[desc_bank].paddr);
4288 		ath11k_hal_rx_msdu_link_info_get(link_desc_va, &num_msdus,
4289 						 msdu_cookies, &rbm);
4290 
4291 		for (i = 0; i < num_msdus; i++) {
4292 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4293 					   msdu_cookies[i]);
4294 
4295 			spin_lock_bh(&rx_ring->idr_lock);
4296 			skb = idr_find(&rx_ring->bufs_idr, buf_id);
4297 			if (!skb) {
4298 				ath11k_warn(ab, "rxdma error with invalid buf_id %d\n",
4299 					    buf_id);
4300 				spin_unlock_bh(&rx_ring->idr_lock);
4301 				continue;
4302 			}
4303 
4304 			idr_remove(&rx_ring->bufs_idr, buf_id);
4305 			spin_unlock_bh(&rx_ring->idr_lock);
4306 
4307 			rxcb = ATH11K_SKB_RXCB(skb);
4308 			dma_unmap_single(ab->dev, rxcb->paddr,
4309 					 skb->len + skb_tailroom(skb),
4310 					 DMA_FROM_DEVICE);
4311 			dev_kfree_skb_any(skb);
4312 
4313 			num_buf_freed++;
4314 		}
4315 
4316 		ath11k_dp_rx_link_desc_return(ab, desc,
4317 					      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4318 	}
4319 
4320 	ath11k_hal_srng_access_end(ab, srng);
4321 
4322 	spin_unlock_bh(&srng->lock);
4323 
4324 	if (num_buf_freed)
4325 		ath11k_dp_rxbufs_replenish(ab, mac_id, rx_ring, num_buf_freed,
4326 					   ab->hw_params.hal_params->rx_buf_rbm);
4327 
4328 	return budget - quota;
4329 }
4330 
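/* Process the REO status ring and dispatch each status descriptor to
 * the handler registered for the matching command number on
 * dp->reo_cmd_list.
 */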
4331 void ath11k_dp_process_reo_status(struct ath11k_base *ab)
4332 {
4333 	struct ath11k_dp *dp = &ab->dp;
4334 	struct hal_srng *srng;
4335 	struct dp_reo_cmd *cmd, *tmp;
4336 	bool found = false;
4337 	u32 *reo_desc;
4338 	u16 tag;
4339 	struct hal_reo_status reo_status;
4340 
4341 	srng = &ab->hal.srng_list[dp->reo_status_ring.ring_id];
4342 
4343 	memset(&reo_status, 0, sizeof(reo_status));
4344 
4345 	spin_lock_bh(&srng->lock);
4346 
4347 	ath11k_hal_srng_access_begin(ab, srng);
4348 
4349 	while ((reo_desc = ath11k_hal_srng_dst_get_next_entry(ab, srng))) {
4350 		tag = FIELD_GET(HAL_SRNG_TLV_HDR_TAG, *reo_desc);
4351 
4352 		switch (tag) {
4353 		case HAL_REO_GET_QUEUE_STATS_STATUS:
4354 			ath11k_hal_reo_status_queue_stats(ab, reo_desc,
4355 							  &reo_status);
4356 			break;
4357 		case HAL_REO_FLUSH_QUEUE_STATUS:
4358 			ath11k_hal_reo_flush_queue_status(ab, reo_desc,
4359 							  &reo_status);
4360 			break;
4361 		case HAL_REO_FLUSH_CACHE_STATUS:
4362 			ath11k_hal_reo_flush_cache_status(ab, reo_desc,
4363 							  &reo_status);
4364 			break;
4365 		case HAL_REO_UNBLOCK_CACHE_STATUS:
4366 			ath11k_hal_reo_unblk_cache_status(ab, reo_desc,
4367 							  &reo_status);
4368 			break;
4369 		case HAL_REO_FLUSH_TIMEOUT_LIST_STATUS:
4370 			ath11k_hal_reo_flush_timeout_list_status(ab, reo_desc,
4371 								 &reo_status);
4372 			break;
4373 		case HAL_REO_DESCRIPTOR_THRESHOLD_REACHED_STATUS:
4374 			ath11k_hal_reo_desc_thresh_reached_status(ab, reo_desc,
4375 								  &reo_status);
4376 			break;
4377 		case HAL_REO_UPDATE_RX_REO_QUEUE_STATUS:
4378 			ath11k_hal_reo_update_rx_reo_queue_status(ab, reo_desc,
4379 								  &reo_status);
4380 			break;
4381 		default:
4382 			ath11k_warn(ab, "Unknown reo status type %d\n", tag);
4383 			continue;
4384 		}
4385 
4386 		spin_lock_bh(&dp->reo_cmd_lock);
4387 		list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
4388 			if (reo_status.uniform_hdr.cmd_num == cmd->cmd_num) {
4389 				found = true;
4390 				list_del(&cmd->list);
4391 				break;
4392 			}
4393 		}
4394 		spin_unlock_bh(&dp->reo_cmd_lock);
4395 
4396 		if (found) {
4397 			cmd->handler(dp, (void *)&cmd->data,
4398 				     reo_status.uniform_hdr.cmd_status);
4399 			kfree(cmd);
4400 		}
4401 
4402 		found = false;
4403 	}
4404 
4405 	ath11k_hal_srng_access_end(ab, srng);
4406 
4407 	spin_unlock_bh(&srng->lock);
4408 }
4409 
4410 void ath11k_dp_rx_pdev_free(struct ath11k_base *ab, int mac_id)
4411 {
4412 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4413 
4414 	ath11k_dp_rx_pdev_srng_free(ar);
4415 	ath11k_dp_rxdma_pdev_buf_free(ar);
4416 }
4417 
4418 int ath11k_dp_rx_pdev_alloc(struct ath11k_base *ab, int mac_id)
4419 {
4420 	struct ath11k *ar = ab->pdevs[mac_id].ar;
4421 	struct ath11k_pdev_dp *dp = &ar->dp;
4422 	u32 ring_id;
4423 	int i;
4424 	int ret;
4425 
4426 	ret = ath11k_dp_rx_pdev_srng_alloc(ar);
4427 	if (ret) {
4428 		ath11k_warn(ab, "failed to setup rx srngs\n");
4429 		return ret;
4430 	}
4431 
4432 	ret = ath11k_dp_rxdma_pdev_buf_setup(ar);
4433 	if (ret) {
4434 		ath11k_warn(ab, "failed to setup rxdma ring\n");
4435 		return ret;
4436 	}
4437 
4438 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
4439 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id, HAL_RXDMA_BUF);
4440 	if (ret) {
4441 		ath11k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
4442 			    ret);
4443 		return ret;
4444 	}
4445 
4446 	if (ab->hw_params.rx_mac_buf_ring) {
4447 		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4448 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
4449 			ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4450 							  mac_id + i, HAL_RXDMA_BUF);
4451 			if (ret) {
4452 				ath11k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
4453 					    i, ret);
4454 				return ret;
4455 			}
4456 		}
4457 	}
4458 
4459 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4460 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
4461 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4462 						  mac_id + i, HAL_RXDMA_DST);
4463 		if (ret) {
4464 			ath11k_warn(ab, "failed to configure rxdma_err_dst_ring%d %d\n",
4465 				    i, ret);
4466 			return ret;
4467 		}
4468 	}
4469 
4470 	if (!ab->hw_params.rxdma1_enable)
4471 		goto config_refill_ring;
4472 
4473 	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
4474 	ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id,
4475 					  mac_id, HAL_RXDMA_MONITOR_BUF);
4476 	if (ret) {
4477 		ath11k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
4478 			    ret);
4479 		return ret;
4480 	}
4481 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4482 					  dp->rxdma_mon_dst_ring.ring_id,
4483 					  mac_id, HAL_RXDMA_MONITOR_DST);
4484 	if (ret) {
4485 		ath11k_warn(ab, "failed to configure rxdma_mon_dst_ring %d\n",
4486 			    ret);
4487 		return ret;
4488 	}
4489 	ret = ath11k_dp_tx_htt_srng_setup(ab,
4490 					  dp->rxdma_mon_desc_ring.ring_id,
4491 					  mac_id, HAL_RXDMA_MONITOR_DESC);
4492 	if (ret) {
4493 		ath11k_warn(ab, "failed to configure rxdma_mon_desc_ring %d\n",
4494 			    ret);
4495 		return ret;
4496 	}
4497 
4498 config_refill_ring:
4499 	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
4500 		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
4501 		ret = ath11k_dp_tx_htt_srng_setup(ab, ring_id, mac_id + i,
4502 						  HAL_RXDMA_MONITOR_STATUS);
4503 		if (ret) {
4504 			ath11k_warn(ab,
4505 				    "failed to configure mon_status_refill_ring%d %d\n",
4506 				    i, ret);
4507 			return ret;
4508 		}
4509 	}
4510 
4511 	return 0;
4512 }
4513 
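/* Split total_len into per-buffer chunks: each frag_len is capped at
 * the rx buffer payload size, i.e. DP_RX_BUFFER_SIZE minus the hal rx
 * descriptor.
 */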
4514 static void ath11k_dp_mon_set_frag_len(u32 *total_len, u32 *frag_len)
4515 {
4516 	if (*total_len >= (DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc))) {
4517 		*frag_len = DP_RX_BUFFER_SIZE - sizeof(struct hal_rx_desc);
4518 		*total_len -= *frag_len;
4519 	} else {
4520 		*frag_len = *total_len;
4521 		*total_len = 0;
4522 	}
4523 }
4524 
4525 static
4526 int ath11k_dp_rx_monitor_link_desc_return(struct ath11k *ar,
4527 					  void *p_last_buf_addr_info,
4528 					  u8 mac_id)
4529 {
4530 	struct ath11k_pdev_dp *dp = &ar->dp;
4531 	struct dp_srng *dp_srng;
4532 	void *hal_srng;
4533 	void *src_srng_desc;
4534 	int ret = 0;
4535 
4536 	if (ar->ab->hw_params.rxdma1_enable) {
4537 		dp_srng = &dp->rxdma_mon_desc_ring;
4538 		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4539 	} else {
4540 		dp_srng = &ar->ab->dp.wbm_desc_rel_ring;
4541 		hal_srng = &ar->ab->hal.srng_list[dp_srng->ring_id];
4542 	}
4543 
4544 	ath11k_hal_srng_access_begin(ar->ab, hal_srng);
4545 
4546 	src_srng_desc = ath11k_hal_srng_src_get_next_entry(ar->ab, hal_srng);
4547 
4548 	if (src_srng_desc) {
4549 		struct ath11k_buffer_addr *src_desc = src_srng_desc;
4550 
4551 		*src_desc = *((struct ath11k_buffer_addr *)p_last_buf_addr_info);
4552 	} else {
4553 		ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4554 			   "Monitor Link Desc Ring %d Full", mac_id);
4555 		ret = -ENOMEM;
4556 	}
4557 
4558 	ath11k_hal_srng_access_end(ar->ab, hal_srng);
4559 	return ret;
4560 }
4561 
4562 static
4563 void ath11k_dp_rx_mon_next_link_desc_get(void *rx_msdu_link_desc,
4564 					 dma_addr_t *paddr, u32 *sw_cookie,
4565 					 u8 *rbm,
4566 					 void **pp_buf_addr_info)
4567 {
4568 	struct hal_rx_msdu_link *msdu_link = rx_msdu_link_desc;
4569 	struct ath11k_buffer_addr *buf_addr_info;
4570 
4571 	buf_addr_info = (struct ath11k_buffer_addr *)&msdu_link->buf_addr_info;
4572 
4573 	ath11k_hal_rx_buf_addr_info_get(buf_addr_info, paddr, sw_cookie, rbm);
4574 
4575 	*pp_buf_addr_info = (void *)buf_addr_info;
4576 }
4577 
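/* Force skb->len to exactly len, trimming the skb or growing it (via
 * pskb_expand_head() when the tail room is too small). The skb is freed
 * on allocation failure.
 */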
4578 static int ath11k_dp_pkt_set_pktlen(struct sk_buff *skb, u32 len)
4579 {
4580 	if (skb->len > len) {
4581 		skb_trim(skb, len);
4582 	} else {
4583 		if (skb_tailroom(skb) < len - skb->len) {
4584 			if ((pskb_expand_head(skb, 0,
4585 					      len - skb->len - skb_tailroom(skb),
4586 					      GFP_ATOMIC))) {
4587 				dev_kfree_skb_any(skb);
4588 				return -ENOMEM;
4589 			}
4590 		}
4591 		skb_put(skb, (len - skb->len));
4592 	}
4593 	return 0;
4594 }
4595 
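/* Flatten an MSDU link descriptor into msdu_list, marking the first and
 * last MSDU of the MPDU and recording the length, sw cookie and return
 * buffer manager of each entry.
 */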
4596 static void ath11k_hal_rx_msdu_list_get(struct ath11k *ar,
4597 					void *msdu_link_desc,
4598 					struct hal_rx_msdu_list *msdu_list,
4599 					u16 *num_msdus)
4600 {
4601 	struct hal_rx_msdu_details *msdu_details = NULL;
4602 	struct rx_msdu_desc *msdu_desc_info = NULL;
4603 	struct hal_rx_msdu_link *msdu_link = NULL;
4604 	int i;
4605 	u32 last = FIELD_PREP(RX_MSDU_DESC_INFO0_LAST_MSDU_IN_MPDU, 1);
4606 	u32 first = FIELD_PREP(RX_MSDU_DESC_INFO0_FIRST_MSDU_IN_MPDU, 1);
4607 	u8 tmp = 0;
4608 
4609 	msdu_link = msdu_link_desc;
4610 	msdu_details = &msdu_link->msdu_link[0];
4611 
4612 	for (i = 0; i < HAL_RX_NUM_MSDU_DESC; i++) {
4613 		if (FIELD_GET(BUFFER_ADDR_INFO0_ADDR,
4614 			      msdu_details[i].buf_addr_info.info0) == 0) {
4615 			msdu_desc_info = &msdu_details[i - 1].rx_msdu_info;
4616 			msdu_desc_info->info0 |= last;
4617 			break;
4618 		}
4619 		msdu_desc_info = &msdu_details[i].rx_msdu_info;
4620 
4621 		if (!i)
4622 			msdu_desc_info->info0 |= first;
4623 		else if (i == (HAL_RX_NUM_MSDU_DESC - 1))
4624 			msdu_desc_info->info0 |= last;
4625 		msdu_list->msdu_info[i].msdu_flags = msdu_desc_info->info0;
4626 		msdu_list->msdu_info[i].msdu_len =
4627 			 HAL_RX_MSDU_PKT_LENGTH_GET(msdu_desc_info->info0);
4628 		msdu_list->sw_cookie[i] =
4629 			FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
4630 				  msdu_details[i].buf_addr_info.info1);
4631 		tmp = FIELD_GET(BUFFER_ADDR_INFO1_RET_BUF_MGR,
4632 				msdu_details[i].buf_addr_info.info1);
4633 		msdu_list->rbm[i] = tmp;
4634 	}
4635 	*num_msdus = i;
4636 }
4637 
4638 static u32 ath11k_dp_rx_mon_comp_ppduid(u32 msdu_ppdu_id, u32 *ppdu_id,
4639 					u32 *rx_bufs_used)
4640 {
4641 	u32 ret = 0;
4642 
4643 	if ((*ppdu_id < msdu_ppdu_id) &&
4644 	    ((msdu_ppdu_id - *ppdu_id) < DP_NOT_PPDU_ID_WRAP_AROUND)) {
4645 		*ppdu_id = msdu_ppdu_id;
4646 		ret = msdu_ppdu_id;
4647 	} else if ((*ppdu_id > msdu_ppdu_id) &&
4648 		((*ppdu_id - msdu_ppdu_id) > DP_NOT_PPDU_ID_WRAP_AROUND)) {
4649 		/* mon_dst ring is behind mon_status,
4650 		 * so skip this dst_ring entry and free it
4651 		 */
4652 		*rx_bufs_used += 1;
4653 		*ppdu_id = msdu_ppdu_id;
4654 		ret = msdu_ppdu_id;
4655 	}
4656 	return ret;
4657 }
4658 
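/* Compute the buffer length of the current MSDU. A set continuation
 * flag means the MSDU spans multiple rx buffers and the remaining
 * total_len is consumed chunk by chunk; otherwise the MSDU ends in this
 * buffer and the per-MPDU msdu count is decremented.
 */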
4659 static void ath11k_dp_mon_get_buf_len(struct hal_rx_msdu_desc_info *info,
4660 				      bool *is_frag, u32 *total_len,
4661 				      u32 *frag_len, u32 *msdu_cnt)
4662 {
4663 	if (info->msdu_flags & RX_MSDU_DESC_INFO0_MSDU_CONTINUATION) {
4664 		if (!*is_frag) {
4665 			*total_len = info->msdu_len;
4666 			*is_frag = true;
4667 		}
4668 		ath11k_dp_mon_set_frag_len(total_len,
4669 					   frag_len);
4670 	} else {
4671 		if (*is_frag) {
4672 			ath11k_dp_mon_set_frag_len(total_len,
4673 						   frag_len);
4674 		} else {
4675 			*frag_len = info->msdu_len;
4676 		}
4677 		*is_frag = false;
4678 		*msdu_cnt -= 1;
4679 	}
4680 }
4681 
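/* Pop the MPDU referenced by ring_entry from the monitor destination
 * ring: walk the chain of MSDU link descriptors, unmap the buffers,
 * chain the skbs into a head/tail list and return each consumed link
 * descriptor to its idle list.
 */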
4682 /* clang stack usage explodes if this is inlined */
4683 static noinline_for_stack
4684 u32 ath11k_dp_rx_mon_mpdu_pop(struct ath11k *ar, int mac_id,
4685 			      void *ring_entry, struct sk_buff **head_msdu,
4686 			      struct sk_buff **tail_msdu, u32 *npackets,
4687 			      u32 *ppdu_id)
4688 {
4689 	struct ath11k_pdev_dp *dp = &ar->dp;
4690 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
4691 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
4692 	struct sk_buff *msdu = NULL, *last = NULL;
4693 	struct hal_rx_msdu_list msdu_list;
4694 	void *p_buf_addr_info, *p_last_buf_addr_info;
4695 	struct hal_rx_desc *rx_desc;
4696 	void *rx_msdu_link_desc;
4697 	dma_addr_t paddr;
4698 	u16 num_msdus = 0;
4699 	u32 rx_buf_size, rx_pkt_offset, sw_cookie;
4700 	u32 rx_bufs_used = 0, i = 0;
4701 	u32 msdu_ppdu_id = 0, msdu_cnt = 0;
4702 	u32 total_len = 0, frag_len = 0;
4703 	bool is_frag, is_first_msdu;
4704 	bool drop_mpdu = false;
4705 	struct ath11k_skb_rxcb *rxcb;
4706 	struct hal_reo_entrance_ring *ent_desc = ring_entry;
4707 	int buf_id;
4708 	u32 rx_link_buf_info[2];
4709 	u8 rbm;
4710 
4711 	if (!ar->ab->hw_params.rxdma1_enable)
4712 		rx_ring = &dp->rx_refill_buf_ring;
4713 
4714 	ath11k_hal_rx_reo_ent_buf_paddr_get(ring_entry, &paddr,
4715 					    &sw_cookie,
4716 					    &p_last_buf_addr_info, &rbm,
4717 					    &msdu_cnt);
4718 
4719 	if (FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_PUSH_REASON,
4720 		      ent_desc->info1) ==
4721 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
4722 		u8 rxdma_err =
4723 			FIELD_GET(HAL_REO_ENTR_RING_INFO1_RXDMA_ERROR_CODE,
4724 				  ent_desc->info1);
4725 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
4726 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
4727 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
4728 			drop_mpdu = true;
4729 			pmon->rx_mon_stats.dest_mpdu_drop++;
4730 		}
4731 	}
4732 
4733 	is_frag = false;
4734 	is_first_msdu = true;
4735 
4736 	do {
4737 		if (pmon->mon_last_linkdesc_paddr == paddr) {
4738 			pmon->rx_mon_stats.dup_mon_linkdesc_cnt++;
4739 			return rx_bufs_used;
4740 		}
4741 
4742 		if (ar->ab->hw_params.rxdma1_enable)
4743 			rx_msdu_link_desc =
4744 				(void *)pmon->link_desc_banks[sw_cookie].vaddr +
4745 				(paddr - pmon->link_desc_banks[sw_cookie].paddr);
4746 		else
4747 			rx_msdu_link_desc =
4748 				(void *)ar->ab->dp.link_desc_banks[sw_cookie].vaddr +
4749 				(paddr - ar->ab->dp.link_desc_banks[sw_cookie].paddr);
4750 
4751 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
4752 					    &num_msdus);
4753 
4754 		for (i = 0; i < num_msdus; i++) {
4755 			u32 l2_hdr_offset;
4756 
4757 			if (pmon->mon_last_buf_cookie == msdu_list.sw_cookie[i]) {
4758 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4759 					   "i %d last_cookie %d is same\n",
4760 					   i, pmon->mon_last_buf_cookie);
4761 				drop_mpdu = true;
4762 				pmon->rx_mon_stats.dup_mon_buf_cnt++;
4763 				continue;
4764 			}
4765 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
4766 					   msdu_list.sw_cookie[i]);
4767 
4768 			spin_lock_bh(&rx_ring->idr_lock);
4769 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
4770 			spin_unlock_bh(&rx_ring->idr_lock);
4771 			if (!msdu) {
4772 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4773 					   "msdu_pop: invalid buf_id %d\n", buf_id);
4774 				goto next_msdu;
4775 			}
4776 			rxcb = ATH11K_SKB_RXCB(msdu);
4777 			if (!rxcb->unmapped) {
4778 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
4779 						 msdu->len +
4780 						 skb_tailroom(msdu),
4781 						 DMA_FROM_DEVICE);
4782 				rxcb->unmapped = 1;
4783 			}
4784 			if (drop_mpdu) {
4785 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4786 					   "i %d drop msdu %p *ppdu_id %x\n",
4787 					   i, msdu, *ppdu_id);
4788 				dev_kfree_skb_any(msdu);
4789 				msdu = NULL;
4790 				goto next_msdu;
4791 			}
4792 
4793 			rx_desc = (struct hal_rx_desc *)msdu->data;
4794 
4795 			rx_pkt_offset = sizeof(struct hal_rx_desc);
4796 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
4797 
4798 			if (is_first_msdu) {
4799 				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
4800 					drop_mpdu = true;
4801 					dev_kfree_skb_any(msdu);
4802 					msdu = NULL;
4803 					pmon->mon_last_linkdesc_paddr = paddr;
4804 					goto next_msdu;
4805 				}
4806 
4807 				msdu_ppdu_id =
4808 					ath11k_dp_rxdesc_get_ppduid(ar->ab, rx_desc);
4809 
4810 				if (ath11k_dp_rx_mon_comp_ppduid(msdu_ppdu_id,
4811 								 ppdu_id,
4812 								 &rx_bufs_used)) {
4813 					if (rx_bufs_used) {
4814 						drop_mpdu = true;
4815 						dev_kfree_skb_any(msdu);
4816 						msdu = NULL;
4817 						goto next_msdu;
4818 					}
4819 					return rx_bufs_used;
4820 				}
4821 				pmon->mon_last_linkdesc_paddr = paddr;
4822 				is_first_msdu = false;
4823 			}
4824 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
4825 						  &is_frag, &total_len,
4826 						  &frag_len, &msdu_cnt);
4827 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
4828 
4829 			if (ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size)) {
				/* skb already freed by ath11k_dp_pkt_set_pktlen() */
				msdu = NULL;
				goto next_msdu;
			}
4830 
4831 			if (!(*head_msdu))
4832 				*head_msdu = msdu;
4833 			else if (last)
4834 				last->next = msdu;
4835 
4836 			last = msdu;
4837 next_msdu:
4838 			pmon->mon_last_buf_cookie = msdu_list.sw_cookie[i];
4839 			rx_bufs_used++;
4840 			spin_lock_bh(&rx_ring->idr_lock);
4841 			idr_remove(&rx_ring->bufs_idr, buf_id);
4842 			spin_unlock_bh(&rx_ring->idr_lock);
4843 		}
4844 
4845 		ath11k_hal_rx_buf_addr_info_set(rx_link_buf_info, paddr, sw_cookie, rbm);
4846 
4847 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc, &paddr,
4848 						    &sw_cookie, &rbm,
4849 						    &p_buf_addr_info);
4850 
4851 		if (ar->ab->hw_params.rxdma1_enable) {
4852 			if (ath11k_dp_rx_monitor_link_desc_return(ar,
4853 								  p_last_buf_addr_info,
4854 								  dp->mac_id))
4855 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
4856 					   "dp_rx_monitor_link_desc_return failed");
4857 		} else {
4858 			ath11k_dp_rx_link_desc_return(ar->ab, rx_link_buf_info,
4859 						      HAL_WBM_REL_BM_ACT_PUT_IN_IDLE);
4860 		}
4861 
4862 		p_last_buf_addr_info = p_buf_addr_info;
4863 
4864 	} while (paddr && msdu_cnt);
4865 
4866 	if (last)
4867 		last->next = NULL;
4868 
4869 	*tail_msdu = msdu;
4870 
4871 	if (msdu_cnt == 0)
4872 		*npackets = 1;
4873 
4874 	return rx_bufs_used;
4875 }
4876 
4877 static void ath11k_dp_rx_msdus_set_payload(struct ath11k *ar, struct sk_buff *msdu)
4878 {
4879 	u32 rx_pkt_offset, l2_hdr_offset;
4880 
4881 	rx_pkt_offset = ar->ab->hw_params.hal_desc_sz;
4882 	l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab,
4883 						      (struct hal_rx_desc *)msdu->data);
4884 	skb_pull(msdu, rx_pkt_offset + l2_hdr_offset);
4885 }
4886 
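/* Stitch the popped MSDU chain into a single monitor frame. For raw
 * decap the per-buffer rx descriptors are stripped and the trailing FCS
 * is trimmed; for native wifi decap the 802.11 header taken from the
 * hal rx descriptor is restored in front of each MSDU. Other decap
 * formats are not handled on this path.
 */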
4887 static struct sk_buff *
4888 ath11k_dp_rx_mon_merg_msdus(struct ath11k *ar,
4889 			    u32 mac_id, struct sk_buff *head_msdu,
4890 			    struct sk_buff *last_msdu,
4891 			    struct ieee80211_rx_status *rxs, bool *fcs_err)
4892 {
4893 	struct ath11k_base *ab = ar->ab;
4894 	struct sk_buff *msdu, *prev_buf;
4895 	struct hal_rx_desc *rx_desc;
4896 	char *hdr_desc;
4897 	u8 *dest, decap_format;
4898 	struct ieee80211_hdr_3addr *wh;
4899 	struct rx_attention *rx_attention;
4900 	u32 err_bitmap;
4901 
4902 	if (!head_msdu)
4903 		goto err_merge_fail;
4904 
4905 	rx_desc = (struct hal_rx_desc *)head_msdu->data;
4906 	rx_attention = ath11k_dp_rx_get_attention(ab, rx_desc);
4907 	err_bitmap = ath11k_dp_rx_h_attn_mpdu_err(rx_attention);
4908 
4909 	if (err_bitmap & DP_RX_MPDU_ERR_FCS)
4910 		*fcs_err = true;
4911 
4912 	if (ath11k_dp_rxdesc_get_mpdulen_err(rx_attention))
4913 		return NULL;
4914 
4915 	decap_format = ath11k_dp_rx_h_msdu_start_decap_type(ab, rx_desc);
4916 
4917 	ath11k_dp_rx_h_ppdu(ar, rx_desc, rxs);
4918 
4919 	if (decap_format == DP_RX_DECAP_TYPE_RAW) {
4920 		ath11k_dp_rx_msdus_set_payload(ar, head_msdu);
4921 
4922 		prev_buf = head_msdu;
4923 		msdu = head_msdu->next;
4924 
4925 		while (msdu) {
4926 			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4927 
4928 			prev_buf = msdu;
4929 			msdu = msdu->next;
4930 		}
4931 
4932 		prev_buf->next = NULL;
4933 
4934 		skb_trim(prev_buf, prev_buf->len - HAL_RX_FCS_LEN);
4935 	} else if (decap_format == DP_RX_DECAP_TYPE_NATIVE_WIFI) {
4936 		u8 qos_pkt = 0;
4937 
4938 		rx_desc = (struct hal_rx_desc *)head_msdu->data;
4939 		hdr_desc = ath11k_dp_rxdesc_get_80211hdr(ab, rx_desc);
4940 
4941 		/* Base size */
4942 		wh = (struct ieee80211_hdr_3addr *)hdr_desc;
4943 
4944 		if (ieee80211_is_data_qos(wh->frame_control))
4945 			qos_pkt = 1;
4946 
4947 		msdu = head_msdu;
4948 
4949 		while (msdu) {
4950 			ath11k_dp_rx_msdus_set_payload(ar, msdu);
4951 			if (qos_pkt) {
4952 				dest = skb_push(msdu, sizeof(__le16));
4953 				if (!dest)
4954 					goto err_merge_fail;
4955 				memcpy(dest, hdr_desc, sizeof(struct ieee80211_qos_hdr));
4956 			}
4957 			prev_buf = msdu;
4958 			msdu = msdu->next;
4959 		}
4960 		dest = skb_put(prev_buf, HAL_RX_FCS_LEN);
4961 		if (!dest)
4962 			goto err_merge_fail;
4963 
4964 		ath11k_dbg(ab, ATH11K_DBG_DATA,
4965 			   "prev_buf %p prev_buf->len %u",
4966 			   prev_buf, prev_buf->len);
4967 	} else {
4968 		ath11k_dbg(ab, ATH11K_DBG_DATA,
4969 			   "decap format %d is not supported!\n",
4970 			   decap_format);
4971 		goto err_merge_fail;
4972 	}
4973 
4974 	return head_msdu;
4975 
4976 err_merge_fail:
4977 	return NULL;
4978 }
4979 
4980 static void
4981 ath11k_dp_rx_update_radiotap_he(struct hal_rx_mon_ppdu_info *rx_status,
4982 				u8 *rtap_buf)
4983 {
4984 	u32 rtap_len = 0;
4985 
4986 	put_unaligned_le16(rx_status->he_data1, &rtap_buf[rtap_len]);
4987 	rtap_len += 2;
4988 
4989 	put_unaligned_le16(rx_status->he_data2, &rtap_buf[rtap_len]);
4990 	rtap_len += 2;
4991 
4992 	put_unaligned_le16(rx_status->he_data3, &rtap_buf[rtap_len]);
4993 	rtap_len += 2;
4994 
4995 	put_unaligned_le16(rx_status->he_data4, &rtap_buf[rtap_len]);
4996 	rtap_len += 2;
4997 
4998 	put_unaligned_le16(rx_status->he_data5, &rtap_buf[rtap_len]);
4999 	rtap_len += 2;
5000 
5001 	put_unaligned_le16(rx_status->he_data6, &rtap_buf[rtap_len]);
5002 }
5003 
5004 static void
5005 ath11k_dp_rx_update_radiotap_he_mu(struct hal_rx_mon_ppdu_info *rx_status,
5006 				   u8 *rtap_buf)
5007 {
5008 	u32 rtap_len = 0;
5009 
5010 	put_unaligned_le16(rx_status->he_flags1, &rtap_buf[rtap_len]);
5011 	rtap_len += 2;
5012 
5013 	put_unaligned_le16(rx_status->he_flags2, &rtap_buf[rtap_len]);
5014 	rtap_len += 2;
5015 
5016 	rtap_buf[rtap_len] = rx_status->he_RU[0];
5017 	rtap_len += 1;
5018 
5019 	rtap_buf[rtap_len] = rx_status->he_RU[1];
5020 	rtap_len += 1;
5021 
5022 	rtap_buf[rtap_len] = rx_status->he_RU[2];
5023 	rtap_len += 1;
5024 
5025 	rtap_buf[rtap_len] = rx_status->he_RU[3];
5026 }
5027 
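/* Fill the mac80211 rx status from the parsed PPDU info and, for HE
 * frames, push the radiotap HE/HE-MU fields in front of the frame data.
 */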
5028 static void ath11k_update_radiotap(struct ath11k *ar,
5029 				   struct hal_rx_mon_ppdu_info *ppduinfo,
5030 				   struct sk_buff *mon_skb,
5031 				   struct ieee80211_rx_status *rxs)
5032 {
5033 	struct ieee80211_supported_band *sband;
5034 	u8 *ptr = NULL;
5035 
5036 	rxs->flag |= RX_FLAG_MACTIME_START;
5037 	rxs->signal = ppduinfo->rssi_comb + ATH11K_DEFAULT_NOISE_FLOOR;
5038 
5039 	if (ppduinfo->nss)
5040 		rxs->nss = ppduinfo->nss;
5041 
5042 	if (ppduinfo->he_mu_flags) {
5043 		rxs->flag |= RX_FLAG_RADIOTAP_HE_MU;
5044 		rxs->encoding = RX_ENC_HE;
5045 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he_mu));
5046 		ath11k_dp_rx_update_radiotap_he_mu(ppduinfo, ptr);
5047 	} else if (ppduinfo->he_flags) {
5048 		rxs->flag |= RX_FLAG_RADIOTAP_HE;
5049 		rxs->encoding = RX_ENC_HE;
5050 		ptr = skb_push(mon_skb, sizeof(struct ieee80211_radiotap_he));
5051 		ath11k_dp_rx_update_radiotap_he(ppduinfo, ptr);
5052 		rxs->rate_idx = ppduinfo->rate;
5053 	} else if (ppduinfo->vht_flags) {
5054 		rxs->encoding = RX_ENC_VHT;
5055 		rxs->rate_idx = ppduinfo->rate;
5056 	} else if (ppduinfo->ht_flags) {
5057 		rxs->encoding = RX_ENC_HT;
5058 		rxs->rate_idx = ppduinfo->rate;
5059 	} else {
5060 		rxs->encoding = RX_ENC_LEGACY;
5061 		sband = &ar->mac.sbands[rxs->band];
5062 		rxs->rate_idx = ath11k_mac_hw_rate_to_idx(sband, ppduinfo->rate,
5063 							  ppduinfo->cck_flag);
5064 	}
5065 
5066 	rxs->mactime = ppduinfo->tsft;
5067 }
5068 
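/* Merge and deliver a monitor MPDU to mac80211, updating the radiotap
 * information for every MSDU in the chain. On merge failure the whole
 * chain is freed.
 */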
5069 static int ath11k_dp_rx_mon_deliver(struct ath11k *ar, u32 mac_id,
5070 				    struct sk_buff *head_msdu,
5071 				    struct hal_rx_mon_ppdu_info *ppduinfo,
5072 				    struct sk_buff *tail_msdu,
5073 				    struct napi_struct *napi)
5074 {
5075 	struct ath11k_pdev_dp *dp = &ar->dp;
5076 	struct sk_buff *mon_skb, *skb_next, *header;
5077 	struct ieee80211_rx_status *rxs = &dp->rx_status;
5078 	bool fcs_err = false;
5079 
5080 	mon_skb = ath11k_dp_rx_mon_merg_msdus(ar, mac_id, head_msdu,
5081 					      tail_msdu, rxs, &fcs_err);
5082 
5083 	if (!mon_skb)
5084 		goto mon_deliver_fail;
5085 
5086 	header = mon_skb;
5087 
5088 	rxs->flag = 0;
5089 
5090 	if (fcs_err)
5091 		rxs->flag = RX_FLAG_FAILED_FCS_CRC;
5092 
5093 	do {
5094 		skb_next = mon_skb->next;
5095 		if (!skb_next)
5096 			rxs->flag &= ~RX_FLAG_AMSDU_MORE;
5097 		else
5098 			rxs->flag |= RX_FLAG_AMSDU_MORE;
5099 
5100 		if (mon_skb == header) {
5101 			header = NULL;
5102 			rxs->flag &= ~RX_FLAG_ALLOW_SAME_PN;
5103 		} else {
5104 			rxs->flag |= RX_FLAG_ALLOW_SAME_PN;
5105 		}
5106 		rxs->flag |= RX_FLAG_ONLY_MONITOR;
5107 		ath11k_update_radiotap(ar, ppduinfo, mon_skb, rxs);
5108 
5109 		ath11k_dp_rx_deliver_msdu(ar, napi, mon_skb, rxs);
5110 		mon_skb = skb_next;
5111 	} while (mon_skb);
5112 	rxs->flag = 0;
5113 
5114 	return 0;
5115 
5116 mon_deliver_fail:
5117 	mon_skb = head_msdu;
5118 	while (mon_skb) {
5119 		skb_next = mon_skb->next;
5120 		dev_kfree_skb_any(mon_skb);
5121 		mon_skb = skb_next;
5122 	}
5123 	return -EINVAL;
5124 }
5125 
5126 /* Destination ring processing is considered stuck if the destination ring
5127  * does not move while the status ring moves 16 PPDUs. As a workaround, the
5128  * destination ring processing skips this destination ring PPDU.
5129  */
5130 #define MON_DEST_RING_STUCK_MAX_CNT 16
5131 
5132 static void ath11k_dp_rx_mon_dest_process(struct ath11k *ar, int mac_id,
5133 					  u32 quota, struct napi_struct *napi)
5134 {
5135 	struct ath11k_pdev_dp *dp = &ar->dp;
5136 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5137 	const struct ath11k_hw_hal_params *hal_params;
5138 	void *ring_entry;
5139 	struct hal_srng *mon_dst_srng;
5140 	u32 ppdu_id;
5141 	u32 rx_bufs_used;
5142 	u32 ring_id;
5143 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5144 	u32 npackets = 0;
5145 	u32 mpdu_rx_bufs_used;
5146 
5147 	if (ar->ab->hw_params.rxdma1_enable)
5148 		ring_id = dp->rxdma_mon_dst_ring.ring_id;
5149 	else
5150 		ring_id = dp->rxdma_err_dst_ring[mac_id].ring_id;
5151 
5152 	mon_dst_srng = &ar->ab->hal.srng_list[ring_id];
5153 
5154 	spin_lock_bh(&pmon->mon_lock);
5155 
5156 	spin_lock_bh(&mon_dst_srng->lock);
5157 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5158 
5159 	ppdu_id = pmon->mon_ppdu_info.ppdu_id;
5160 	rx_bufs_used = 0;
5161 	rx_mon_stats = &pmon->rx_mon_stats;
5162 
5163 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5164 		struct sk_buff *head_msdu, *tail_msdu;
5165 
5166 		head_msdu = NULL;
5167 		tail_msdu = NULL;
5168 
5169 		mpdu_rx_bufs_used = ath11k_dp_rx_mon_mpdu_pop(ar, mac_id, ring_entry,
5170 							      &head_msdu,
5171 							      &tail_msdu,
5172 							      &npackets, &ppdu_id);
5173 
5174 		rx_bufs_used += mpdu_rx_bufs_used;
5175 
5176 		if (mpdu_rx_bufs_used) {
5177 			dp->mon_dest_ring_stuck_cnt = 0;
5178 		} else {
5179 			dp->mon_dest_ring_stuck_cnt++;
5180 			rx_mon_stats->dest_mon_not_reaped++;
5181 		}
5182 
5183 		if (dp->mon_dest_ring_stuck_cnt > MON_DEST_RING_STUCK_MAX_CNT) {
5184 			rx_mon_stats->dest_mon_stuck++;
5185 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5186 				   "status ring ppdu_id=%d dest ring ppdu_id=%d mon_dest_ring_stuck_cnt=%d dest_mon_not_reaped=%u dest_mon_stuck=%u\n",
5187 				   pmon->mon_ppdu_info.ppdu_id, ppdu_id,
5188 				   dp->mon_dest_ring_stuck_cnt,
5189 				   rx_mon_stats->dest_mon_not_reaped,
5190 				   rx_mon_stats->dest_mon_stuck);
5191 			pmon->mon_ppdu_info.ppdu_id = ppdu_id;
5192 			continue;
5193 		}
5194 
5195 		if (ppdu_id != pmon->mon_ppdu_info.ppdu_id) {
5196 			pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5197 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5198 				   "dest_rx: new ppdu_id %x != status ppdu_id %x dest_mon_not_reaped = %u dest_mon_stuck = %u\n",
5199 				   ppdu_id, pmon->mon_ppdu_info.ppdu_id,
5200 				   rx_mon_stats->dest_mon_not_reaped,
5201 				   rx_mon_stats->dest_mon_stuck);
5202 			break;
5203 		}
5204 		if (head_msdu && tail_msdu) {
5205 			ath11k_dp_rx_mon_deliver(ar, dp->mac_id, head_msdu,
5206 						 &pmon->mon_ppdu_info,
5207 						 tail_msdu, napi);
5208 			rx_mon_stats->dest_mpdu_done++;
5209 		}
5210 
5211 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5212 								mon_dst_srng);
5213 	}
5214 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5215 	spin_unlock_bh(&mon_dst_srng->lock);
5216 
5217 	spin_unlock_bh(&pmon->mon_lock);
5218 
5219 	if (rx_bufs_used) {
5220 		rx_mon_stats->dest_ppdu_done++;
5221 		hal_params = ar->ab->hw_params.hal_params;
5222 
5223 		if (ar->ab->hw_params.rxdma1_enable)
5224 			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5225 						   &dp->rxdma_mon_buf_ring,
5226 						   rx_bufs_used,
5227 						   hal_params->rx_buf_rbm);
5228 		else
5229 			ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5230 						   &dp->rx_refill_buf_ring,
5231 						   rx_bufs_used,
5232 						   hal_params->rx_buf_rbm);
5233 	}
5234 }
5235 
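/* NAPI poll handler for the monitor status ring: parse each reaped
 * status buffer into ppdu_info, kick destination ring processing when a
 * PPDU completes (unless full monitor mode handles that separately),
 * and update per-peer rx stats for the completed PPDU.
 */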
5236 int ath11k_dp_rx_process_mon_status(struct ath11k_base *ab, int mac_id,
5237 				    struct napi_struct *napi, int budget)
5238 {
5239 	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5240 	enum hal_rx_mon_status hal_status;
5241 	struct sk_buff *skb;
5242 	struct sk_buff_head skb_list;
5243 	struct ath11k_peer *peer;
5244 	struct ath11k_sta *arsta;
5245 	int num_buffs_reaped = 0;
5246 	u32 rx_buf_sz;
5247 	u16 log_type;
5248 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&ar->dp.mon_data;
5249 	struct ath11k_pdev_mon_stats *rx_mon_stats = &pmon->rx_mon_stats;
5250 	struct hal_rx_mon_ppdu_info *ppdu_info = &pmon->mon_ppdu_info;
5251 
5252 	__skb_queue_head_init(&skb_list);
5253 
5254 	num_buffs_reaped = ath11k_dp_rx_reap_mon_status_ring(ab, mac_id, &budget,
5255 							     &skb_list);
5256 	if (!num_buffs_reaped)
5257 		goto exit;
5258 
5259 	memset(ppdu_info, 0, sizeof(*ppdu_info));
5260 	ppdu_info->peer_id = HAL_INVALID_PEERID;
5261 
5262 	while ((skb = __skb_dequeue(&skb_list))) {
5263 		if (ath11k_debugfs_is_pktlog_lite_mode_enabled(ar)) {
5264 			log_type = ATH11K_PKTLOG_TYPE_LITE_RX;
5265 			rx_buf_sz = DP_RX_BUFFER_SIZE_LITE;
5266 		} else if (ath11k_debugfs_is_pktlog_rx_stats_enabled(ar)) {
5267 			log_type = ATH11K_PKTLOG_TYPE_RX_STATBUF;
5268 			rx_buf_sz = DP_RX_BUFFER_SIZE;
5269 		} else {
5270 			log_type = ATH11K_PKTLOG_TYPE_INVALID;
5271 			rx_buf_sz = 0;
5272 		}
5273 
5274 		if (log_type != ATH11K_PKTLOG_TYPE_INVALID)
5275 			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5276 
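		/* reset the PPDU info before parsing each status buffer;
		 * the TLV parser fills it in incrementally.
		 */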
5277 		memset(ppdu_info, 0, sizeof(*ppdu_info));
5278 		ppdu_info->peer_id = HAL_INVALID_PEERID;
5279 		hal_status = ath11k_hal_rx_parse_mon_status(ab, ppdu_info, skb);
5280 
5281 		if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5282 		    pmon->mon_ppdu_status == DP_PPDU_STATUS_START &&
5283 		    hal_status == HAL_TLV_STATUS_PPDU_DONE) {
5284 			rx_mon_stats->status_ppdu_done++;
5285 			pmon->mon_ppdu_status = DP_PPDU_STATUS_DONE;
5286 			if (!ab->hw_params.full_monitor_mode) {
5287 				ath11k_dp_rx_mon_dest_process(ar, mac_id,
5288 							      budget, napi);
5289 				pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5290 			}
5291 		}
5292 
5293 		if (ppdu_info->peer_id == HAL_INVALID_PEERID ||
5294 		    hal_status != HAL_RX_MON_STATUS_PPDU_DONE) {
5295 			dev_kfree_skb_any(skb);
5296 			continue;
5297 		}
5298 
5299 		rcu_read_lock();
5300 		spin_lock_bh(&ab->base_lock);
5301 		peer = ath11k_peer_find_by_id(ab, ppdu_info->peer_id);
5302 
5303 		if (!peer || !peer->sta) {
5304 			ath11k_dbg(ab, ATH11K_DBG_DATA,
5305 				   "failed to find the peer with peer_id %d\n",
5306 				   ppdu_info->peer_id);
5307 			goto next_skb;
5308 		}
5309 
5310 		arsta = ath11k_sta_to_arsta(peer->sta);
5311 		ath11k_dp_rx_update_peer_stats(arsta, ppdu_info);
5312 
5313 		if (ath11k_debugfs_is_pktlog_peer_valid(ar, peer->addr))
5314 			trace_ath11k_htt_rxdesc(ar, skb->data, log_type, rx_buf_sz);
5315 
5316 next_skb:
5317 		spin_unlock_bh(&ab->base_lock);
5318 		rcu_read_unlock();
5319 
5320 		dev_kfree_skb_any(skb);
5321 		memset(ppdu_info, 0, sizeof(*ppdu_info));
5322 		ppdu_info->peer_id = HAL_INVALID_PEERID;
5323 	}
5324 exit:
5325 	return num_buffs_reaped;
5326 }
5327 
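/* Pop one MPDU from a software monitor ring entry in full monitor mode:
 * walk the MSDU link descriptor chain, unmap the buffers and chain them
 * into a head/tail skb list, dropping the MPDU on rxdma error push
 * reasons. Returns the number of rx buffers consumed so the caller can
 * replenish them.
 */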
5328 static u32
5329 ath11k_dp_rx_full_mon_mpdu_pop(struct ath11k *ar,
5330 			       void *ring_entry, struct sk_buff **head_msdu,
5331 			       struct sk_buff **tail_msdu,
5332 			       struct hal_sw_mon_ring_entries *sw_mon_entries)
5333 {
5334 	struct ath11k_pdev_dp *dp = &ar->dp;
5335 	struct ath11k_mon_data *pmon = &dp->mon_data;
5336 	struct dp_rxdma_ring *rx_ring = &dp->rxdma_mon_buf_ring;
5337 	struct sk_buff *msdu = NULL, *last = NULL;
5338 	struct hal_sw_monitor_ring *sw_desc = ring_entry;
5339 	struct hal_rx_msdu_list msdu_list;
5340 	struct hal_rx_desc *rx_desc;
5341 	struct ath11k_skb_rxcb *rxcb;
5342 	void *rx_msdu_link_desc;
5343 	void *p_buf_addr_info, *p_last_buf_addr_info;
5344 	int buf_id, i = 0;
5345 	u32 rx_buf_size, rx_pkt_offset, l2_hdr_offset;
5346 	u32 rx_bufs_used = 0, msdu_cnt = 0;
5347 	u32 total_len = 0, frag_len = 0, sw_cookie;
5348 	u16 num_msdus = 0;
5349 	u8 rxdma_err, rbm;
5350 	bool is_frag, is_first_msdu;
5351 	bool drop_mpdu = false;
5352 
5353 	ath11k_hal_rx_sw_mon_ring_buf_paddr_get(ring_entry, sw_mon_entries);
5354 
5355 	sw_cookie = sw_mon_entries->mon_dst_sw_cookie;
5356 	sw_mon_entries->end_of_ppdu = false;
5357 	sw_mon_entries->drop_ppdu = false;
5358 	p_last_buf_addr_info = sw_mon_entries->dst_buf_addr_info;
5359 	msdu_cnt = sw_mon_entries->msdu_cnt;
5360 
5361 	sw_mon_entries->end_of_ppdu =
5362 		FIELD_GET(HAL_SW_MON_RING_INFO0_END_OF_PPDU, sw_desc->info0);
5363 	if (sw_mon_entries->end_of_ppdu)
5364 		return rx_bufs_used;
5365 
5366 	if (FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_PUSH_REASON,
5367 		      sw_desc->info0) ==
5368 		      HAL_REO_DEST_RING_PUSH_REASON_ERR_DETECTED) {
5369 		rxdma_err =
5370 			FIELD_GET(HAL_SW_MON_RING_INFO0_RXDMA_ERROR_CODE,
5371 				  sw_desc->info0);
5372 		if (rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_FLUSH_REQUEST_ERR ||
5373 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_MPDU_LEN_ERR ||
5374 		    rxdma_err == HAL_REO_ENTR_RING_RXDMA_ECODE_OVERFLOW_ERR) {
5375 			pmon->rx_mon_stats.dest_mpdu_drop++;
5376 			drop_mpdu = true;
5377 		}
5378 	}
5379 
5380 	is_frag = false;
5381 	is_first_msdu = true;
5382 
5383 	do {
5384 		rx_msdu_link_desc =
5385 			(u8 *)pmon->link_desc_banks[sw_cookie].vaddr +
5386 			(sw_mon_entries->mon_dst_paddr -
5387 			 pmon->link_desc_banks[sw_cookie].paddr);
5388 
5389 		ath11k_hal_rx_msdu_list_get(ar, rx_msdu_link_desc, &msdu_list,
5390 					    &num_msdus);
5391 
5392 		for (i = 0; i < num_msdus; i++) {
5393 			buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID,
5394 					   msdu_list.sw_cookie[i]);
5395 
5396 			spin_lock_bh(&rx_ring->idr_lock);
5397 			msdu = idr_find(&rx_ring->bufs_idr, buf_id);
5398 			if (!msdu) {
5399 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5400 					   "full mon msdu_pop: invalid buf_id %d\n",
5401 					    buf_id);
5402 				spin_unlock_bh(&rx_ring->idr_lock);
5403 				goto next_msdu;
5404 			}
5405 			idr_remove(&rx_ring->bufs_idr, buf_id);
5406 			spin_unlock_bh(&rx_ring->idr_lock);
5407 
5408 			rxcb = ATH11K_SKB_RXCB(msdu);
5409 			if (!rxcb->unmapped) {
5410 				dma_unmap_single(ar->ab->dev, rxcb->paddr,
5411 						 msdu->len +
5412 						 skb_tailroom(msdu),
5413 						 DMA_FROM_DEVICE);
5414 				rxcb->unmapped = 1;
5415 			}
5416 			if (drop_mpdu) {
5417 				ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5418 					   "full mon: i %d drop msdu %p *ppdu_id %x\n",
5419 					   i, msdu, sw_mon_entries->ppdu_id);
5420 				dev_kfree_skb_any(msdu);
5421 				msdu_cnt--;
5422 				goto next_msdu;
5423 			}
5424 
5425 			rx_desc = (struct hal_rx_desc *)msdu->data;
5426 
5427 			rx_pkt_offset = sizeof(struct hal_rx_desc);
5428 			l2_hdr_offset = ath11k_dp_rx_h_msdu_end_l3pad(ar->ab, rx_desc);
5429 
5430 			if (is_first_msdu) {
5431 				if (!ath11k_dp_rxdesc_mpdu_valid(ar->ab, rx_desc)) {
5432 					drop_mpdu = true;
5433 					dev_kfree_skb_any(msdu);
5434 					msdu = NULL;
5435 					goto next_msdu;
5436 				}
5437 				is_first_msdu = false;
5438 			}
5439 
5440 			ath11k_dp_mon_get_buf_len(&msdu_list.msdu_info[i],
5441 						  &is_frag, &total_len,
5442 						  &frag_len, &msdu_cnt);
5443 
5444 			rx_buf_size = rx_pkt_offset + l2_hdr_offset + frag_len;
5445 
5446 			ath11k_dp_pkt_set_pktlen(msdu, rx_buf_size);
5447 
5448 			if (!(*head_msdu))
5449 				*head_msdu = msdu;
5450 			else if (last)
5451 				last->next = msdu;
5452 
5453 			last = msdu;
5454 next_msdu:
5455 			rx_bufs_used++;
5456 		}
5457 
5458 		ath11k_dp_rx_mon_next_link_desc_get(rx_msdu_link_desc,
5459 						    &sw_mon_entries->mon_dst_paddr,
5460 						    &sw_mon_entries->mon_dst_sw_cookie,
5461 						    &rbm,
5462 						    &p_buf_addr_info);
5463 
5464 		if (ath11k_dp_rx_monitor_link_desc_return(ar,
5465 							  p_last_buf_addr_info,
5466 							  dp->mac_id))
5467 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA,
5468 				   "full mon: dp_rx_monitor_link_desc_return failed\n");
5469 
5470 		p_last_buf_addr_info = p_buf_addr_info;
5471 
5472 	} while (sw_mon_entries->mon_dst_paddr && msdu_cnt);
5473 
5474 	if (last)
5475 		last->next = NULL;
5476 
5477 	*tail_msdu = msdu;
5478 
5479 	return rx_bufs_used;
5480 }
5481 
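/* Queue a completed MPDU (head/tail skb chain) on the full monitor MPDU
 * list; it is delivered once the matching status ring PPDU has been
 * processed.
 */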
5482 static int ath11k_dp_rx_full_mon_prepare_mpdu(struct ath11k_dp *dp,
5483 					      struct dp_full_mon_mpdu *mon_mpdu,
5484 					      struct sk_buff *head,
5485 					      struct sk_buff *tail)
5486 {
5487 	mon_mpdu = kzalloc_obj(*mon_mpdu, GFP_ATOMIC);
5488 	if (!mon_mpdu)
5489 		return -ENOMEM;
5490 
5491 	list_add_tail(&mon_mpdu->list, &dp->dp_full_mon_mpdu_list);
5492 	mon_mpdu->head = head;
5493 	mon_mpdu->tail = tail;
5494 
5495 	return 0;
5496 }
5497 
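/* Free all MPDUs (and their skb chains) queued for the PPDU being
 * dropped.
 */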
5498 static void ath11k_dp_rx_full_mon_drop_ppdu(struct ath11k_dp *dp,
5499 					    struct dp_full_mon_mpdu *mon_mpdu)
5500 {
5501 	struct dp_full_mon_mpdu *tmp;
5502 	struct sk_buff *tmp_msdu, *skb_next;
5503 
5504 	if (list_empty(&dp->dp_full_mon_mpdu_list))
5505 		return;
5506 
5507 	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5508 		list_del(&mon_mpdu->list);
5509 
5510 		tmp_msdu = mon_mpdu->head;
5511 		while (tmp_msdu) {
5512 			skb_next = tmp_msdu->next;
5513 			dev_kfree_skb_any(tmp_msdu);
5514 			tmp_msdu = skb_next;
5515 		}
5516 
5517 		kfree(mon_mpdu);
5518 	}
5519 }
5520 
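/* Deliver every MPDU queued for the completed PPDU to mac80211 through
 * the monitor delivery path and release the list entries.
 */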
5521 static int ath11k_dp_rx_full_mon_deliver_ppdu(struct ath11k *ar,
5522 					      int mac_id,
5523 					      struct ath11k_mon_data *pmon,
5524 					      struct napi_struct *napi)
5525 {
5526 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5527 	struct dp_full_mon_mpdu *tmp;
5528 	struct dp_full_mon_mpdu *mon_mpdu = pmon->mon_mpdu;
5529 	struct sk_buff *head_msdu, *tail_msdu;
5530 	struct ath11k_base *ab = ar->ab;
5531 	struct ath11k_dp *dp = &ab->dp;
5532 	int ret = 0;
5533 
5534 	rx_mon_stats = &pmon->rx_mon_stats;
5535 
5536 	list_for_each_entry_safe(mon_mpdu, tmp, &dp->dp_full_mon_mpdu_list, list) {
5537 		list_del(&mon_mpdu->list);
5538 		head_msdu = mon_mpdu->head;
5539 		tail_msdu = mon_mpdu->tail;
5540 		if (head_msdu && tail_msdu) {
5541 			ret = ath11k_dp_rx_mon_deliver(ar, mac_id, head_msdu,
5542 						       &pmon->mon_ppdu_info,
5543 						       tail_msdu, napi);
5544 			rx_mon_stats->dest_mpdu_done++;
5545 			ath11k_dbg(ar->ab, ATH11K_DBG_DATA, "full mon: deliver ppdu\n");
5546 		}
5547 		kfree(mon_mpdu);
5548 	}
5549 
5550 	return ret;
5551 }
5552 
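/* Consume status ring entries for a PPDU whose destination ring entries
 * are already held: deliver the queued MPDUs once the matching status
 * buffer (DP_MON_STATUS_MATCH) is found, or drop the PPDU if the status
 * buffer is missing or the status ring appears to have run ahead of the
 * destination ring (DP_MON_STATUS_LEAD).
 */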
5553 static int
5554 ath11k_dp_rx_process_full_mon_status_ring(struct ath11k_base *ab, int mac_id,
5555 					  struct napi_struct *napi, int budget)
5556 {
5557 	struct ath11k *ar = ab->pdevs[mac_id].ar;
5558 	struct ath11k_pdev_dp *dp = &ar->dp;
5559 	struct ath11k_mon_data *pmon = &dp->mon_data;
5560 	struct hal_sw_mon_ring_entries *sw_mon_entries;
5561 	int quota = 0, work = 0, count;
5562 
5563 	sw_mon_entries = &pmon->sw_mon_entries;
5564 
5565 	while (pmon->hold_mon_dst_ring) {
5566 		quota = ath11k_dp_rx_process_mon_status(ab, mac_id,
5567 							napi, 1);
5568 		if (pmon->buf_state == DP_MON_STATUS_MATCH) {
5569 			count = sw_mon_entries->status_buf_count;
5570 			if (count > 1) {
5571 				quota += ath11k_dp_rx_process_mon_status(ab, mac_id,
5572 									 napi, count);
5573 			}
5574 
5575 			ath11k_dp_rx_full_mon_deliver_ppdu(ar, dp->mac_id,
5576 							   pmon, napi);
5577 			pmon->hold_mon_dst_ring = false;
5578 		} else if (!pmon->mon_status_paddr ||
5579 			   pmon->buf_state == DP_MON_STATUS_LEAD) {
5580 			sw_mon_entries->drop_ppdu = true;
5581 			pmon->hold_mon_dst_ring = false;
5582 		}
5583 
5584 		if (!quota)
5585 			break;
5586 
5587 		work += quota;
5588 	}
5589 
5590 	if (sw_mon_entries->drop_ppdu)
5591 		ath11k_dp_rx_full_mon_drop_ppdu(&ab->dp, pmon->mon_mpdu);
5592 
5593 	return work;
5594 }
5595 
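/* Full monitor mode entry point: reap the destination ring first,
 * queueing the MPDUs of each PPDU; on an end-of-ppdu entry, hold the
 * destination ring and switch to the status ring so both rings stay in
 * sync for the same PPDU.
 */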
5596 static int ath11k_dp_full_mon_process_rx(struct ath11k_base *ab, int mac_id,
5597 					 struct napi_struct *napi, int budget)
5598 {
5599 	struct ath11k *ar = ab->pdevs[mac_id].ar;
5600 	struct ath11k_pdev_dp *dp = &ar->dp;
5601 	struct ath11k_mon_data *pmon = &dp->mon_data;
5602 	struct hal_sw_mon_ring_entries *sw_mon_entries;
5603 	struct ath11k_pdev_mon_stats *rx_mon_stats;
5604 	struct sk_buff *head_msdu, *tail_msdu;
5605 	struct hal_srng *mon_dst_srng;
5606 	void *ring_entry;
5607 	u32 rx_bufs_used = 0, mpdu_rx_bufs_used;
5608 	int quota = 0, ret;
5609 	bool break_dst_ring = false;
5610 
5611 	spin_lock_bh(&pmon->mon_lock);
5612 
5613 	sw_mon_entries = &pmon->sw_mon_entries;
5614 	rx_mon_stats = &pmon->rx_mon_stats;
5615 
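	/* A previous pass already hit end-of-ppdu; finish the status ring
	 * work for that PPDU before reaping the destination ring again.
	 */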
5616 	if (pmon->hold_mon_dst_ring) {
5617 		spin_unlock_bh(&pmon->mon_lock);
5618 		goto reap_status_ring;
5619 	}
5620 
5621 	mon_dst_srng = &ar->ab->hal.srng_list[dp->rxdma_mon_dst_ring.ring_id];
5622 	spin_lock_bh(&mon_dst_srng->lock);
5623 
5624 	ath11k_hal_srng_access_begin(ar->ab, mon_dst_srng);
5625 	while ((ring_entry = ath11k_hal_srng_dst_peek(ar->ab, mon_dst_srng))) {
5626 		head_msdu = NULL;
5627 		tail_msdu = NULL;
5628 
5629 		mpdu_rx_bufs_used = ath11k_dp_rx_full_mon_mpdu_pop(ar, ring_entry,
5630 								   &head_msdu,
5631 								   &tail_msdu,
5632 								   sw_mon_entries);
5633 		rx_bufs_used += mpdu_rx_bufs_used;
5634 
5635 		if (!sw_mon_entries->end_of_ppdu) {
5636 			if (head_msdu) {
5637 				ret = ath11k_dp_rx_full_mon_prepare_mpdu(&ab->dp,
5638 									 pmon->mon_mpdu,
5639 									 head_msdu,
5640 									 tail_msdu);
5641 				if (ret)
5642 					break_dst_ring = true;
5643 			}
5644 
5645 			goto next_entry;
5646 		} else {
5647 			if (!sw_mon_entries->ppdu_id &&
5648 			    !sw_mon_entries->mon_status_paddr) {
5649 				break_dst_ring = true;
5650 				goto next_entry;
5651 			}
5652 		}
5653 
5654 		rx_mon_stats->dest_ppdu_done++;
5655 		pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5656 		pmon->buf_state = DP_MON_STATUS_LAG;
5657 		pmon->mon_status_paddr = sw_mon_entries->mon_status_paddr;
5658 		pmon->hold_mon_dst_ring = true;
5659 next_entry:
5660 		ring_entry = ath11k_hal_srng_dst_get_next_entry(ar->ab,
5661 								mon_dst_srng);
5662 		if (break_dst_ring)
5663 			break;
5664 	}
5665 
5666 	ath11k_hal_srng_access_end(ar->ab, mon_dst_srng);
5667 	spin_unlock_bh(&mon_dst_srng->lock);
5668 	spin_unlock_bh(&pmon->mon_lock);
5669 
5670 	if (rx_bufs_used) {
5671 		ath11k_dp_rxbufs_replenish(ar->ab, dp->mac_id,
5672 					   &dp->rxdma_mon_buf_ring,
5673 					   rx_bufs_used,
5674 					   HAL_RX_BUF_RBM_SW3_BM);
5675 	}
5676 
5677 reap_status_ring:
5678 	quota = ath11k_dp_rx_process_full_mon_status_ring(ab, mac_id,
5679 							  napi, budget);
5680 
5681 	return quota;
5682 }
5683 
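/* NAPI handler for the monitor rings; dispatches to the full monitor
 * path when monitor mode is active on hardware that supports it, and to
 * the status-ring-driven path otherwise.
 */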
5684 int ath11k_dp_rx_process_mon_rings(struct ath11k_base *ab, int mac_id,
5685 				   struct napi_struct *napi, int budget)
5686 {
5687 	struct ath11k *ar = ath11k_ab_to_ar(ab, mac_id);
5688 	int ret = 0;
5689 
5690 	if (test_bit(ATH11K_FLAG_MONITOR_STARTED, &ar->monitor_flags) &&
5691 	    ab->hw_params.full_monitor_mode)
5692 		ret = ath11k_dp_full_mon_process_rx(ab, mac_id, napi, budget);
5693 	else
5694 		ret = ath11k_dp_rx_process_mon_status(ab, mac_id, napi, budget);
5695 
5696 	return ret;
5697 }
5698 
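/* Reset the per-pdev monitor status tracking and statistics. */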
5699 static int ath11k_dp_rx_pdev_mon_status_attach(struct ath11k *ar)
5700 {
5701 	struct ath11k_pdev_dp *dp = &ar->dp;
5702 	struct ath11k_mon_data *pmon = (struct ath11k_mon_data *)&dp->mon_data;
5703 
5704 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
5705 
5706 	memset(&pmon->rx_mon_stats, 0,
5707 	       sizeof(pmon->rx_mon_stats));
5708 	return 0;
5709 }
5710 
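/* Per-pdev monitor attach: reset status tracking and, on hardware with
 * a dedicated rxdma1 block, set up the monitor link descriptor pool.
 */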
5711 int ath11k_dp_rx_pdev_mon_attach(struct ath11k *ar)
5712 {
5713 	struct ath11k_pdev_dp *dp = &ar->dp;
5714 	struct ath11k_mon_data *pmon = &dp->mon_data;
5715 	struct hal_srng *mon_desc_srng = NULL;
5716 	struct dp_srng *dp_srng;
5717 	int ret = 0;
5718 	u32 n_link_desc = 0;
5719 
5720 	ret = ath11k_dp_rx_pdev_mon_status_attach(ar);
5721 	if (ret) {
5722 		ath11k_warn(ar->ab, "pdev_mon_status_attach() failed");
5723 		return ret;
5724 	}
5725 
5726 	/* if rxdma1_enable is false, no need to setup
5727 	 * rxdma_mon_desc_ring.
5728 	 */
5729 	if (!ar->ab->hw_params.rxdma1_enable)
5730 		return 0;
5731 
5732 	dp_srng = &dp->rxdma_mon_desc_ring;
5733 	n_link_desc = dp_srng->size /
5734 		ath11k_hal_srng_get_entrysize(ar->ab, HAL_RXDMA_MONITOR_DESC);
5735 	mon_desc_srng =
5736 		&ar->ab->hal.srng_list[dp->rxdma_mon_desc_ring.ring_id];
5737 
5738 	ret = ath11k_dp_link_desc_setup(ar->ab, pmon->link_desc_banks,
5739 					HAL_RXDMA_MONITOR_DESC, mon_desc_srng,
5740 					n_link_desc);
5741 	if (ret) {
5742 		ath11k_warn(ar->ab, "mon_link_desc_pool_setup() failed");
5743 		return ret;
5744 	}
5745 	pmon->mon_last_linkdesc_paddr = 0;
5746 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
5747 	spin_lock_init(&pmon->mon_lock);
5748 
5749 	return 0;
5750 }
5751 
5752 static int ath11k_dp_mon_link_free(struct ath11k *ar)
5753 {
5754 	struct ath11k_pdev_dp *dp = &ar->dp;
5755 	struct ath11k_mon_data *pmon = &dp->mon_data;
5756 
5757 	ath11k_dp_link_desc_cleanup(ar->ab, pmon->link_desc_banks,
5758 				    HAL_RXDMA_MONITOR_DESC,
5759 				    &dp->rxdma_mon_desc_ring);
5760 	return 0;
5761 }
5762 
5763 int ath11k_dp_rx_pdev_mon_detach(struct ath11k *ar)
5764 {
5765 	ath11k_dp_mon_link_free(ar);
5766 	return 0;
5767 }
5768 
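/* Arm the reap timer so the monitor rings are drained periodically
 * while pktlog is active.
 */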
5769 int ath11k_dp_rx_pktlog_start(struct ath11k_base *ab)
5770 {
5771 	/* start reap timer */
5772 	mod_timer(&ab->mon_reap_timer,
5773 		  jiffies + msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));
5774 
5775 	return 0;
5776 }
5777 
5778 int ath11k_dp_rx_pktlog_stop(struct ath11k_base *ab, bool stop_timer)
5779 {
5780 	int ret;
5781 
5782 	if (stop_timer)
5783 		timer_delete_sync(&ab->mon_reap_timer);
5784 
5785 	/* reap all the monitor related rings */
5786 	ret = ath11k_dp_purge_mon_ring(ab);
5787 	if (ret) {
5788 		ath11k_warn(ab, "failed to purge dp mon ring: %d\n", ret);
5789 		return ret;
5790 	}
5791 
5792 	return 0;
5793 }
5794