// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/ieee80211.h>
#include <linux/kernel.h>
#include <linux/skbuff.h>
#include <crypto/hash.h>
#include "core.h"
#include "debug.h"
#include "hw.h"
#include "dp_rx.h"
#include "dp_tx.h"
#include "peer.h"
#include "dp_mon.h"
#include "debugfs_htt_stats.h"

static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
					   struct ath12k_dp_rx_tid_rxq *rx_tid);

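/* Detach up to @count descriptors from the head of @head onto @list,
 * marking each descriptor as in_use. Returns the number of nodes moved.
 */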
static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
				       struct list_head *head,
				       size_t count)
{
	struct list_head *cur;
	struct ath12k_rx_desc_info *rx_desc;
	size_t nodes = 0;

	if (!count) {
		INIT_LIST_HEAD(list);
		goto out;
	}

	list_for_each(cur, head) {
		if (!count)
			break;

		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
		rx_desc->in_use = true;

		count--;
		nodes++;
	}

	list_cut_before(list, head, cur);
out:
	return nodes;
}

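/* Clear the in_use flag on every descriptor in @used_list and splice the
 * list back onto the rx descriptor free list under rx_desc_lock.
 */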
static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
				      struct list_head *used_list)
{
	struct ath12k_rx_desc_info *rx_desc, *safe;

	/* Reset the use flag */
	list_for_each_entry_safe(rx_desc, safe, used_list, list)
		rx_desc->in_use = false;

	spin_lock_bh(&dp->rx_desc_lock);
	list_splice_tail(used_list, &dp->rx_desc_free_list);
	spin_unlock_bh(&dp->rx_desc_lock);
}

/* Returns number of Rx buffers replenished */
int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
				struct dp_rxdma_ring *rx_ring,
				struct list_head *used_list,
				int req_entries)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_buffer_addr *desc;
	struct hal_srng *srng;
	struct sk_buff *skb;
	int num_free;
	int num_remain;
	u32 cookie;
	dma_addr_t paddr;
	struct ath12k_rx_desc_info *rx_desc;
	enum hal_rx_buf_return_buf_manager mgr = dp->hal->hal_params->rx_buf_rbm;

	req_entries = min(req_entries, rx_ring->bufs_max);

	srng = &dp->hal->srng_list[rx_ring->refill_buf_ring.ring_id];

	spin_lock_bh(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
		req_entries = num_free;

	req_entries = min(num_free, req_entries);
	num_remain = req_entries;

	if (!num_remain)
		goto out;

	/* Get the descriptor from free list */
	if (list_empty(used_list)) {
		spin_lock_bh(&dp->rx_desc_lock);
		req_entries = ath12k_dp_list_cut_nodes(used_list,
						       &dp->rx_desc_free_list,
						       num_remain);
		spin_unlock_bh(&dp->rx_desc_lock);
		num_remain = req_entries;
	}

	while (num_remain > 0) {
		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
				    DP_RX_BUFFER_ALIGN_SIZE);
		if (!skb)
			break;

		if (!IS_ALIGNED((unsigned long)skb->data,
				DP_RX_BUFFER_ALIGN_SIZE)) {
			skb_pull(skb,
				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
				 skb->data);
		}

		paddr = dma_map_single(dp->dev, skb->data,
				       skb->len + skb_tailroom(skb),
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(dp->dev, paddr))
			goto fail_free_skb;

		rx_desc = list_first_entry_or_null(used_list,
						   struct ath12k_rx_desc_info,
						   list);
		if (!rx_desc)
			goto fail_dma_unmap;

		rx_desc->skb = skb;
		cookie = rx_desc->cookie;

		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
		if (!desc)
			goto fail_dma_unmap;

		list_del(&rx_desc->list);
		ATH12K_SKB_RXCB(skb)->paddr = paddr;

		num_remain--;

		ath12k_hal_rx_buf_addr_info_set(dp->hal, desc, paddr, cookie,
						mgr);
	}

	goto out;

fail_dma_unmap:
	dma_unmap_single(dp->dev, paddr, skb->len + skb_tailroom(skb),
			 DMA_FROM_DEVICE);
fail_free_skb:
	dev_kfree_skb_any(skb);
out:
	ath12k_hal_srng_access_end(ab, srng);

	if (!list_empty(used_list))
		ath12k_dp_rx_enqueue_free(dp, used_list);

	spin_unlock_bh(&srng->lock);

	return req_entries - num_remain;
}
EXPORT_SYMBOL(ath12k_dp_rx_bufs_replenish);

static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
					     struct dp_rxdma_mon_ring *rx_ring)
{
	struct sk_buff *skb;
	int buf_id;

	spin_lock_bh(&rx_ring->idr_lock);
	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
		idr_remove(&rx_ring->bufs_idr, buf_id);
		/* TODO: Understand where internal driver does this dma_unmap
		 * of rxdma_buffer.
		 */
		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
		dev_kfree_skb_any(skb);
	}

	idr_destroy(&rx_ring->bufs_idr);
	spin_unlock_bh(&rx_ring->idr_lock);

	return 0;
}

static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i;

	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);

	if (ab->hw_params->rxdma1_enable)
		return 0;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_rxdma_mon_buf_ring_free(ab,
						  &dp->rx_mon_status_refill_ring[i]);

	return 0;
}

static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
					      struct dp_rxdma_mon_ring *rx_ring,
					      u32 ringtype)
{
	int num_entries;

	num_entries = rx_ring->refill_buf_ring.size /
		ath12k_hal_srng_get_entrysize(ab, ringtype);

	rx_ring->bufs_max = num_entries;

	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
						    num_entries);
	else
		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);

	return 0;
}

static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
					  struct dp_rxdma_ring *rx_ring)
{
	LIST_HEAD(list);

	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);

	ath12k_dp_rx_bufs_replenish(ath12k_ab_to_dp(ab), rx_ring, &list, 0);

	return 0;
}

static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct dp_rxdma_mon_ring *mon_ring;
	int ret, i;

	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
	if (ret) {
		ath12k_warn(ab,
			    "failed to setup HAL_RXDMA_BUF\n");
		return ret;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
							 &dp->rxdma_mon_buf_ring,
							 HAL_RXDMA_MONITOR_BUF);
		if (ret)
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		mon_ring = &dp->rx_mon_status_refill_ring[i];
		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
							 HAL_RXDMA_MONITOR_STATUS);
		if (ret) {
			ath12k_warn(ab,
				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
			return ret;
		}
	}

	return 0;
}

static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
}

void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
}

int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	int ret;
	int i;

	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
					   HAL_REO_DST, i, 0,
					   DP_REO_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
			goto err_reo_cleanup;
		}
	}

	return 0;

err_reo_cleanup:
	ath12k_dp_rx_pdev_reo_cleanup(ab);

	return ret;
}

static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_base *ab = ar->ab;
	int i;
	int ret;
	u32 mac_id = dp->mac_id;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ret = ath12k_dp_srng_setup(ar->ab,
					   &dp->rxdma_mon_dst_ring[i],
					   HAL_RXDMA_MONITOR_DST,
					   0, mac_id + i,
					   DP_RXDMA_MONITOR_DST_RING_SIZE(ab));
		if (ret) {
			ath12k_warn(ar->ab,
				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
			return ret;
		}
	}

	return 0;
}

void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
			       struct ath12k_dp_rx_tid *rx_tid,
			       bool active)
{
	rx_tid_rxq->tid = rx_tid->tid;
	rx_tid_rxq->active = active;
	rx_tid_rxq->qbuf = rx_tid->qbuf;
}
EXPORT_SYMBOL(ath12k_dp_init_rx_tid_rxq);

static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
				     struct ath12k_reoq_buf *tid_qbuf)
{
	if (tid_qbuf->vaddr) {
		dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned,
				 tid_qbuf->size, DMA_BIDIRECTIONAL);
		kfree(tid_qbuf->vaddr);
		tid_qbuf->vaddr = NULL;
	}
}

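/* Drop all pending REO commands, cache flush elements and rx queue update
 * elements, freeing any qbuf still attached to them. Called from
 * ath12k_dp_free() during core destroy when no further REO command
 * completions are expected.
 */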
void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
	struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
				 list) {
		list_del(&cmd_queue->list);
		ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf);
		kfree(cmd_queue);
	}
	list_for_each_entry_safe(cmd_cache, tmp_cache,
				 &dp->reo_cmd_cache_flush_list, list) {
		list_del(&cmd_cache->list);
		dp->reo_cmd_cache_flush_count--;
		ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf);
		kfree(cmd_cache);
	}
	spin_unlock_bh(&dp->reo_rxq_flush_lock);

	spin_lock_bh(&dp->reo_cmd_lock);
	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
		list_del(&cmd->list);
		ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf);
		kfree(cmd);
	}
	spin_unlock_bh(&dp->reo_cmd_lock);
}

void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
			    enum hal_reo_cmd_status status)
{
	struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;

	if (status != HAL_REO_CMD_SUCCESS)
		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
			    rx_tid->tid, status);

	ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
}
EXPORT_SYMBOL(ath12k_dp_reo_cmd_free);

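/* Walk the pending rx queue update list and, for every entry whose TID has
 * been marked inactive, issue the queue delete, reset the REO qref entry
 * and free the element. Bails out early if sending the delete fails.
 */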
void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
{
	struct ath12k_base *ab = dp->ab;
	struct dp_reo_update_rx_queue_elem *elem, *tmp;

	spin_lock_bh(&dp->reo_rxq_flush_lock);

	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
		if (elem->rx_tid.active)
			continue;

		if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
			break;

		ath12k_dp_arch_peer_rx_tid_qref_reset(dp,
						      elem->is_ml_peer ?
						      elem->ml_peer_id : elem->peer_id,
						      elem->rx_tid.tid);

		if (ab->hw_params->reoq_lut_support)
			ath12k_hal_reo_shared_qaddr_cache_clear(ab);

		list_del(&elem->list);
		kfree(elem);
	}

	spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
EXPORT_SYMBOL(ath12k_dp_rx_process_reo_cmd_update_rx_queue_list);

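/* Completion handler for the HAL_REO_CMD_UPDATE_RX_QUEUE delete command.
 * On success the rx tid qbuf is queued on reo_cmd_cache_flush_list, and
 * aged entries on that list are then flushed out of the HW cache and freed.
 */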
void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
			       enum hal_reo_cmd_status status)
{
	struct ath12k_base *ab = dp->ab;
	struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;

	if (status == HAL_REO_CMD_DRAIN) {
		goto free_desc;
	} else if (status != HAL_REO_CMD_SUCCESS) {
		/* Shouldn't happen! Cleanup in case of other failure? */
		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
			    rx_tid->tid, status);
		return;
	}

	/* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries in the
	 * pending queue list whose TID has been marked inactive
	 */
	spin_lock_bh(&dp->dp_lock);
	ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
	spin_unlock_bh(&dp->dp_lock);

	elem = kzalloc_obj(*elem, GFP_ATOMIC);
	if (!elem)
		goto free_desc;

	elem->ts = jiffies;
	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
	dp->reo_cmd_cache_flush_count++;

	/* Flush and invalidate aged REO desc from HW cache */
	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
				 list) {
		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
		    time_after(jiffies, elem->ts +
			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
			/* The reo_cmd_cache_flush_list is used in only two contexts,
			 * one is in this function called from napi and the
			 * other in ath12k_dp_free during core destroy.
			 * If the cache flush command is sent successfully, delete
			 * the element from the cache list; any elements remaining
			 * at core destroy are freed by
			 * ath12k_dp_rx_reo_cmd_list_cleanup.
			 */

			if (ath12k_dp_arch_reo_cache_flush(dp, &elem->data))
				break;

			list_del(&elem->list);
			dp->reo_cmd_cache_flush_count--;

			kfree(elem);
		}
	}
	spin_unlock_bh(&dp->reo_rxq_flush_lock);

	return;
free_desc:
	ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
}
EXPORT_SYMBOL(ath12k_dp_rx_tid_del_func);

static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
					   struct ath12k_dp_rx_tid_rxq *rx_tid)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	return ath12k_dp_arch_rx_tid_delete_handler(dp, rx_tid);
}

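/* Mark the given TID of @peer_id inactive in the pending rx queue update
 * list so that the queue gets deleted on the next pass over the list.
 */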
void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
{
	struct dp_reo_update_rx_queue_elem *elem;
	struct ath12k_dp_rx_tid_rxq *rx_tid;

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
		if (elem->peer_id == peer_id) {
			rx_tid = &elem->rx_tid;
			if (rx_tid->tid == tid) {
				rx_tid->active = false;
				break;
			}
		}
	}
	spin_unlock_bh(&dp->reo_rxq_flush_lock);
}
EXPORT_SYMBOL(ath12k_dp_mark_tid_as_inactive);

void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer)
{
	struct ath12k_dp_rx_tid *rx_tid;
	int i;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	lockdep_assert_held(&dp->dp_lock);

	if (!peer->primary_link)
		return;

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->dp_peer->rx_tid[i];

		ath12k_dp_arch_rx_peer_tid_delete(dp, peer, i);
		ath12k_dp_arch_rx_frags_cleanup(dp, rx_tid, true);

		spin_unlock_bh(&dp->dp_lock);
		timer_delete_sync(&rx_tid->frag_timer);
		spin_lock_bh(&dp->dp_lock);
	}
}

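/* Pre-allocate the rx queue update element that is consumed later at tid
 * delete time; see the comment at the call site in
 * ath12k_dp_rx_peer_tid_setup().
 */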
static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
					     struct ath12k_dp_link_peer *peer,
					     struct ath12k_dp_rx_tid *rx_tid)
{
	struct dp_reo_update_rx_queue_elem *elem;

	lockdep_assert_held(&dp->dp_lock);

	elem = kzalloc_obj(*elem, GFP_ATOMIC);
	if (!elem)
		return -ENOMEM;

	elem->peer_id = peer->peer_id;
	elem->is_ml_peer = peer->mlo;
	elem->ml_peer_id = peer->ml_id;

	ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid,
				  (peer->rx_tid_active_bitmask & (1 << rx_tid->tid)));

	spin_lock_bh(&dp->reo_rxq_flush_lock);
	list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
	spin_unlock_bh(&dp->reo_rxq_flush_lock);

	return 0;
}

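/* Set up, or update if it already exists, the rx reorder queue for @tid of
 * the given peer. With REOQ LUT support the queue address is written into
 * the LUT, otherwise it is handed to the firmware via WMI.
 */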
int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
				u8 tid, u32 ba_win_sz, u16 ssn,
				enum hal_pn_type pn_type)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	dma_addr_t paddr_aligned;
	int ret;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&dp->dp_lock);
		return 0;
	}

	if (ab->hw_params->reoq_lut_support &&
	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
		spin_unlock_bh(&dp->dp_lock);
		ath12k_warn(ab, "reo qref table is not setup\n");
		return -EINVAL;
	}

	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
			    peer->peer_id, tid);
		spin_unlock_bh(&dp->dp_lock);
		return -EINVAL;
	}

	rx_tid = &peer->dp_peer->rx_tid[tid];
	/* Update the tid queue if it is already setup */
	if (peer->rx_tid_active_bitmask & (1 << tid)) {
		ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid,
							    ba_win_sz, ssn, true);
		spin_unlock_bh(&dp->dp_lock);
		if (ret) {
			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
			return ret;
		}

		if (!ab->hw_params->reoq_lut_support) {
			paddr_aligned = rx_tid->qbuf.paddr_aligned;
			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
								     peer_mac,
								     paddr_aligned, tid,
								     1, ba_win_sz);
			if (ret) {
				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
					    tid, ret);
				return ret;
			}
		}

		return 0;
	}

	rx_tid->tid = tid;
	rx_tid->ba_win_sz = ba_win_sz;

	ret = ath12k_dp_arch_rx_assign_reoq(dp, peer->dp_peer, rx_tid, ssn, pn_type);
	if (ret) {
		spin_unlock_bh(&dp->dp_lock);
		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
		return ret;
	}

	peer->rx_tid_active_bitmask |= (1 << tid);

	/* Pre-allocate the update_rxq_list element for this tid; it is
	 * consumed during tid delete. We do not allocate at delete time
	 * because an allocation failure there would leave the tid
	 * vaddr/paddr undeletable and leak the queue buffer.
	 */
	ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
	if (ret) {
		ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
		ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
		spin_unlock_bh(&dp->dp_lock);
		return ret;
	}

	paddr_aligned = rx_tid->qbuf.paddr_aligned;
	if (ab->hw_params->reoq_lut_support) {
		/* Update the REO queue LUT at the corresponding peer id
		 * and tid with qaddr.
		 */
		if (peer->mlo)
			ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->ml_id, tid,
							      paddr_aligned);
		else
			ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->peer_id, tid,
							      paddr_aligned);

		spin_unlock_bh(&dp->dp_lock);
	} else {
		spin_unlock_bh(&dp->dp_lock);
		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
							     paddr_aligned, tid, 1,
							     ba_win_sz);
	}

	return ret;
}

int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
			     struct ieee80211_ampdu_params *params,
			     u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_link_sta *arsta;
	int vdev_id;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
					  params->tid, params->buf_size,
					  params->ssn, arsta->ahsta->pn_type);
	if (ret)
		ath12k_warn(ab, "failed to setup rx tid: %d\n", ret);

	return ret;
}

int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
			    struct ieee80211_ampdu_params *params,
			    u8 link_id)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_dp_link_peer *peer;
	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
	struct ath12k_dp_rx_tid *rx_tid;
	struct ath12k_link_sta *arsta;
	int vdev_id;
	bool active;
	int ret;

	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);

	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
				  ahsta->link[link_id]);
	if (!arsta)
		return -ENOLINK;

	vdev_id = arsta->arvif->vdev_id;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, arsta->addr);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
		return -ENOENT;
	}

	if (ab->hw_params->dp_primary_link_only &&
	    !peer->primary_link) {
		spin_unlock_bh(&dp->dp_lock);
		return 0;
	}

	active = peer->rx_tid_active_bitmask & (1 << params->tid);
	if (!active) {
		spin_unlock_bh(&dp->dp_lock);
		return 0;
	}

	rx_tid = &peer->dp_peer->rx_tid[params->tid];
	ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid,
						    1, 0, false);
	spin_unlock_bh(&dp->dp_lock);
	if (ret) {
		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
			    params->tid, ret);
		return ret;
	}

	return ret;
}

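/* Enable or update HW PN/TSC replay checking on all active rx TID queues
 * of the peer. Only pairwise keys are offloaded; bcast/mcast traffic still
 * relies on mac80211's replay detection.
 */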
int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
				       const u8 *peer_addr,
				       enum set_key_cmd key_cmd,
				       struct ieee80211_key_conf *key)
{
	struct ath12k *ar = arvif->ar;
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct ath12k_hal_reo_cmd cmd = {};
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
	u8 tid;
	int ret = 0;

	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
	 * for now.
	 */
	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
		return 0;

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
							 peer_addr);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
			    peer_addr);
		return -ENOENT;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		if (!(peer->rx_tid_active_bitmask & (1 << tid)))
			continue;

		rx_tid = &peer->dp_peer->rx_tid[tid];
		ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid,
					  (peer->rx_tid_active_bitmask & (1 << tid)));
		ath12k_dp_arch_setup_pn_check_reo_cmd(dp, &cmd, rx_tid, key->cipher,
						      key_cmd);
		ret = ath12k_dp_arch_reo_cmd_send(dp, &rx_tid_rxq,
						  HAL_REO_CMD_UPDATE_RX_QUEUE,
						  &cmd, NULL);
		if (ret) {
			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
				    tid, peer_addr, ret);
			break;
		}
	}

	spin_unlock_bh(&dp->dp_lock);

	return ret;
}

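/* Return the last buffer of an MSDU: @first itself when it is not marked
 * as a continuation, otherwise the first entry in @msdu_list that does not
 * carry the continuation flag. Returns NULL if no such buffer is queued.
 */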
struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
					       struct sk_buff *first)
{
	struct sk_buff *skb;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);

	if (!rxcb->is_continuation)
		return first;

	skb_queue_walk(msdu_list, skb) {
		rxcb = ATH12K_SKB_RXCB(skb);
		if (!rxcb->is_continuation)
			return skb;
	}

	return NULL;
}
EXPORT_SYMBOL(ath12k_dp_rx_get_msdu_last_buf);

int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return 0;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_MIC_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_MIC_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(dp->ab, "unsupported encryption type %d for mic len\n", enctype);
	return 0;
}

static int ath12k_dp_rx_crypto_param_len(struct ath12k_pdev_dp *dp_pdev,
					 enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_IV_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_128:
		return IEEE80211_CCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_CCMP_256:
		return IEEE80211_CCMP_256_HDR_LEN;
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return IEEE80211_GCMP_HDR_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

static int ath12k_dp_rx_crypto_icv_len(struct ath12k_pdev_dp *dp_pdev,
				       enum hal_encrypt_type enctype)
{
	switch (enctype) {
	case HAL_ENCRYPT_TYPE_OPEN:
	case HAL_ENCRYPT_TYPE_CCMP_128:
	case HAL_ENCRYPT_TYPE_CCMP_256:
	case HAL_ENCRYPT_TYPE_GCMP_128:
	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
		return 0;
	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
	case HAL_ENCRYPT_TYPE_TKIP_MIC:
		return IEEE80211_TKIP_ICV_LEN;
	case HAL_ENCRYPT_TYPE_WEP_40:
	case HAL_ENCRYPT_TYPE_WEP_104:
	case HAL_ENCRYPT_TYPE_WEP_128:
	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
	case HAL_ENCRYPT_TYPE_WAPI:
		break;
	}

	ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
	return 0;
}

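/* Convert a native-wifi decapped MSDU back into a regular 802.11 QoS data
 * frame: rebuild the QoS control field and, if the IV was not stripped by
 * HW, the crypto header that mac80211 expects.
 */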
static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k_pdev_dp *dp_pdev,
					 struct sk_buff *msdu,
					 enum hal_encrypt_type enctype,
					 struct hal_rx_desc_data *rx_info)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	u8 *crypto_hdr;
	u16 qos_ctl;

	/* pull decapped header */
	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);
	skb_pull(msdu, hdr_len);

	/* Rebuild QoS header */
	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);

	/* Reset the order bit as the HT_Control header is stripped */
	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));

	qos_ctl = rxcb->tid;

	if (rx_info->mesh_ctrl_present)
		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;

	/* TODO: Add other QoS ctl fields when required */

	/* copy decap header before overwriting for reuse below */
	memcpy(decap_hdr, hdr, hdr_len);

	/* Rebuild crypto header for mac80211 use */
	if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_hdr = skb_push(msdu,
				      ath12k_dp_rx_crypto_param_len(dp_pdev, enctype));
		ath12k_dp_rx_desc_get_crypto_header(dp_pdev->dp->hal,
						    rxcb->rx_desc, crypto_hdr,
						    enctype);
	}

	memcpy(skb_push(msdu, IEEE80211_QOS_CTL_LEN), &qos_ctl,
	       IEEE80211_QOS_CTL_LEN);
	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
}

static void ath12k_dp_rx_h_undecap_raw(struct ath12k_pdev_dp *dp_pdev,
				       struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct ieee80211_rx_status *status,
				       bool decrypted)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;

	if (!rxcb->is_first_msdu ||
	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
		WARN_ON_ONCE(1);
		return;
	}

	skb_trim(msdu, msdu->len - FCS_LEN);

	if (!decrypted)
		return;

	hdr = (void *)msdu->data;

	/* Tail */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_mic_len(dp, enctype));

		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
	} else {
		/* MIC */
		if (status->flag & RX_FLAG_MIC_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_mic_len(dp, enctype));

		/* ICV */
		if (status->flag & RX_FLAG_ICV_STRIPPED)
			skb_trim(msdu, msdu->len -
				 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
	}

	/* MMIC */
	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
	    !ieee80211_has_morefrags(hdr->frame_control) &&
	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);

	/* Head */
	if (status->flag & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);

		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}

static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k_pdev_dp *dp_pdev,
					      struct sk_buff *msdu,
					      struct ath12k_skb_rxcb *rxcb,
					      enum hal_encrypt_type enctype,
					      struct hal_rx_desc_data *rx_info)
{
	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ath12k_hal *hal = dp->hal;
	size_t hdr_len, crypto_len;
	struct ieee80211_hdr hdr;
	__le16 qos_ctl;
	u8 *crypto_hdr;

	ath12k_dp_rx_desc_get_dot11_hdr(hal, rx_desc, &hdr);
	hdr_len = ieee80211_hdrlen(hdr.frame_control);

	if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
		crypto_hdr = skb_push(msdu, crypto_len);
		ath12k_dp_rx_desc_get_crypto_header(dp->hal, rx_desc, crypto_hdr,
						    enctype);
	}

	skb_push(msdu, hdr_len);
	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));

	if (rxcb->is_mcbc)
		rx_info->rx_status->flag &= ~RX_FLAG_PN_VALIDATED;

	/* Add QOS header */
	if (ieee80211_is_data_qos(hdr.frame_control)) {
		struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;

		qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
		if (rx_info->mesh_ctrl_present)
			qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);

		memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
	}
}

static void ath12k_dp_rx_h_undecap_eth(struct ath12k_pdev_dp *dp_pdev,
				       struct sk_buff *msdu,
				       enum hal_encrypt_type enctype,
				       struct hal_rx_desc_data *rx_info)
{
	struct ieee80211_hdr *hdr;
	struct ethhdr *eth;
	u8 da[ETH_ALEN];
	u8 sa[ETH_ALEN];
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};

	eth = (struct ethhdr *)msdu->data;
	ether_addr_copy(da, eth->h_dest);
	ether_addr_copy(sa, eth->h_source);
	rfc.snap_type = eth->h_proto;
	skb_pull(msdu, sizeof(*eth));
	memcpy(skb_push(msdu, sizeof(rfc)), &rfc, sizeof(rfc));
	ath12k_get_dot11_hdr_from_rx_desc(dp_pdev, msdu, rxcb, enctype, rx_info);

	/* original 802.11 header has a different DA and in
	 * case of 4addr it may also have different SA
	 */
	hdr = (struct ieee80211_hdr *)msdu->data;
	ether_addr_copy(ieee80211_get_DA(hdr), da);
	ether_addr_copy(ieee80211_get_SA(hdr), sa);
}

void ath12k_dp_rx_h_undecap(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
			    struct hal_rx_desc *rx_desc,
			    enum hal_encrypt_type enctype,
			    bool decrypted,
			    struct hal_rx_desc_data *rx_info)
{
	struct ethhdr *ehdr;

	switch (rx_info->decap_type) {
	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
		ath12k_dp_rx_h_undecap_nwifi(dp_pdev, msdu, enctype, rx_info);
		break;
	case DP_RX_DECAP_TYPE_RAW:
		ath12k_dp_rx_h_undecap_raw(dp_pdev, msdu, enctype, rx_info->rx_status,
					   decrypted);
		break;
	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
		ehdr = (struct ethhdr *)msdu->data;

		/* mac80211 allows fast path only for authorized STA */
		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
			ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
			break;
		}

		/* PN for mcast packets will be validated in mac80211;
		 * remove eth header and add 802.11 header.
		 */
		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
			ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
		break;
	case DP_RX_DECAP_TYPE_8023:
		/* TODO: Handle undecap for these formats */
		break;
	}
}
EXPORT_SYMBOL(ath12k_dp_rx_h_undecap);

struct ath12k_dp_link_peer *
ath12k_dp_rx_h_find_link_peer(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
			      struct hal_rx_desc_data *rx_info)
{
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ath12k_dp_link_peer *peer = NULL;
	struct ath12k_dp *dp = dp_pdev->dp;

	lockdep_assert_held(&dp->dp_lock);

	if (rxcb->peer_id)
		peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, rxcb->peer_id);

	if (peer)
		return peer;

	if (rx_info->addr2_present)
		peer = ath12k_dp_link_peer_find_by_addr(dp, rx_info->addr2);

	return peer;
}

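/* Fill the rate fields of the rx status (encoding, rate index or MCS, NSS,
 * GI and bandwidth) from the parsed rx descriptor data.
 */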
static void ath12k_dp_rx_h_rate(struct ath12k_pdev_dp *dp_pdev,
				struct hal_rx_desc_data *rx_info)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ieee80211_supported_band *sband;
	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
	enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
	u8 bw = rx_info->bw, sgi = rx_info->sgi;
	u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
	bool is_cck;
	struct ath12k *ar;

	switch (pkt_type) {
	case RX_MSDU_START_PKT_TYPE_11A:
	case RX_MSDU_START_PKT_TYPE_11B:
		ar = ath12k_pdev_dp_to_ar(dp_pdev);
		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
		sband = &ar->mac.sbands[rx_status->band];
		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
								is_cck);
		break;
	case RX_MSDU_START_PKT_TYPE_11N:
		rx_status->encoding = RX_ENC_HT;
		if (rate_mcs > ATH12K_HT_MCS_MAX) {
			ath12k_warn(dp->ab,
				    "Received with invalid mcs in HT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AC:
		rx_status->encoding = RX_ENC_VHT;
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
			ath12k_warn(dp->ab,
				    "Received with invalid mcs in VHT mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->nss = nss;
		if (sgi)
			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11AX:
		rx_status->rate_idx = rate_mcs;
		if (rate_mcs > ATH12K_HE_MCS_MAX) {
			ath12k_warn(dp->ab,
				    "Received with invalid mcs in HE mode %d\n",
				    rate_mcs);
			break;
		}
		rx_status->encoding = RX_ENC_HE;
		rx_status->nss = nss;
		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	case RX_MSDU_START_PKT_TYPE_11BE:
		rx_status->rate_idx = rate_mcs;

		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
			ath12k_warn(dp->ab,
				    "Received with invalid mcs in EHT mode %d\n",
				    rate_mcs);
			break;
		}

		rx_status->encoding = RX_ENC_EHT;
		rx_status->nss = nss;
		rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
		break;
	default:
		break;
	}
}

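/* Derive band and frequency for the rx status from the PHY metadata in the
 * rx descriptor, falling back to the current rx channel when the reported
 * band/channel combination is unusable, then fill in the rate fields.
 */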
void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev,
			 struct hal_rx_desc_data *rx_info)
{
	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
	u8 channel_num;
	u32 center_freq, meta_data;
	struct ieee80211_channel *channel;

	rx_status->freq = 0;
	rx_status->rate_idx = 0;
	rx_status->nss = 0;
	rx_status->encoding = RX_ENC_LEGACY;
	rx_status->bw = RATE_INFO_BW_20;
	rx_status->enc_flags = 0;

	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;

	meta_data = rx_info->phy_meta_data;
	channel_num = meta_data;
	center_freq = meta_data >> 16;

	rx_status->band = NUM_NL80211_BANDS;

	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
		rx_status->band = NL80211_BAND_6GHZ;
		rx_status->freq = center_freq;
	} else if (channel_num >= 1 && channel_num <= 14) {
		rx_status->band = NL80211_BAND_2GHZ;
	} else if (channel_num >= 36 && channel_num <= 173) {
		rx_status->band = NL80211_BAND_5GHZ;
	}

	if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
		     !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rx_status->band])) {
		struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);

		ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
			    rx_status->band, channel_num, center_freq, ar->pdev_idx);

		spin_lock_bh(&ar->data_lock);
		channel = ar->rx_channel;
		if (channel) {
			rx_status->band = channel->band;
			channel_num =
				ieee80211_frequency_to_channel(channel->center_freq);
			rx_status->freq = ieee80211_channel_to_frequency(channel_num,
									 rx_status->band);
		} else {
			ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
		}
		spin_unlock_bh(&ar->data_lock);
		goto h_rate;
	}

	if (rx_status->band != NL80211_BAND_6GHZ)
		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
								 rx_status->band);

h_rate:
	ath12k_dp_rx_h_rate(dp_pdev, rx_info);
}
EXPORT_SYMBOL(ath12k_dp_rx_h_ppdu);

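/* Deliver a fully prepared MSDU to mac80211. Sets the link id for MLO
 * stations and flags 802.3 frames for the fast path where mac80211
 * permits it (authorized STA, not eapol, PN validated in HW).
 */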
void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi,
			       struct sk_buff *msdu,
			       struct hal_rx_desc_data *rx_info)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ieee80211_rx_status *rx_status;
	struct ieee80211_sta *pubsta;
	struct ath12k_dp_peer *peer;
	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
	struct ieee80211_rx_status *status = rx_info->rx_status;
	u8 decap = rx_info->decap_type;
	bool is_mcbc = rxcb->is_mcbc;
	bool is_eapol = rxcb->is_eapol;

	peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rx_info->peer_id);

	pubsta = peer ? peer->sta : NULL;

	if (pubsta && pubsta->valid_links) {
		status->link_valid = 1;
		status->link_id = peer->hw_links[rxcb->hw_link_id];
	}

	ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
		   msdu,
		   msdu->len,
		   peer ? peer->addr : NULL,
		   rxcb->tid,
		   is_mcbc ? "mcast" : "ucast",
		   rx_info->seq_no,
		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
		   (status->encoding == RX_ENC_HT) ? "ht" : "",
		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
		   (status->encoding == RX_ENC_HE) ? "he" : "",
		   (status->encoding == RX_ENC_EHT) ? "eht" : "",
		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
		   status->rate_idx,
		   status->nss,
		   status->freq,
		   status->band, status->flag,
		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
		   !!(status->flag & RX_FLAG_MMIC_ERROR),
		   !!(status->flag & RX_FLAG_AMSDU_MORE));

	ath12k_dbg_dump(dp->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
			msdu->data, msdu->len);

	rx_status = IEEE80211_SKB_RXCB(msdu);
	*rx_status = *status;

	/* TODO: trace rx packet */

	/* PN for multicast packets is not validated in HW,
	 * so skip the 802.3 rx path for them.
	 * Also, fast_rx expects the STA to be authorized, hence
	 * eapol packets are sent in slow path.
	 */
	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
		rx_status->flag |= RX_FLAG_8023;

	ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi);
}
EXPORT_SYMBOL(ath12k_dp_rx_deliver_msdu);

bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_dp *dp,
					    struct hal_rx_desc *rx_desc,
					    struct sk_buff *msdu,
					    struct hal_rx_desc_data *rx_info)
{
	struct ieee80211_hdr *hdr;
	u32 hdr_len;

	if (rx_info->decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
		return true;

	hdr = (struct ieee80211_hdr *)msdu->data;
	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
		return true;

	dp->device_stats.invalid_rbm++;
	WARN_ON_ONCE(1);
	return false;
}
EXPORT_SYMBOL(ath12k_dp_rx_check_nwifi_hdr_len_valid);

static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
{
	struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
							     frag_timer);

	spin_lock_bh(&rx_tid->dp->dp_lock);
	if (rx_tid->last_frag_no &&
	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
		spin_unlock_bh(&rx_tid->dp->dp_lock);
		return;
	}
	ath12k_dp_arch_rx_frags_cleanup(rx_tid->dp, rx_tid, true);
	spin_unlock_bh(&rx_tid->dp->dp_lock);
}

int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
{
	struct ath12k_base *ab = ar->ab;
	struct crypto_shash *tfm;
	struct ath12k_dp_link_peer *peer;
	struct ath12k_dp_rx_tid *rx_tid;
	int i;
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);

	tfm = crypto_alloc_shash("michael_mic", 0, 0);
	if (IS_ERR(tfm))
		return PTR_ERR(tfm);

	spin_lock_bh(&dp->dp_lock);

	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
	if (!peer || !peer->dp_peer) {
		spin_unlock_bh(&dp->dp_lock);
		crypto_free_shash(tfm);
		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
		return -ENOENT;
	}

	if (!peer->primary_link) {
		spin_unlock_bh(&dp->dp_lock);
		crypto_free_shash(tfm);
		return 0;
	}

	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
		rx_tid = &peer->dp_peer->rx_tid[i];
		rx_tid->dp = dp;
		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
		skb_queue_head_init(&rx_tid->rx_frags);
	}

	peer->dp_peer->tfm_mmic = tfm;
	peer->dp_peer->dp_setup_done = true;
	spin_unlock_bh(&dp->dp_lock);

	return 0;
}

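/* Compute the TKIP Michael MIC over @data with the michael_mic shash. The
 * MIC header is built from DA, SA, the TID and zero padding as defined in
 * IEEE Std 802.11.
 */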
int ath12k_dp_rx_h_michael_mic(struct crypto_shash *tfm, u8 *key,
			       struct ieee80211_hdr *hdr, u8 *data,
			       size_t data_len, u8 *mic)
{
	SHASH_DESC_ON_STACK(desc, tfm);
	u8 mic_hdr[16] = {};
	u8 tid = 0;
	int ret;

	if (!tfm)
		return -EINVAL;

	desc->tfm = tfm;

	ret = crypto_shash_setkey(tfm, key, 8);
	if (ret)
		goto out;

	ret = crypto_shash_init(desc);
	if (ret)
		goto out;

	/* TKIP MIC header */
	memcpy(mic_hdr, ieee80211_get_DA(hdr), ETH_ALEN);
	memcpy(mic_hdr + ETH_ALEN, ieee80211_get_SA(hdr), ETH_ALEN);
	if (ieee80211_is_data_qos(hdr->frame_control))
		tid = ieee80211_get_tid(hdr);
	mic_hdr[12] = tid;

	ret = crypto_shash_update(desc, mic_hdr, 16);
	if (ret)
		goto out;
	ret = crypto_shash_update(desc, data, data_len);
	if (ret)
		goto out;
	ret = crypto_shash_final(desc, mic);
out:
	shash_desc_zero(desc);
	return ret;
}
EXPORT_SYMBOL(ath12k_dp_rx_h_michael_mic);

void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
				 enum hal_encrypt_type enctype, u32 flags)
{
	struct ath12k_dp *dp = dp_pdev->dp;
	struct ieee80211_hdr *hdr;
	size_t hdr_len;
	size_t crypto_len;
	u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;

	if (!flags)
		return;

	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);

	if (flags & RX_FLAG_MIC_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_mic_len(dp, enctype));

	if (flags & RX_FLAG_ICV_STRIPPED)
		skb_trim(msdu, msdu->len -
			 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));

	if (flags & RX_FLAG_IV_STRIPPED) {
		hdr_len = ieee80211_hdrlen(hdr->frame_control);
		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);

		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
			msdu->data + hal_rx_desc_sz, hdr_len);
		skb_pull(msdu, crypto_len);
	}
}
EXPORT_SYMBOL(ath12k_dp_rx_h_undecap_frag);

static int ath12k_dp_rx_h_cmp_frags(struct ath12k_hal *hal,
				    struct sk_buff *a, struct sk_buff *b)
{
	int frag1, frag2;

	frag1 = ath12k_dp_rx_h_frag_no(hal, a);
	frag2 = ath12k_dp_rx_h_frag_no(hal, b);

	return frag1 - frag2;
}

void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
			       struct sk_buff_head *frag_list,
			       struct sk_buff *cur_frag)
{
	struct sk_buff *skb;
	int cmp;

	skb_queue_walk(frag_list, skb) {
		cmp = ath12k_dp_rx_h_cmp_frags(hal, skb, cur_frag);
		if (cmp < 0)
			continue;
		__skb_queue_before(frag_list, skb, cur_frag);
		return;
	}
	__skb_queue_tail(frag_list, cur_frag);
}
EXPORT_SYMBOL(ath12k_dp_rx_h_sort_frags);

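/* Extract the 48-bit packet number from the IV that follows the 802.11
 * header of a fragment which still carries its HAL rx descriptor.
 */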
u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr;
	u64 pn = 0;
	u8 *ehdr;
	u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;

	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);

	pn = ehdr[0];
	pn |= (u64)ehdr[1] << 8;
	pn |= (u64)ehdr[4] << 16;
	pn |= (u64)ehdr[5] << 24;
	pn |= (u64)ehdr[6] << 32;
	pn |= (u64)ehdr[7] << 40;

	return pn;
}
EXPORT_SYMBOL(ath12k_dp_rx_h_get_pn);

void ath12k_dp_rx_free(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct dp_srng *srng;
	int i;

	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		if (ab->hw_params->rx_mac_buf_ring)
			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
		if (!ab->hw_params->rxdma1_enable) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ath12k_dp_srng_cleanup(ab, srng);
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);

	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);

	ath12k_dp_rxdma_buf_free(ab);
}

void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;

	ath12k_dp_rx_pdev_srng_free(ar);
}

int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	u32 ring_id;
	int i, ret;

	/* TODO: Need to verify the HTT setup for QCN9224 */
	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
	if (ret) {
		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
			    ret);
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
							  i, HAL_RXDMA_BUF);
			if (ret) {
				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  i, HAL_RXDMA_DST);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
				    i, ret);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  0, HAL_RXDMA_MONITOR_BUF);
		if (ret) {
			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
				    ret);
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ring_id =
				dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
							  HAL_RXDMA_MONITOR_STATUS);
			if (ret) {
				ath12k_warn(ab,
					    "failed to configure mon_status_refill_ring%d %d\n",
					    i, ret);
				return ret;
			}
		}
	}

	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_alloc(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
	struct dp_srng *srng;
	int i, ret;

	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);

	ret = ath12k_dp_srng_setup(ab,
				   &dp->rx_refill_buf_ring.refill_buf_ring,
				   HAL_RXDMA_BUF, 0, 0,
				   DP_RXDMA_BUF_RING_SIZE);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
		return ret;
	}

	if (ab->hw_params->rx_mac_buf_ring) {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			ret = ath12k_dp_srng_setup(ab,
						   &dp->rx_mac_buf_ring[i],
						   HAL_RXDMA_BUF, 1,
						   i, DP_RX_MAC_BUF_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
					    i);
				return ret;
			}
		}
	}

	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
					   HAL_RXDMA_DST, 0, i,
					   DP_RXDMA_ERR_DST_RING_SIZE);
		if (ret) {
			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
			return ret;
		}
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_srng_setup(ab,
					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
					   HAL_RXDMA_MONITOR_BUF, 0, 0,
					   DP_RXDMA_MONITOR_BUF_RING_SIZE(ab));
		if (ret) {
			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
			return ret;
		}
	} else {
		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
		}

		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
			ret = ath12k_dp_srng_setup(ab, srng,
						   HAL_RXDMA_MONITOR_STATUS, 0, i,
						   DP_RXDMA_MON_STATUS_RING_SIZE);
			if (ret) {
				ath12k_warn(ab, "failed to setup mon status ring %d\n",
					    i);
				return ret;
			}
		}
	}

	ret = ath12k_dp_rxdma_buf_setup(ab);
	if (ret) {
		ath12k_warn(ab, "failed to setup rxdma ring\n");
		return ret;
	}

	return 0;
}

int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
{
	struct ath12k *ar = ab->pdevs[mac_id].ar;
	struct ath12k_pdev_dp *dp = &ar->dp;
	u32 ring_id;
	int i;
	int ret;

	if (!ab->hw_params->rxdma1_enable)
		goto out;

	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
	if (ret) {
		ath12k_warn(ab, "failed to setup rx srngs\n");
		return ret;
	}

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
						  mac_id + i,
						  HAL_RXDMA_MONITOR_DST);
		if (ret) {
			ath12k_warn(ab,
				    "failed to configure rxdma_mon_dst_ring %d %d\n",
				    i, ret);
			return ret;
		}
	}
out:
	return 0;
}

static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;

	skb_queue_head_init(&pmon->rx_status_q);

	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;

	memset(&pmon->rx_mon_stats, 0,
	       sizeof(pmon->rx_mon_stats));
	return 0;
}

int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
{
	struct ath12k_pdev_dp *dp = &ar->dp;
	struct ath12k_mon_data *pmon = &dp->mon_data;
	int ret = 0;

	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
	if (ret) {
		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed\n");
		return ret;
	}

	pmon->mon_last_linkdesc_paddr = 0;
	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
	spin_lock_init(&pmon->mon_lock);

	if (!ar->ab->hw_params->rxdma1_enable)
		return 0;

	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
	pmon->mon_mpdu = NULL;

	return 0;
}
1857