xref: /linux/drivers/net/wireless/ath/ath12k/dp_rx.c (revision fcee7d82f27d6a8b1ddc5bbefda59b4e441e9bc0)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/fips.h>
8 #include <linux/ieee80211.h>
9 #include <linux/kernel.h>
10 #include <linux/skbuff.h>
11 #include "core.h"
12 #include "debug.h"
13 #include "hw.h"
14 #include "dp_rx.h"
15 #include "dp_tx.h"
16 #include "peer.h"
17 #include "dp_mon.h"
18 #include "debugfs_htt_stats.h"
19 
20 static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
21 					   struct ath12k_dp_rx_tid_rxq *rx_tid);
22 
23 static size_t ath12k_dp_list_cut_nodes(struct list_head *list,
24 				       struct list_head *head,
25 				       size_t count)
26 {
27 	struct list_head *cur;
28 	struct ath12k_rx_desc_info *rx_desc;
29 	size_t nodes = 0;
30 
31 	if (!count) {
32 		INIT_LIST_HEAD(list);
33 		goto out;
34 	}
35 
36 	list_for_each(cur, head) {
37 		if (!count)
38 			break;
39 
40 		rx_desc = list_entry(cur, struct ath12k_rx_desc_info, list);
41 		rx_desc->in_use = true;
42 
43 		count--;
44 		nodes++;
45 	}
46 
47 	list_cut_before(list, head, cur);
48 out:
49 	return nodes;
50 }
51 
52 static void ath12k_dp_rx_enqueue_free(struct ath12k_dp *dp,
53 				      struct list_head *used_list)
54 {
55 	struct ath12k_rx_desc_info *rx_desc, *safe;
56 
57 	/* Reset the use flag */
58 	list_for_each_entry_safe(rx_desc, safe, used_list, list)
59 		rx_desc->in_use = false;
60 
61 	spin_lock_bh(&dp->rx_desc_lock);
62 	list_splice_tail(used_list, &dp->rx_desc_free_list);
63 	spin_unlock_bh(&dp->rx_desc_lock);
64 }
65 
66 /* Returns number of Rx buffers replenished */
67 int ath12k_dp_rx_bufs_replenish(struct ath12k_dp *dp,
68 				struct dp_rxdma_ring *rx_ring,
69 				struct list_head *used_list,
70 				int req_entries)
71 {
72 	struct ath12k_base *ab = dp->ab;
73 	struct ath12k_buffer_addr *desc;
74 	struct hal_srng *srng;
75 	struct sk_buff *skb;
76 	int num_free;
77 	int num_remain;
78 	u32 cookie;
79 	dma_addr_t paddr;
80 	struct ath12k_rx_desc_info *rx_desc;
81 	enum hal_rx_buf_return_buf_manager mgr = dp->hal->hal_params->rx_buf_rbm;
82 
83 	req_entries = min(req_entries, rx_ring->bufs_max);
84 
85 	srng = &dp->hal->srng_list[rx_ring->refill_buf_ring.ring_id];
86 
87 	spin_lock_bh(&srng->lock);
88 
89 	ath12k_hal_srng_access_begin(ab, srng);
90 
91 	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
92 	if (!req_entries && (num_free > (rx_ring->bufs_max * 3) / 4))
93 		req_entries = num_free;
94 
95 	req_entries = min(num_free, req_entries);
96 	num_remain = req_entries;
97 
98 	if (!num_remain)
99 		goto out;
100 
101 	/* Get the descriptor from free list */
102 	if (list_empty(used_list)) {
103 		spin_lock_bh(&dp->rx_desc_lock);
104 		req_entries = ath12k_dp_list_cut_nodes(used_list,
105 						       &dp->rx_desc_free_list,
106 						       num_remain);
107 		spin_unlock_bh(&dp->rx_desc_lock);
108 		num_remain = req_entries;
109 	}
110 
111 	while (num_remain > 0) {
112 		skb = dev_alloc_skb(DP_RX_BUFFER_SIZE +
113 				    DP_RX_BUFFER_ALIGN_SIZE);
114 		if (!skb)
115 			break;
116 
117 		if (!IS_ALIGNED((unsigned long)skb->data,
118 				DP_RX_BUFFER_ALIGN_SIZE)) {
119 			skb_pull(skb,
120 				 PTR_ALIGN(skb->data, DP_RX_BUFFER_ALIGN_SIZE) -
121 				 skb->data);
122 		}
123 
124 		paddr = dma_map_single(dp->dev, skb->data,
125 				       skb->len + skb_tailroom(skb),
126 				       DMA_FROM_DEVICE);
127 		if (dma_mapping_error(dp->dev, paddr))
128 			goto fail_free_skb;
129 
130 		rx_desc = list_first_entry_or_null(used_list,
131 						   struct ath12k_rx_desc_info,
132 						   list);
133 		if (!rx_desc)
134 			goto fail_dma_unmap;
135 
136 		rx_desc->skb = skb;
137 		cookie = rx_desc->cookie;
138 
139 		desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
140 		if (!desc)
141 			goto fail_dma_unmap;
142 
143 		list_del(&rx_desc->list);
144 		ATH12K_SKB_RXCB(skb)->paddr = paddr;
145 
146 		num_remain--;
147 
148 		ath12k_hal_rx_buf_addr_info_set(dp->hal, desc, paddr, cookie,
149 						mgr);
150 	}
151 
152 	goto out;
153 
154 fail_dma_unmap:
155 	dma_unmap_single(dp->dev, paddr, skb->len + skb_tailroom(skb),
156 			 DMA_FROM_DEVICE);
157 fail_free_skb:
158 	dev_kfree_skb_any(skb);
159 out:
160 	ath12k_hal_srng_access_end(ab, srng);
161 
162 	if (!list_empty(used_list))
163 		ath12k_dp_rx_enqueue_free(dp, used_list);
164 
165 	spin_unlock_bh(&srng->lock);
166 
167 	return req_entries - num_remain;
168 }
169 EXPORT_SYMBOL(ath12k_dp_rx_bufs_replenish);
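/* Usage sketch (illustrative, not part of the original file): callers either
 * pass an empty list, so descriptors are cut from dp->rx_desc_free_list as
 * ath12k_dp_rxdma_ring_buf_setup() below does, or hand back descriptors they
 * have already reaped, in which case only those entries are re-posted:
 *
 *	LIST_HEAD(used_list);
 *	// ... move reaped ath12k_rx_desc_info entries onto used_list ...
 *	ath12k_dp_rx_bufs_replenish(dp, rx_ring, &used_list, num_reaped);
 */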
170 
171 static int ath12k_dp_rxdma_mon_buf_ring_free(struct ath12k_base *ab,
172 					     struct dp_rxdma_mon_ring *rx_ring)
173 {
174 	struct sk_buff *skb;
175 	int buf_id;
176 
177 	spin_lock_bh(&rx_ring->idr_lock);
178 	idr_for_each_entry(&rx_ring->bufs_idr, skb, buf_id) {
179 		idr_remove(&rx_ring->bufs_idr, buf_id);
180 		/* TODO: Understand where internal driver does this dma_unmap
181 		 * of rxdma_buffer.
182 		 */
183 		dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
184 				 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
185 		dev_kfree_skb_any(skb);
186 	}
187 
188 	idr_destroy(&rx_ring->bufs_idr);
189 	spin_unlock_bh(&rx_ring->idr_lock);
190 
191 	return 0;
192 }
193 
194 static int ath12k_dp_rxdma_buf_free(struct ath12k_base *ab)
195 {
196 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
197 	int i;
198 
199 	ath12k_dp_rxdma_mon_buf_ring_free(ab, &dp->rxdma_mon_buf_ring);
200 
201 	if (ab->hw_params->rxdma1_enable)
202 		return 0;
203 
204 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
205 		ath12k_dp_rxdma_mon_buf_ring_free(ab,
206 						  &dp->rx_mon_status_refill_ring[i]);
207 
208 	return 0;
209 }
210 
211 static int ath12k_dp_rxdma_mon_ring_buf_setup(struct ath12k_base *ab,
212 					      struct dp_rxdma_mon_ring *rx_ring,
213 					      u32 ringtype)
214 {
215 	int num_entries;
216 
217 	num_entries = rx_ring->refill_buf_ring.size /
218 		ath12k_hal_srng_get_entrysize(ab, ringtype);
219 
220 	rx_ring->bufs_max = num_entries;
221 
222 	if (ringtype == HAL_RXDMA_MONITOR_STATUS)
223 		ath12k_dp_mon_status_bufs_replenish(ab, rx_ring,
224 						    num_entries);
225 	else
226 		ath12k_dp_mon_buf_replenish(ab, rx_ring, num_entries);
227 
228 	return 0;
229 }
230 
231 static int ath12k_dp_rxdma_ring_buf_setup(struct ath12k_base *ab,
232 					  struct dp_rxdma_ring *rx_ring)
233 {
234 	LIST_HEAD(list);
235 
236 	rx_ring->bufs_max = rx_ring->refill_buf_ring.size /
237 			ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_BUF);
238 
239 	ath12k_dp_rx_bufs_replenish(ath12k_ab_to_dp(ab), rx_ring, &list, 0);
240 
241 	return 0;
242 }
243 
244 static int ath12k_dp_rxdma_buf_setup(struct ath12k_base *ab)
245 {
246 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
247 	struct dp_rxdma_mon_ring *mon_ring;
248 	int ret, i;
249 
250 	ret = ath12k_dp_rxdma_ring_buf_setup(ab, &dp->rx_refill_buf_ring);
251 	if (ret) {
252 		ath12k_warn(ab,
253 			    "failed to setup HAL_RXDMA_BUF\n");
254 		return ret;
255 	}
256 
257 	if (ab->hw_params->rxdma1_enable) {
258 		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab,
259 							 &dp->rxdma_mon_buf_ring,
260 							 HAL_RXDMA_MONITOR_BUF);
261 		if (ret)
262 			ath12k_warn(ab,
263 				    "failed to setup HAL_RXDMA_MONITOR_BUF\n");
264 		return ret;
265 	}
266 
267 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
268 		mon_ring = &dp->rx_mon_status_refill_ring[i];
269 		ret = ath12k_dp_rxdma_mon_ring_buf_setup(ab, mon_ring,
270 							 HAL_RXDMA_MONITOR_STATUS);
271 		if (ret) {
272 			ath12k_warn(ab,
273 				    "failed to setup HAL_RXDMA_MONITOR_STATUS\n");
274 			return ret;
275 		}
276 	}
277 
278 	return 0;
279 }
280 
281 static void ath12k_dp_rx_pdev_srng_free(struct ath12k *ar)
282 {
283 	struct ath12k_pdev_dp *dp = &ar->dp;
284 	struct ath12k_base *ab = ar->ab;
285 	int i;
286 
287 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++)
288 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_dst_ring[i]);
289 }
290 
291 void ath12k_dp_rx_pdev_reo_cleanup(struct ath12k_base *ab)
292 {
293 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
294 	int i;
295 
296 	for (i = 0; i < DP_REO_DST_RING_MAX; i++)
297 		ath12k_dp_srng_cleanup(ab, &dp->reo_dst_ring[i]);
298 }
299 
300 int ath12k_dp_rx_pdev_reo_setup(struct ath12k_base *ab)
301 {
302 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
303 	int ret;
304 	int i;
305 
306 	for (i = 0; i < DP_REO_DST_RING_MAX; i++) {
307 		ret = ath12k_dp_srng_setup(ab, &dp->reo_dst_ring[i],
308 					   HAL_REO_DST, i, 0,
309 					   DP_REO_DST_RING_SIZE);
310 		if (ret) {
311 			ath12k_warn(ab, "failed to setup reo_dst_ring\n");
312 			goto err_reo_cleanup;
313 		}
314 	}
315 
316 	return 0;
317 
318 err_reo_cleanup:
319 	ath12k_dp_rx_pdev_reo_cleanup(ab);
320 
321 	return ret;
322 }
323 
324 static int ath12k_dp_rx_pdev_srng_alloc(struct ath12k *ar)
325 {
326 	struct ath12k_pdev_dp *dp = &ar->dp;
327 	struct ath12k_base *ab = ar->ab;
328 	int i;
329 	int ret;
330 	u32 mac_id = dp->mac_id;
331 
332 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
333 		ret = ath12k_dp_srng_setup(ar->ab,
334 					   &dp->rxdma_mon_dst_ring[i],
335 					   HAL_RXDMA_MONITOR_DST,
336 					   0, mac_id + i,
337 					   DP_RXDMA_MONITOR_DST_RING_SIZE(ab));
338 		if (ret) {
339 			ath12k_warn(ar->ab,
340 				    "failed to setup HAL_RXDMA_MONITOR_DST\n");
341 			return ret;
342 		}
343 	}
344 
345 	return 0;
346 }
347 
348 void ath12k_dp_init_rx_tid_rxq(struct ath12k_dp_rx_tid_rxq *rx_tid_rxq,
349 			       struct ath12k_dp_rx_tid *rx_tid,
350 			       bool active)
351 {
352 	rx_tid_rxq->tid = rx_tid->tid;
353 	rx_tid_rxq->active = active;
354 	rx_tid_rxq->qbuf = rx_tid->qbuf;
355 }
356 EXPORT_SYMBOL(ath12k_dp_init_rx_tid_rxq);
357 
358 static void ath12k_dp_rx_tid_cleanup(struct ath12k_base *ab,
359 				     struct ath12k_reoq_buf *tid_qbuf)
360 {
361 	if (tid_qbuf->vaddr) {
362 		dma_unmap_single(ab->dev, tid_qbuf->paddr_aligned,
363 				 tid_qbuf->size, DMA_BIDIRECTIONAL);
364 		kfree(tid_qbuf->vaddr);
365 		tid_qbuf->vaddr = NULL;
366 	}
367 }
368 
369 void ath12k_dp_rx_reo_cmd_list_cleanup(struct ath12k_base *ab)
370 {
371 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
372 	struct ath12k_dp_rx_reo_cmd *cmd, *tmp;
373 	struct ath12k_dp_rx_reo_cache_flush_elem *cmd_cache, *tmp_cache;
374 	struct dp_reo_update_rx_queue_elem *cmd_queue, *tmp_queue;
375 
376 	spin_lock_bh(&dp->reo_rxq_flush_lock);
377 	list_for_each_entry_safe(cmd_queue, tmp_queue, &dp->reo_cmd_update_rx_queue_list,
378 				 list) {
379 		list_del(&cmd_queue->list);
380 		ath12k_dp_rx_tid_cleanup(ab, &cmd_queue->rx_tid.qbuf);
381 		kfree(cmd_queue);
382 	}
383 	list_for_each_entry_safe(cmd_cache, tmp_cache,
384 				 &dp->reo_cmd_cache_flush_list, list) {
385 		list_del(&cmd_cache->list);
386 		dp->reo_cmd_cache_flush_count--;
387 		ath12k_dp_rx_tid_cleanup(ab, &cmd_cache->data.qbuf);
388 		kfree(cmd_cache);
389 	}
390 	spin_unlock_bh(&dp->reo_rxq_flush_lock);
391 
392 	spin_lock_bh(&dp->reo_cmd_lock);
393 	list_for_each_entry_safe(cmd, tmp, &dp->reo_cmd_list, list) {
394 		list_del(&cmd->list);
395 		ath12k_dp_rx_tid_cleanup(ab, &cmd->data.qbuf);
396 		kfree(cmd);
397 	}
398 	spin_unlock_bh(&dp->reo_cmd_lock);
399 }
400 
401 void ath12k_dp_reo_cmd_free(struct ath12k_dp *dp, void *ctx,
402 			    enum hal_reo_cmd_status status)
403 {
404 	struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
405 
406 	if (status != HAL_REO_CMD_SUCCESS)
407 		ath12k_warn(dp->ab, "failed to flush rx tid hw desc, tid %d status %d\n",
408 			    rx_tid->tid, status);
409 
410 	ath12k_dp_rx_tid_cleanup(dp->ab, &rx_tid->qbuf);
411 }
412 EXPORT_SYMBOL(ath12k_dp_reo_cmd_free);
413 
414 void ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(struct ath12k_dp *dp)
415 {
416 	struct ath12k_base *ab = dp->ab;
417 	struct dp_reo_update_rx_queue_elem *elem, *tmp;
418 
419 	spin_lock_bh(&dp->reo_rxq_flush_lock);
420 
421 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_update_rx_queue_list, list) {
422 		if (elem->rx_tid.active)
423 			continue;
424 
425 		if (ath12k_dp_rx_tid_delete_handler(ab, &elem->rx_tid))
426 			break;
427 
428 		ath12k_dp_arch_peer_rx_tid_qref_reset(dp,
429 						      elem->is_ml_peer ?
430 						      elem->ml_peer_id : elem->peer_id,
431 						      elem->rx_tid.tid);
432 
433 		if (ab->hw_params->reoq_lut_support)
434 			ath12k_hal_reo_shared_qaddr_cache_clear(ab);
435 
436 		list_del(&elem->list);
437 		kfree(elem);
438 	}
439 
440 	spin_unlock_bh(&dp->reo_rxq_flush_lock);
441 }
442 EXPORT_SYMBOL(ath12k_dp_rx_process_reo_cmd_update_rx_queue_list);
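/* Summary of the deferred TID delete flow above (editorial note): entries are
 * queued by ath12k_dp_prepare_reo_update_elem() at TID setup time, flagged
 * inactive by ath12k_dp_mark_tid_as_inactive() below, and this walker then
 * issues the delete via ath12k_dp_rx_tid_delete_handler(), resets the peer's
 * qref (using the ML peer id for ML peers) and, when a REO queue LUT is
 * supported, clears the HW qaddr cache before freeing each element.
 */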
443 
444 void ath12k_dp_rx_tid_del_func(struct ath12k_dp *dp, void *ctx,
445 			       enum hal_reo_cmd_status status)
446 {
447 	struct ath12k_base *ab = dp->ab;
448 	struct ath12k_dp_rx_tid_rxq *rx_tid = ctx;
449 	struct ath12k_dp_rx_reo_cache_flush_elem *elem, *tmp;
450 
451 	if (status == HAL_REO_CMD_DRAIN) {
452 		goto free_desc;
453 	} else if (status != HAL_REO_CMD_SUCCESS) {
454 		/* Shouldn't happen! Cleanup in case of other failure? */
455 		ath12k_warn(ab, "failed to delete rx tid %d hw descriptor %d\n",
456 			    rx_tid->tid, status);
457 		return;
458 	}
459 
460 	/* Retry the HAL_REO_CMD_UPDATE_RX_QUEUE command for entries
461 	 * in the pending queue list whose TID is marked inactive
462 	 */
463 	spin_lock_bh(&dp->dp_lock);
464 	ath12k_dp_rx_process_reo_cmd_update_rx_queue_list(dp);
465 	spin_unlock_bh(&dp->dp_lock);
466 
467 	elem = kzalloc_obj(*elem, GFP_ATOMIC);
468 	if (!elem)
469 		goto free_desc;
470 
471 	elem->ts = jiffies;
472 	memcpy(&elem->data, rx_tid, sizeof(*rx_tid));
473 
474 	spin_lock_bh(&dp->reo_rxq_flush_lock);
475 	list_add_tail(&elem->list, &dp->reo_cmd_cache_flush_list);
476 	dp->reo_cmd_cache_flush_count++;
477 
478 	/* Flush and invalidate aged REO desc from HW cache */
479 	list_for_each_entry_safe(elem, tmp, &dp->reo_cmd_cache_flush_list,
480 				 list) {
481 		if (dp->reo_cmd_cache_flush_count > ATH12K_DP_RX_REO_DESC_FREE_THRES ||
482 		    time_after(jiffies, elem->ts +
483 			       msecs_to_jiffies(ATH12K_DP_RX_REO_DESC_FREE_TIMEOUT_MS))) {
484 			/* The reo_cmd_cache_flush_list is used in only two contexts:
485 			 * this function, called from NAPI, and ath12k_dp_free
486 			 * during core destroy.
487 			 * If the cache flush command is sent successfully, delete the
488 			 * element from the cache list; ath12k_dp_rx_reo_cmd_list_cleanup
489 			 * frees any remaining elements during core destroy.
490 			 */
491 
492 			if (ath12k_dp_arch_reo_cache_flush(dp, &elem->data))
493 				break;
494 
495 			list_del(&elem->list);
496 			dp->reo_cmd_cache_flush_count--;
497 
498 			kfree(elem);
499 		}
500 	}
501 	spin_unlock_bh(&dp->reo_rxq_flush_lock);
502 
503 	return;
504 free_desc:
505 	ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
506 }
507 EXPORT_SYMBOL(ath12k_dp_rx_tid_del_func);
508 
509 static int ath12k_dp_rx_tid_delete_handler(struct ath12k_base *ab,
510 					   struct ath12k_dp_rx_tid_rxq *rx_tid)
511 {
512 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
513 
514 	return ath12k_dp_arch_rx_tid_delete_handler(dp, rx_tid);
515 }
516 
517 void ath12k_dp_mark_tid_as_inactive(struct ath12k_dp *dp, int peer_id, u8 tid)
518 {
519 	struct dp_reo_update_rx_queue_elem *elem;
520 	struct ath12k_dp_rx_tid_rxq *rx_tid;
521 
522 	spin_lock_bh(&dp->reo_rxq_flush_lock);
523 	list_for_each_entry(elem, &dp->reo_cmd_update_rx_queue_list, list) {
524 		if (elem->peer_id == peer_id) {
525 			rx_tid = &elem->rx_tid;
526 			if (rx_tid->tid == tid) {
527 				rx_tid->active = false;
528 				break;
529 			}
530 		}
531 	}
532 	spin_unlock_bh(&dp->reo_rxq_flush_lock);
533 }
534 EXPORT_SYMBOL(ath12k_dp_mark_tid_as_inactive);
535 
536 void ath12k_dp_rx_peer_tid_cleanup(struct ath12k *ar, struct ath12k_dp_link_peer *peer)
537 {
538 	struct ath12k_dp_rx_tid *rx_tid;
539 	int i;
540 	struct ath12k_base *ab = ar->ab;
541 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
542 
543 	lockdep_assert_held(&dp->dp_lock);
544 
545 	if (!peer->primary_link)
546 		return;
547 
548 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
549 		rx_tid = &peer->dp_peer->rx_tid[i];
550 
551 		ath12k_dp_arch_rx_peer_tid_delete(dp, peer, i);
552 		ath12k_dp_arch_rx_frags_cleanup(dp, rx_tid, true);
553 
554 		spin_unlock_bh(&dp->dp_lock);
555 		timer_delete_sync(&rx_tid->frag_timer);
556 		spin_lock_bh(&dp->dp_lock);
557 	}
558 }
559 
560 static int ath12k_dp_prepare_reo_update_elem(struct ath12k_dp *dp,
561 					     struct ath12k_dp_link_peer *peer,
562 					     struct ath12k_dp_rx_tid *rx_tid)
563 {
564 	struct dp_reo_update_rx_queue_elem *elem;
565 
566 	lockdep_assert_held(&dp->dp_lock);
567 
568 	if (!peer->primary_link)
569 		return 0;
570 
571 	elem = kzalloc_obj(*elem, GFP_ATOMIC);
572 	if (!elem)
573 		return -ENOMEM;
574 
575 	elem->peer_id = peer->peer_id;
576 	elem->is_ml_peer = peer->mlo;
577 	elem->ml_peer_id = peer->ml_id;
578 
579 	ath12k_dp_init_rx_tid_rxq(&elem->rx_tid, rx_tid,
580 				  (peer->rx_tid_active_bitmask & (1 << rx_tid->tid)));
581 
582 	spin_lock_bh(&dp->reo_rxq_flush_lock);
583 	list_add_tail(&elem->list, &dp->reo_cmd_update_rx_queue_list);
584 	spin_unlock_bh(&dp->reo_rxq_flush_lock);
585 
586 	return 0;
587 }
588 
589 int ath12k_dp_rx_peer_tid_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id,
590 				u8 tid, u32 ba_win_sz, u16 ssn,
591 				enum hal_pn_type pn_type)
592 {
593 	struct ath12k_base *ab = ar->ab;
594 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
595 	struct ath12k_dp_link_peer *peer;
596 	struct ath12k_dp_rx_tid *rx_tid;
597 	dma_addr_t paddr_aligned;
598 	int ret;
599 
600 	spin_lock_bh(&dp->dp_lock);
601 
602 	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
603 	if (!peer || !peer->dp_peer) {
604 		spin_unlock_bh(&dp->dp_lock);
605 		ath12k_warn(ab, "failed to find the peer to set up rx tid\n");
606 		return -ENOENT;
607 	}
608 
609 	if (ab->hw_params->dp_primary_link_only &&
610 	    !peer->primary_link) {
611 		spin_unlock_bh(&dp->dp_lock);
612 		return 0;
613 	}
614 
615 	if (ab->hw_params->reoq_lut_support &&
616 	    (!dp->reoq_lut.vaddr || !dp->ml_reoq_lut.vaddr)) {
617 		spin_unlock_bh(&dp->dp_lock);
618 		ath12k_warn(ab, "reo qref table is not setup\n");
619 		return -EINVAL;
620 	}
621 
622 	if (peer->peer_id > DP_MAX_PEER_ID || tid > IEEE80211_NUM_TIDS) {
623 		ath12k_warn(ab, "peer id of peer %d or tid %d doesn't allow reoq setup\n",
624 			    peer->peer_id, tid);
625 		spin_unlock_bh(&dp->dp_lock);
626 		return -EINVAL;
627 	}
628 
629 	rx_tid = &peer->dp_peer->rx_tid[tid];
630 	/* Update the tid queue if it is already set up */
631 	if (peer->rx_tid_active_bitmask & (1 << tid)) {
632 		ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid,
633 							    ba_win_sz, ssn, true);
634 		spin_unlock_bh(&dp->dp_lock);
635 		if (ret) {
636 			ath12k_warn(ab, "failed to update reo for rx tid %d\n", tid);
637 			return ret;
638 		}
639 
640 		if (!ab->hw_params->reoq_lut_support) {
641 			paddr_aligned = rx_tid->qbuf.paddr_aligned;
642 			ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id,
643 								     peer_mac,
644 								     paddr_aligned, tid,
645 								     1, ba_win_sz);
646 			if (ret) {
647 				ath12k_warn(ab, "failed to setup peer rx reorder queue for tid %d: %d\n",
648 					    tid, ret);
649 				return ret;
650 			}
651 		}
652 
653 		return 0;
654 	}
655 
656 	rx_tid->tid = tid;
657 
658 	rx_tid->ba_win_sz = ba_win_sz;
659 
660 	ret = ath12k_dp_arch_rx_assign_reoq(dp, peer->dp_peer, rx_tid, ssn, pn_type);
661 	if (ret) {
662 		spin_unlock_bh(&dp->dp_lock);
663 		ath12k_warn(ab, "failed to assign reoq buf for rx tid %u\n", tid);
664 		return ret;
665 	}
666 
667 	peer->rx_tid_active_bitmask |= (1 << tid);
668 
669 	/* Pre-allocate the update_rxq_list entry for the corresponding tid.
670 	 * It will be used during tid delete. The reason we do not allocate
671 	 * at tid delete time is that if an allocation failed there, we could
672 	 * not free the tid vaddr/paddr, which would lead to a memory leak.
673 	 */
674 	ret = ath12k_dp_prepare_reo_update_elem(dp, peer, rx_tid);
675 	if (ret) {
676 		ath12k_warn(ab, "failed to alloc update_rxq_list for rx tid %u\n", tid);
677 		ath12k_dp_rx_tid_cleanup(ab, &rx_tid->qbuf);
678 		spin_unlock_bh(&dp->dp_lock);
679 		return ret;
680 	}
681 
682 	paddr_aligned = rx_tid->qbuf.paddr_aligned;
683 	if (ab->hw_params->reoq_lut_support) {
684 		/* Update the REO queue LUT at the corresponding peer id
685 		 * and tid with qaddr.
686 		 */
687 		if (peer->mlo)
688 			ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->ml_id, tid,
689 							      paddr_aligned);
690 		else
691 			ath12k_dp_arch_peer_rx_tid_qref_setup(dp, peer->peer_id, tid,
692 							      paddr_aligned);
693 
694 		spin_unlock_bh(&dp->dp_lock);
695 	} else {
696 		spin_unlock_bh(&dp->dp_lock);
697 		ret = ath12k_wmi_peer_rx_reorder_queue_setup(ar, vdev_id, peer_mac,
698 							     paddr_aligned, tid, 1,
699 							     ba_win_sz);
700 	}
701 
702 	return ret;
703 }
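/* Note (editorial): peer->rx_tid_active_bitmask tracks which TIDs already
 * have a REO queue, one bit per TID. That is why the function above only
 * updates the BA window/SSN when (rx_tid_active_bitmask & (1 << tid)) is
 * set, and otherwise allocates a fresh queue and pre-allocates the
 * delete-time list element before publishing the qaddr.
 */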
704 
705 int ath12k_dp_rx_ampdu_start(struct ath12k *ar,
706 			     struct ieee80211_ampdu_params *params,
707 			     u8 link_id)
708 {
709 	struct ath12k_base *ab = ar->ab;
710 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
711 	struct ath12k_link_sta *arsta;
712 	int vdev_id;
713 	int ret;
714 
715 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
716 
717 	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
718 				  ahsta->link[link_id]);
719 	if (!arsta)
720 		return -ENOLINK;
721 
722 	vdev_id = arsta->arvif->vdev_id;
723 
724 	ret = ath12k_dp_rx_peer_tid_setup(ar, arsta->addr, vdev_id,
725 					  params->tid, params->buf_size,
726 					  params->ssn, arsta->ahsta->pn_type);
727 	if (ret)
728 		ath12k_warn(ab, "failed to setup rx tid %d\n", ret);
729 
730 	return ret;
731 }
732 
733 int ath12k_dp_rx_ampdu_stop(struct ath12k *ar,
734 			    struct ieee80211_ampdu_params *params,
735 			    u8 link_id)
736 {
737 	struct ath12k_base *ab = ar->ab;
738 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
739 	struct ath12k_dp_link_peer *peer;
740 	struct ath12k_sta *ahsta = ath12k_sta_to_ahsta(params->sta);
741 	struct ath12k_dp_rx_tid *rx_tid;
742 	struct ath12k_link_sta *arsta;
743 	int vdev_id;
744 	bool active;
745 	int ret;
746 
747 	lockdep_assert_wiphy(ath12k_ar_to_hw(ar)->wiphy);
748 
749 	arsta = wiphy_dereference(ath12k_ar_to_hw(ar)->wiphy,
750 				  ahsta->link[link_id]);
751 	if (!arsta)
752 		return -ENOLINK;
753 
754 	vdev_id = arsta->arvif->vdev_id;
755 
756 	spin_lock_bh(&dp->dp_lock);
757 
758 	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, arsta->addr);
759 	if (!peer || !peer->dp_peer) {
760 		spin_unlock_bh(&dp->dp_lock);
761 		ath12k_warn(ab, "failed to find the peer to stop rx aggregation\n");
762 		return -ENOENT;
763 	}
764 
765 	if (ab->hw_params->dp_primary_link_only &&
766 	    !peer->primary_link) {
767 		spin_unlock_bh(&dp->dp_lock);
768 		return 0;
769 	}
770 
771 	active = peer->rx_tid_active_bitmask & (1 << params->tid);
772 	if (!active) {
773 		spin_unlock_bh(&dp->dp_lock);
774 		return 0;
775 	}
776 
777 	rx_tid = &peer->dp_peer->rx_tid[params->tid];
778 	ret = ath12k_dp_arch_peer_rx_tid_reo_update(dp, peer, rx_tid,
779 						    1, 0, false);
780 	spin_unlock_bh(&dp->dp_lock);
781 	if (ret) {
782 		ath12k_warn(ab, "failed to update reo for rx tid %d: %d\n",
783 			    params->tid, ret);
784 		return ret;
785 	}
786 
787 	return ret;
788 }
789 
790 int ath12k_dp_rx_peer_pn_replay_config(struct ath12k_link_vif *arvif,
791 				       const u8 *peer_addr,
792 				       enum set_key_cmd key_cmd,
793 				       struct ieee80211_key_conf *key)
794 {
795 	struct ath12k *ar = arvif->ar;
796 	struct ath12k_base *ab = ar->ab;
797 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
798 	struct ath12k_hal_reo_cmd cmd = {};
799 	struct ath12k_dp_link_peer *peer;
800 	struct ath12k_dp_rx_tid *rx_tid;
801 	struct ath12k_dp_rx_tid_rxq rx_tid_rxq;
802 	u8 tid;
803 	int ret = 0;
804 
805 	/* NOTE: Enable PN/TSC replay check offload only for unicast frames.
806 	 * We use mac80211 PN/TSC replay check functionality for bcast/mcast
807 	 * for now.
808 	 */
809 	if (!(key->flags & IEEE80211_KEY_FLAG_PAIRWISE))
810 		return 0;
811 
812 	spin_lock_bh(&dp->dp_lock);
813 
814 	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, arvif->vdev_id,
815 							 peer_addr);
816 	if (!peer || !peer->dp_peer) {
817 		spin_unlock_bh(&dp->dp_lock);
818 		ath12k_warn(ab, "failed to find the peer %pM to configure pn replay detection\n",
819 			    peer_addr);
820 		return -ENOENT;
821 	}
822 
823 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
824 		if (!(peer->rx_tid_active_bitmask & (1 << tid)))
825 			continue;
826 
827 		rx_tid = &peer->dp_peer->rx_tid[tid];
828 		ath12k_dp_init_rx_tid_rxq(&rx_tid_rxq, rx_tid,
829 					  (peer->rx_tid_active_bitmask & (1 << tid)));
830 		ath12k_dp_arch_setup_pn_check_reo_cmd(dp, &cmd, rx_tid, key->cipher,
831 						      key_cmd);
832 		ret = ath12k_dp_arch_reo_cmd_send(dp, &rx_tid_rxq,
833 						  HAL_REO_CMD_UPDATE_RX_QUEUE,
834 						  &cmd, NULL);
835 		if (ret) {
836 			ath12k_warn(ab, "failed to configure rx tid %d queue of peer %pM for pn replay detection %d\n",
837 				    tid, peer_addr, ret);
838 			break;
839 		}
840 	}
841 
842 	spin_unlock_bh(&dp->dp_lock);
843 
844 	return ret;
845 }
847 
848 struct sk_buff *ath12k_dp_rx_get_msdu_last_buf(struct sk_buff_head *msdu_list,
849 					       struct sk_buff *first)
850 {
851 	struct sk_buff *skb;
852 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(first);
853 
854 	if (!rxcb->is_continuation)
855 		return first;
856 
857 	skb_queue_walk(msdu_list, skb) {
858 		rxcb = ATH12K_SKB_RXCB(skb);
859 		if (!rxcb->is_continuation)
860 			return skb;
861 	}
862 
863 	return NULL;
864 }
865 EXPORT_SYMBOL(ath12k_dp_rx_get_msdu_last_buf);

866 int ath12k_dp_rx_crypto_mic_len(struct ath12k_dp *dp, enum hal_encrypt_type enctype)
867 {
868 	switch (enctype) {
869 	case HAL_ENCRYPT_TYPE_OPEN:
870 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
871 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
872 		return 0;
873 	case HAL_ENCRYPT_TYPE_CCMP_128:
874 		return IEEE80211_CCMP_MIC_LEN;
875 	case HAL_ENCRYPT_TYPE_CCMP_256:
876 		return IEEE80211_CCMP_256_MIC_LEN;
877 	case HAL_ENCRYPT_TYPE_GCMP_128:
878 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
879 		return IEEE80211_GCMP_MIC_LEN;
880 	case HAL_ENCRYPT_TYPE_WEP_40:
881 	case HAL_ENCRYPT_TYPE_WEP_104:
882 	case HAL_ENCRYPT_TYPE_WEP_128:
883 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
884 	case HAL_ENCRYPT_TYPE_WAPI:
885 		break;
886 	}
887 
888 	ath12k_warn(dp->ab, "unsupported encryption type %d for mic len\n", enctype);
889 	return 0;
890 }
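/* For reference, the MIC lengths returned above follow IEEE 802.11 (values
 * from <linux/ieee80211.h>): CCMP-128 -> 8 bytes, CCMP-256 -> 16 bytes,
 * GCMP-128/GCMP-256 -> 16 bytes. TKIP returns 0 here because its Michael
 * MIC is accounted for separately.
 */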
891 
892 static int ath12k_dp_rx_crypto_param_len(struct ath12k_pdev_dp *dp_pdev,
893 					 enum hal_encrypt_type enctype)
894 {
895 	switch (enctype) {
896 	case HAL_ENCRYPT_TYPE_OPEN:
897 		return 0;
898 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
899 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
900 		return IEEE80211_TKIP_IV_LEN;
901 	case HAL_ENCRYPT_TYPE_CCMP_128:
902 		return IEEE80211_CCMP_HDR_LEN;
903 	case HAL_ENCRYPT_TYPE_CCMP_256:
904 		return IEEE80211_CCMP_256_HDR_LEN;
905 	case HAL_ENCRYPT_TYPE_GCMP_128:
906 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
907 		return IEEE80211_GCMP_HDR_LEN;
908 	case HAL_ENCRYPT_TYPE_WEP_40:
909 	case HAL_ENCRYPT_TYPE_WEP_104:
910 	case HAL_ENCRYPT_TYPE_WEP_128:
911 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
912 	case HAL_ENCRYPT_TYPE_WAPI:
913 		break;
914 	}
915 
916 	ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
917 	return 0;
918 }
919 
920 static int ath12k_dp_rx_crypto_icv_len(struct ath12k_pdev_dp *dp_pdev,
921 				       enum hal_encrypt_type enctype)
922 {
923 	switch (enctype) {
924 	case HAL_ENCRYPT_TYPE_OPEN:
925 	case HAL_ENCRYPT_TYPE_CCMP_128:
926 	case HAL_ENCRYPT_TYPE_CCMP_256:
927 	case HAL_ENCRYPT_TYPE_GCMP_128:
928 	case HAL_ENCRYPT_TYPE_AES_GCMP_256:
929 		return 0;
930 	case HAL_ENCRYPT_TYPE_TKIP_NO_MIC:
931 	case HAL_ENCRYPT_TYPE_TKIP_MIC:
932 		return IEEE80211_TKIP_ICV_LEN;
933 	case HAL_ENCRYPT_TYPE_WEP_40:
934 	case HAL_ENCRYPT_TYPE_WEP_104:
935 	case HAL_ENCRYPT_TYPE_WEP_128:
936 	case HAL_ENCRYPT_TYPE_WAPI_GCM_SM4:
937 	case HAL_ENCRYPT_TYPE_WAPI:
938 		break;
939 	}
940 
941 	ath12k_warn(dp_pdev->dp->ab, "unsupported encryption type %d\n", enctype);
942 	return 0;
943 }
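/* For reference, the per-cipher lengths returned by the two helpers above,
 * per IEEE 802.11: TKIP uses an 8 byte IV plus a 4 byte trailing ICV, while
 * CCMP-128/CCMP-256/GCMP use an 8 byte PN header and no ICV (their MIC is
 * covered by ath12k_dp_rx_crypto_mic_len() instead).
 */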
944 
945 static void ath12k_dp_rx_h_undecap_nwifi(struct ath12k_pdev_dp *dp_pdev,
946 					 struct sk_buff *msdu,
947 					 enum hal_encrypt_type enctype,
948 					 struct hal_rx_desc_data *rx_info)
949 {
950 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
951 	u8 decap_hdr[DP_MAX_NWIFI_HDR_LEN];
952 	struct ieee80211_hdr *hdr;
953 	size_t hdr_len;
954 	u8 *crypto_hdr;
955 	u16 qos_ctl;
956 
957 	/* pull decapped header */
958 	hdr = (struct ieee80211_hdr *)msdu->data;
959 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
960 	skb_pull(msdu, hdr_len);
961 
962 	/* Rebuild QoS header */
963 	hdr->frame_control |= __cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
964 
965 	/* Reset the order bit as the HT_Control header is stripped */
966 	hdr->frame_control &= ~(__cpu_to_le16(IEEE80211_FCTL_ORDER));
967 
968 	qos_ctl = rxcb->tid;
969 
970 	if (rx_info->mesh_ctrl_present)
971 		qos_ctl |= IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT;
972 
973 	/* TODO: Add other QoS ctl fields when required */
974 
975 	/* copy decap header before overwriting for reuse below */
976 	memcpy(decap_hdr, hdr, hdr_len);
977 
978 	/* Rebuild crypto header for mac80211 use */
979 	if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
980 		crypto_hdr = skb_push(msdu,
981 				      ath12k_dp_rx_crypto_param_len(dp_pdev, enctype));
982 		ath12k_dp_rx_desc_get_crypto_header(dp_pdev->dp->hal,
983 						    rxcb->rx_desc, crypto_hdr,
984 						    enctype);
985 	}
986 
987 	memcpy(skb_push(msdu,
988 			IEEE80211_QOS_CTL_LEN), &qos_ctl,
989 			IEEE80211_QOS_CTL_LEN);
990 	memcpy(skb_push(msdu, hdr_len), decap_hdr, hdr_len);
991 }
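/* Resulting layout (sketch): HW delivers a native-wifi MSDU with the QoS
 * control (and possibly the crypto header) stripped; the function above
 * rebuilds
 *
 *	[802.11 QoS data hdr][QoS ctl][crypto hdr, if IV not stripped][payload]
 *
 * so mac80211 can process it as a regular QoS data frame.
 */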
992 
993 static void ath12k_dp_rx_h_undecap_raw(struct ath12k_pdev_dp *dp_pdev,
994 				       struct sk_buff *msdu,
995 				       enum hal_encrypt_type enctype,
996 				       struct ieee80211_rx_status *status,
997 				       bool decrypted)
998 {
999 	struct ath12k_dp *dp = dp_pdev->dp;
1000 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1001 	struct ieee80211_hdr *hdr;
1002 	size_t hdr_len;
1003 	size_t crypto_len;
1004 
1005 	if (!rxcb->is_first_msdu ||
1006 	    !(rxcb->is_first_msdu && rxcb->is_last_msdu)) {
1007 		WARN_ON_ONCE(1);
1008 		return;
1009 	}
1010 
1011 	skb_trim(msdu, msdu->len - FCS_LEN);
1012 
1013 	if (!decrypted)
1014 		return;
1015 
1016 	hdr = (void *)msdu->data;
1017 
1018 	/* Tail */
1019 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1020 		skb_trim(msdu, msdu->len -
1021 			 ath12k_dp_rx_crypto_mic_len(dp, enctype));
1022 
1023 		skb_trim(msdu, msdu->len -
1024 			 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
1025 	} else {
1026 		/* MIC */
1027 		if (status->flag & RX_FLAG_MIC_STRIPPED)
1028 			skb_trim(msdu, msdu->len -
1029 				 ath12k_dp_rx_crypto_mic_len(dp, enctype));
1030 
1031 		/* ICV */
1032 		if (status->flag & RX_FLAG_ICV_STRIPPED)
1033 			skb_trim(msdu, msdu->len -
1034 				 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
1035 	}
1036 
1037 	/* MMIC */
1038 	if ((status->flag & RX_FLAG_MMIC_STRIPPED) &&
1039 	    !ieee80211_has_morefrags(hdr->frame_control) &&
1040 	    enctype == HAL_ENCRYPT_TYPE_TKIP_MIC)
1041 		skb_trim(msdu, msdu->len - IEEE80211_CCMP_MIC_LEN);
1042 
1043 	/* Head */
1044 	if (status->flag & RX_FLAG_IV_STRIPPED) {
1045 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1046 		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
1047 
1048 		memmove(msdu->data + crypto_len, msdu->data, hdr_len);
1049 		skb_pull(msdu, crypto_len);
1050 	}
1051 }
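/* Trim order in the raw path above (sketch): a decrypted raw MPDU arrives as
 * [802.11 hdr][IV][payload][MIC][ICV][FCS]. The FCS is always cut first,
 * the MIC/ICV are trimmed from the tail according to the *_STRIPPED flags,
 * and finally the header is shifted over the IV bytes when HW has stripped
 * the IV.
 */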
1052 
1053 static void ath12k_get_dot11_hdr_from_rx_desc(struct ath12k_pdev_dp *dp_pdev,
1054 					      struct sk_buff *msdu,
1055 					      struct ath12k_skb_rxcb *rxcb,
1056 					      enum hal_encrypt_type enctype,
1057 					      struct hal_rx_desc_data *rx_info)
1058 {
1059 	struct hal_rx_desc *rx_desc = rxcb->rx_desc;
1060 	struct ath12k_dp *dp = dp_pdev->dp;
1061 	struct ath12k_hal *hal = dp->hal;
1062 	size_t hdr_len, crypto_len;
1063 	struct ieee80211_hdr hdr;
1064 	__le16 qos_ctl;
1065 	u8 *crypto_hdr;
1066 
1067 	ath12k_dp_rx_desc_get_dot11_hdr(hal, rx_desc, &hdr);
1068 	hdr_len = ieee80211_hdrlen(hdr.frame_control);
1069 
1070 	if (!(rx_info->rx_status->flag & RX_FLAG_IV_STRIPPED)) {
1071 		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
1072 		crypto_hdr = skb_push(msdu, crypto_len);
1073 		ath12k_dp_rx_desc_get_crypto_header(dp->hal, rx_desc, crypto_hdr,
1074 						    enctype);
1075 	}
1076 
1077 	skb_push(msdu, hdr_len);
1078 	memcpy(msdu->data, &hdr, min(hdr_len, sizeof(hdr)));
1079 
1080 	if (rxcb->is_mcbc)
1081 		rx_info->rx_status->flag &= ~RX_FLAG_PN_VALIDATED;
1082 
1083 	/* Add QoS header */
1084 	if (ieee80211_is_data_qos(hdr.frame_control)) {
1085 		struct ieee80211_hdr *qos_ptr = (struct ieee80211_hdr *)msdu->data;
1086 
1087 		qos_ctl = cpu_to_le16(rxcb->tid & IEEE80211_QOS_CTL_TID_MASK);
1088 		if (rx_info->mesh_ctrl_present)
1089 			qos_ctl |= cpu_to_le16(IEEE80211_QOS_CTL_MESH_CONTROL_PRESENT);
1090 
1091 		memcpy(ieee80211_get_qos_ctl(qos_ptr), &qos_ctl, IEEE80211_QOS_CTL_LEN);
1092 	}
1093 }
1094 
1095 static void ath12k_dp_rx_h_undecap_eth(struct ath12k_pdev_dp *dp_pdev,
1096 				       struct sk_buff *msdu,
1097 				       enum hal_encrypt_type enctype,
1098 				       struct hal_rx_desc_data *rx_info)
1099 {
1100 	struct ieee80211_hdr *hdr;
1101 	struct ethhdr *eth;
1102 	u8 da[ETH_ALEN];
1103 	u8 sa[ETH_ALEN];
1104 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1105 	struct ath12k_dp_rx_rfc1042_hdr rfc = {0xaa, 0xaa, 0x03, {0x00, 0x00, 0x00}};
1106 
1107 	eth = (struct ethhdr *)msdu->data;
1108 	ether_addr_copy(da, eth->h_dest);
1109 	ether_addr_copy(sa, eth->h_source);
1110 	rfc.snap_type = eth->h_proto;
1111 	skb_pull(msdu, sizeof(*eth));
1112 	memcpy(skb_push(msdu, sizeof(rfc)), &rfc,
1113 	       sizeof(rfc));
1114 	ath12k_get_dot11_hdr_from_rx_desc(dp_pdev, msdu, rxcb, enctype, rx_info);
1115 
1116 	/* original 802.11 header has a different DA and in
1117 	 * case of 4addr it may also have different SA
1118 	 */
1119 	hdr = (struct ieee80211_hdr *)msdu->data;
1120 	ether_addr_copy(ieee80211_get_DA(hdr), da);
1121 	ether_addr_copy(ieee80211_get_SA(hdr), sa);
1122 }
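/* The rfc struct above is the RFC 1042 LLC/SNAP header (aa aa 03 00 00 00)
 * followed by the original ethertype: the Ethernet frame
 * [dst][src][type][payload] is rewritten as
 * [802.11 hdr][LLC/SNAP + type][payload] with DA/SA restored from the
 * Ethernet header.
 */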
1123 
1124 void ath12k_dp_rx_h_undecap(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
1125 			    enum hal_encrypt_type enctype,
1126 			    bool decrypted,
1127 			    struct hal_rx_desc_data *rx_info)
1128 {
1129 	struct ethhdr *ehdr;
1130 
1131 	switch (rx_info->decap_type) {
1132 	case DP_RX_DECAP_TYPE_NATIVE_WIFI:
1133 		ath12k_dp_rx_h_undecap_nwifi(dp_pdev, msdu, enctype, rx_info);
1134 		break;
1135 	case DP_RX_DECAP_TYPE_RAW:
1136 		ath12k_dp_rx_h_undecap_raw(dp_pdev, msdu, enctype, rx_info->rx_status,
1137 					   decrypted);
1138 		break;
1139 	case DP_RX_DECAP_TYPE_ETHERNET2_DIX:
1140 		ehdr = (struct ethhdr *)msdu->data;
1141 
1142 		/* mac80211 allows fast path only for authorized STA */
1143 		if (ehdr->h_proto == cpu_to_be16(ETH_P_PAE)) {
1144 			ATH12K_SKB_RXCB(msdu)->is_eapol = true;
1145 			ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
1146 			break;
1147 		}
1148 
1149 		/* PN for mcast packets will be validated in mac80211;
1150 		 * remove eth header and add 802.11 header.
1151 		 */
1152 		if (ATH12K_SKB_RXCB(msdu)->is_mcbc && decrypted)
1153 			ath12k_dp_rx_h_undecap_eth(dp_pdev, msdu, enctype, rx_info);
1154 		break;
1155 	case DP_RX_DECAP_TYPE_8023:
1156 		/* TODO: Handle undecap for these formats */
1157 		break;
1158 	}
1159 }
1160 EXPORT_SYMBOL(ath12k_dp_rx_h_undecap);
1161 
1162 struct ath12k_dp_link_peer *
1163 ath12k_dp_rx_h_find_link_peer(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
1164 			      struct hal_rx_desc_data *rx_info)
1165 {
1166 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1167 	struct ath12k_dp_link_peer *peer = NULL;
1168 	struct ath12k_dp *dp = dp_pdev->dp;
1169 
1170 	lockdep_assert_held(&dp->dp_lock);
1171 
1172 	if (rxcb->peer_id)
1173 		peer = ath12k_dp_link_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
1174 
1175 	if (peer)
1176 		return peer;
1177 
1178 	if (rx_info->addr2_present)
1179 		peer = ath12k_dp_link_peer_find_by_addr(dp, rx_info->addr2);
1180 
1181 	return peer;
1182 }
1183 
1184 static void ath12k_dp_rx_h_rate(struct ath12k_pdev_dp *dp_pdev,
1185 				struct hal_rx_desc_data *rx_info)
1186 {
1187 	struct ath12k_dp *dp = dp_pdev->dp;
1188 	struct ieee80211_supported_band *sband;
1189 	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
1190 	enum rx_msdu_start_pkt_type pkt_type = rx_info->pkt_type;
1191 	u8 bw = rx_info->bw, sgi = rx_info->sgi;
1192 	u8 rate_mcs = rx_info->rate_mcs, nss = rx_info->nss;
1193 	bool is_cck;
1194 	struct ath12k *ar;
1195 
1196 	switch (pkt_type) {
1197 	case RX_MSDU_START_PKT_TYPE_11A:
1198 	case RX_MSDU_START_PKT_TYPE_11B:
1199 		ar = ath12k_pdev_dp_to_ar(dp_pdev);
1200 		is_cck = (pkt_type == RX_MSDU_START_PKT_TYPE_11B);
1201 		sband = &ar->mac.sbands[rx_status->band];
1202 		rx_status->rate_idx = ath12k_mac_hw_rate_to_idx(sband, rate_mcs,
1203 								is_cck);
1204 		break;
1205 	case RX_MSDU_START_PKT_TYPE_11N:
1206 		rx_status->encoding = RX_ENC_HT;
1207 		if (rate_mcs > ATH12K_HT_MCS_MAX) {
1208 			ath12k_warn(dp->ab,
1209 				    "Received with invalid mcs in HT mode %d\n",
1210 				     rate_mcs);
1211 			break;
1212 		}
1213 		rx_status->rate_idx = rate_mcs + (8 * (nss - 1));
1214 		if (sgi)
1215 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1216 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
1217 		break;
1218 	case RX_MSDU_START_PKT_TYPE_11AC:
1219 		rx_status->encoding = RX_ENC_VHT;
1220 		rx_status->rate_idx = rate_mcs;
1221 		if (rate_mcs > ATH12K_VHT_MCS_MAX) {
1222 			ath12k_warn(dp->ab,
1223 				    "Received with invalid mcs in VHT mode %d\n",
1224 				     rate_mcs);
1225 			break;
1226 		}
1227 		rx_status->nss = nss;
1228 		if (sgi)
1229 			rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;
1230 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
1231 		break;
1232 	case RX_MSDU_START_PKT_TYPE_11AX:
1233 		rx_status->rate_idx = rate_mcs;
1234 		if (rate_mcs > ATH12K_HE_MCS_MAX) {
1235 			ath12k_warn(dp->ab,
1236 				    "Received with invalid mcs in HE mode %d\n",
1237 				    rate_mcs);
1238 			break;
1239 		}
1240 		rx_status->encoding = RX_ENC_HE;
1241 		rx_status->nss = nss;
1242 		rx_status->he_gi = ath12k_he_gi_to_nl80211_he_gi(sgi);
1243 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
1244 		break;
1245 	case RX_MSDU_START_PKT_TYPE_11BE:
1246 		rx_status->rate_idx = rate_mcs;
1247 
1248 		if (rate_mcs > ATH12K_EHT_MCS_MAX) {
1249 			ath12k_warn(dp->ab,
1250 				    "Received with invalid mcs in EHT mode %d\n",
1251 				    rate_mcs);
1252 			break;
1253 		}
1254 
1255 		rx_status->encoding = RX_ENC_EHT;
1256 		rx_status->nss = nss;
1257 		rx_status->eht.gi = ath12k_mac_eht_gi_to_nl80211_eht_gi(sgi);
1258 		rx_status->bw = ath12k_mac_bw_to_mac80211_bw(bw);
1259 		break;
1260 	default:
1261 		break;
1262 	}
1263 }
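/* Example for the HT branch above: rate_idx = mcs + 8 * (nss - 1), so MCS 7
 * at 2 spatial streams yields rate_idx 15 (HT MCS 15), the index mac80211
 * expects for RX_ENC_HT.
 */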
1264 
1265 void ath12k_dp_rx_h_ppdu(struct ath12k_pdev_dp *dp_pdev,
1266 			 struct hal_rx_desc_data *rx_info)
1267 {
1268 	struct ieee80211_rx_status *rx_status = rx_info->rx_status;
1269 	u8 channel_num;
1270 	u32 center_freq, meta_data;
1271 	struct ieee80211_channel *channel;
1272 
1273 	rx_status->freq = 0;
1274 	rx_status->rate_idx = 0;
1275 	rx_status->nss = 0;
1276 	rx_status->encoding = RX_ENC_LEGACY;
1277 	rx_status->bw = RATE_INFO_BW_20;
1278 	rx_status->enc_flags = 0;
1279 
1280 	rx_status->flag |= RX_FLAG_NO_SIGNAL_VAL;
1281 
1282 	meta_data = rx_info->phy_meta_data;
1283 	channel_num = meta_data;
1284 	center_freq = meta_data >> 16;
1285 
1286 	rx_status->band = NUM_NL80211_BANDS;
1287 
1288 	if (center_freq >= ATH12K_MIN_6GHZ_FREQ &&
1289 	    center_freq <= ATH12K_MAX_6GHZ_FREQ) {
1290 		rx_status->band = NL80211_BAND_6GHZ;
1291 		rx_status->freq = center_freq;
1292 	} else if (channel_num >= 1 && channel_num <= 14) {
1293 		rx_status->band = NL80211_BAND_2GHZ;
1294 	} else if (channel_num >= 36 && channel_num <= 173) {
1295 		rx_status->band = NL80211_BAND_5GHZ;
1296 	}
1297 
1298 	if (unlikely(rx_status->band == NUM_NL80211_BANDS ||
1299 		     !ath12k_pdev_dp_to_hw(dp_pdev)->wiphy->bands[rx_status->band])) {
1300 		struct ath12k *ar = ath12k_pdev_dp_to_ar(dp_pdev);
1301 
1302 		ath12k_warn(ar->ab, "sband is NULL for status band %d channel_num %d center_freq %d pdev_id %d\n",
1303 			    rx_status->band, channel_num, center_freq, ar->pdev_idx);
1304 
1305 		spin_lock_bh(&ar->data_lock);
1306 		channel = ar->rx_channel;
1307 		if (channel) {
1308 			rx_status->band = channel->band;
1309 			channel_num =
1310 				ieee80211_frequency_to_channel(channel->center_freq);
1311 			rx_status->freq = ieee80211_channel_to_frequency(channel_num,
1312 									 rx_status->band);
1313 		} else {
1314 			ath12k_err(ar->ab, "unable to determine channel, band for rx packet");
1315 		}
1316 		spin_unlock_bh(&ar->data_lock);
1317 		goto h_rate;
1318 	}
1319 
1320 	if (rx_status->band != NL80211_BAND_6GHZ)
1321 		rx_status->freq = ieee80211_channel_to_frequency(channel_num,
1322 								 rx_status->band);
1323 
1324 h_rate:
1325 	ath12k_dp_rx_h_rate(dp_pdev, rx_info);
1326 }
1327 EXPORT_SYMBOL(ath12k_dp_rx_h_ppdu);
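/* Band inference above (sketch): phy_meta_data carries the channel number in
 * its low 16 bits and the center frequency in its high 16 bits; 6 GHz is
 * detected by frequency range, channels 1-14 map to 2 GHz, 36-173 to 5 GHz,
 * and anything else falls back to ar->rx_channel.
 */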
1328 
1329 void ath12k_dp_rx_deliver_msdu(struct ath12k_pdev_dp *dp_pdev, struct napi_struct *napi,
1330 			       struct sk_buff *msdu,
1331 			       struct hal_rx_desc_data *rx_info)
1332 {
1333 	struct ath12k_dp *dp = dp_pdev->dp;
1334 	struct ieee80211_rx_status *rx_status;
1335 	struct ieee80211_sta *pubsta;
1336 	struct ath12k_dp_peer *peer;
1337 	struct ath12k_skb_rxcb *rxcb = ATH12K_SKB_RXCB(msdu);
1338 	struct ieee80211_rx_status *status = rx_info->rx_status;
1339 	u8 decap = rx_info->decap_type;
1340 	bool is_mcbc = rxcb->is_mcbc;
1341 	bool is_eapol = rxcb->is_eapol;
1342 
1343 	peer = ath12k_dp_peer_find_by_peerid(dp_pdev, rxcb->peer_id);
1344 
1345 	pubsta = peer ? peer->sta : NULL;
1346 
1347 	if (pubsta && pubsta->valid_links) {
1348 		status->link_valid = 1;
1349 		status->link_id = peer->hw_links[rxcb->hw_link_id];
1350 	}
1351 
1352 	ath12k_dbg(dp->ab, ATH12K_DBG_DATA,
1353 		   "rx skb %p len %u peer %pM %d %s sn %u %s%s%s%s%s%s%s%s%s%s rate_idx %u vht_nss %u freq %u band %u flag 0x%x fcs-err %i mic-err %i amsdu-more %i\n",
1354 		   msdu,
1355 		   msdu->len,
1356 		   peer ? peer->addr : NULL,
1357 		   rxcb->tid,
1358 		   is_mcbc ? "mcast" : "ucast",
1359 		   rx_info->seq_no,
1360 		   (status->encoding == RX_ENC_LEGACY) ? "legacy" : "",
1361 		   (status->encoding == RX_ENC_HT) ? "ht" : "",
1362 		   (status->encoding == RX_ENC_VHT) ? "vht" : "",
1363 		   (status->encoding == RX_ENC_HE) ? "he" : "",
1364 		   (status->encoding == RX_ENC_EHT) ? "eht" : "",
1365 		   (status->bw == RATE_INFO_BW_40) ? "40" : "",
1366 		   (status->bw == RATE_INFO_BW_80) ? "80" : "",
1367 		   (status->bw == RATE_INFO_BW_160) ? "160" : "",
1368 		   (status->bw == RATE_INFO_BW_320) ? "320" : "",
1369 		   status->enc_flags & RX_ENC_FLAG_SHORT_GI ? "sgi " : "",
1370 		   status->rate_idx,
1371 		   status->nss,
1372 		   status->freq,
1373 		   status->band, status->flag,
1374 		   !!(status->flag & RX_FLAG_FAILED_FCS_CRC),
1375 		   !!(status->flag & RX_FLAG_MMIC_ERROR),
1376 		   !!(status->flag & RX_FLAG_AMSDU_MORE));
1377 
1378 	ath12k_dbg_dump(dp->ab, ATH12K_DBG_DP_RX, NULL, "dp rx msdu: ",
1379 			msdu->data, msdu->len);
1380 
1381 	rx_status = IEEE80211_SKB_RXCB(msdu);
1382 	*rx_status = *status;
1383 
1384 	/* TODO: trace rx packet */
1385 
1386 	/* PN for multicast packets is not validated in HW,
1387 	 * so skip the 802.3 rx path.
1388 	 * Also, fast_rx expects the STA to be authorized, hence
1389 	 * EAPOL packets are sent via the slow path.
1390 	 */
1391 	if (decap == DP_RX_DECAP_TYPE_ETHERNET2_DIX && !is_eapol &&
1392 	    !(is_mcbc && rx_status->flag & RX_FLAG_DECRYPTED))
1393 		rx_status->flag |= RX_FLAG_8023;
1394 
1395 	ieee80211_rx_napi(ath12k_pdev_dp_to_hw(dp_pdev), pubsta, msdu, napi);
1396 }
1397 EXPORT_SYMBOL(ath12k_dp_rx_deliver_msdu);
1398 
1399 bool ath12k_dp_rx_check_nwifi_hdr_len_valid(struct ath12k_dp *dp,
1400 					    struct sk_buff *msdu,
1401 					    struct hal_rx_desc_data *rx_info)
1402 {
1403 	struct ieee80211_hdr *hdr;
1404 	u32 hdr_len;
1405 
1406 	if (rx_info->decap_type != DP_RX_DECAP_TYPE_NATIVE_WIFI)
1407 		return true;
1408 
1409 	hdr = (struct ieee80211_hdr *)msdu->data;
1410 	hdr_len = ieee80211_hdrlen(hdr->frame_control);
1411 
1412 	if (likely(hdr_len <= DP_MAX_NWIFI_HDR_LEN))
1413 		return true;
1414 
1415 	dp->device_stats.invalid_rbm++;
1416 	WARN_ON_ONCE(1);
1417 	return false;
1418 }
1419 EXPORT_SYMBOL(ath12k_dp_rx_check_nwifi_hdr_len_valid);
1420 
1421 static void ath12k_dp_rx_frag_timer(struct timer_list *timer)
1422 {
1423 	struct ath12k_dp_rx_tid *rx_tid = timer_container_of(rx_tid, timer,
1424 							     frag_timer);
1425 
1426 	spin_lock_bh(&rx_tid->dp->dp_lock);
1427 	if (rx_tid->last_frag_no &&
1428 	    rx_tid->rx_frag_bitmap == GENMASK(rx_tid->last_frag_no, 0)) {
1429 		spin_unlock_bh(&rx_tid->dp->dp_lock);
1430 		return;
1431 	}
1432 	ath12k_dp_arch_rx_frags_cleanup(rx_tid->dp, rx_tid, true);
1433 	spin_unlock_bh(&rx_tid->dp->dp_lock);
1434 }
1435 
1436 int ath12k_dp_rx_peer_frag_setup(struct ath12k *ar, const u8 *peer_mac, int vdev_id)
1437 {
1438 	struct ath12k_base *ab = ar->ab;
1439 	struct ath12k_dp_link_peer *peer;
1440 	struct ath12k_dp_rx_tid *rx_tid;
1441 	int i;
1442 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1443 
1444 	if (fips_enabled) {
1445 		ath12k_warn(ab, "This driver is disabled due to FIPS\n");
1446 		return -ENOENT;
1447 	}
1448 
1449 	spin_lock_bh(&dp->dp_lock);
1450 
1451 	peer = ath12k_dp_link_peer_find_by_vdev_and_addr(dp, vdev_id, peer_mac);
1452 	if (!peer || !peer->dp_peer) {
1453 		spin_unlock_bh(&dp->dp_lock);
1454 		ath12k_warn(ab, "failed to find the peer to set up fragment info\n");
1455 		return -ENOENT;
1456 	}
1457 
1458 	if (!peer->primary_link) {
1459 		spin_unlock_bh(&dp->dp_lock);
1460 		return 0;
1461 	}
1462 
1463 	for (i = 0; i <= IEEE80211_NUM_TIDS; i++) {
1464 		rx_tid = &peer->dp_peer->rx_tid[i];
1465 		rx_tid->dp = dp;
1466 		timer_setup(&rx_tid->frag_timer, ath12k_dp_rx_frag_timer, 0);
1467 		skb_queue_head_init(&rx_tid->rx_frags);
1468 	}
1469 
1470 	peer->dp_peer->dp_setup_done = true;
1471 	spin_unlock_bh(&dp->dp_lock);
1472 
1473 	return 0;
1474 }
1475 
1476 void ath12k_dp_rx_h_undecap_frag(struct ath12k_pdev_dp *dp_pdev, struct sk_buff *msdu,
1477 				 enum hal_encrypt_type enctype, u32 flags)
1478 {
1479 	struct ath12k_dp *dp = dp_pdev->dp;
1480 	struct ieee80211_hdr *hdr;
1481 	size_t hdr_len;
1482 	size_t crypto_len;
1483 	u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;
1484 
1485 	if (!flags)
1486 		return;
1487 
1488 	hdr = (struct ieee80211_hdr *)(msdu->data + hal_rx_desc_sz);
1489 
1490 	if (flags & RX_FLAG_MIC_STRIPPED)
1491 		skb_trim(msdu, msdu->len -
1492 			 ath12k_dp_rx_crypto_mic_len(dp, enctype));
1493 
1494 	if (flags & RX_FLAG_ICV_STRIPPED)
1495 		skb_trim(msdu, msdu->len -
1496 			 ath12k_dp_rx_crypto_icv_len(dp_pdev, enctype));
1497 
1498 	if (flags & RX_FLAG_IV_STRIPPED) {
1499 		hdr_len = ieee80211_hdrlen(hdr->frame_control);
1500 		crypto_len = ath12k_dp_rx_crypto_param_len(dp_pdev, enctype);
1501 
1502 		memmove(msdu->data + hal_rx_desc_sz + crypto_len,
1503 			msdu->data + hal_rx_desc_sz, hdr_len);
1504 		skb_pull(msdu, crypto_len);
1505 	}
1506 }
1507 EXPORT_SYMBOL(ath12k_dp_rx_h_undecap_frag);
1508 
1509 static int ath12k_dp_rx_h_cmp_frags(struct ath12k_hal *hal,
1510 				    struct sk_buff *a, struct sk_buff *b)
1511 {
1512 	int frag1, frag2;
1513 
1514 	frag1 = ath12k_dp_rx_h_frag_no(hal, a);
1515 	frag2 = ath12k_dp_rx_h_frag_no(hal, b);
1516 
1517 	return frag1 - frag2;
1518 }
1519 
1520 void ath12k_dp_rx_h_sort_frags(struct ath12k_hal *hal,
1521 			       struct sk_buff_head *frag_list,
1522 			       struct sk_buff *cur_frag)
1523 {
1524 	struct sk_buff *skb;
1525 	int cmp;
1526 
1527 	skb_queue_walk(frag_list, skb) {
1528 		cmp = ath12k_dp_rx_h_cmp_frags(hal, skb, cur_frag);
1529 		if (cmp < 0)
1530 			continue;
1531 		__skb_queue_before(frag_list, skb, cur_frag);
1532 		return;
1533 	}
1534 	__skb_queue_tail(frag_list, cur_frag);
1535 }
1536 EXPORT_SYMBOL(ath12k_dp_rx_h_sort_frags);
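/* ath12k_dp_rx_h_sort_frags() keeps frag_list ordered by fragment number by
 * inserting before the first entry whose fragment number is >= that of
 * cur_frag; e.g. inserting fragment 2 into [0, 1, 3] yields [0, 1, 2, 3].
 */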
1537 
1538 u64 ath12k_dp_rx_h_get_pn(struct ath12k_dp *dp, struct sk_buff *skb)
1539 {
1540 	struct ieee80211_hdr *hdr;
1541 	u64 pn = 0;
1542 	u8 *ehdr;
1543 	u32 hal_rx_desc_sz = dp->ab->hal.hal_desc_sz;
1544 
1545 	hdr = (struct ieee80211_hdr *)(skb->data + hal_rx_desc_sz);
1546 	ehdr = skb->data + hal_rx_desc_sz + ieee80211_hdrlen(hdr->frame_control);
1547 
1548 	pn = ehdr[0];
1549 	pn |= (u64)ehdr[1] << 8;
1550 	pn |= (u64)ehdr[4] << 16;
1551 	pn |= (u64)ehdr[5] << 24;
1552 	pn |= (u64)ehdr[6] << 32;
1553 	pn |= (u64)ehdr[7] << 40;
1554 
1555 	return pn;
1556 }
1557 EXPORT_SYMBOL(ath12k_dp_rx_h_get_pn);
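/* The PN extraction above assumes the IEEE 802.11 CCMP/GCMP header layout:
 * byte 0 = PN0, byte 1 = PN1, byte 2 reserved, byte 3 = key ID octet,
 * bytes 4-7 = PN2..PN5, which is why ehdr[2] and ehdr[3] are skipped when
 * assembling the 48-bit PN.
 */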
1558 
1559 void ath12k_dp_rx_free(struct ath12k_base *ab)
1560 {
1561 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1562 	struct dp_srng *srng;
1563 	int i;
1564 
1565 	ath12k_dp_srng_cleanup(ab, &dp->rx_refill_buf_ring.refill_buf_ring);
1566 
1567 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1568 		if (ab->hw_params->rx_mac_buf_ring)
1569 			ath12k_dp_srng_cleanup(ab, &dp->rx_mac_buf_ring[i]);
1570 		if (!ab->hw_params->rxdma1_enable) {
1571 			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
1572 			ath12k_dp_srng_cleanup(ab, srng);
1573 		}
1574 	}
1575 
1576 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++)
1577 		ath12k_dp_srng_cleanup(ab, &dp->rxdma_err_dst_ring[i]);
1578 
1579 	ath12k_dp_srng_cleanup(ab, &dp->rxdma_mon_buf_ring.refill_buf_ring);
1580 
1581 	ath12k_dp_rxdma_buf_free(ab);
1582 }
1583 
1584 void ath12k_dp_rx_pdev_free(struct ath12k_base *ab, int mac_id)
1585 {
1586 	struct ath12k *ar = ab->pdevs[mac_id].ar;
1587 
1588 	ath12k_dp_rx_pdev_srng_free(ar);
1589 }
1590 
1591 int ath12k_dp_rx_htt_setup(struct ath12k_base *ab)
1592 {
1593 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1594 	u32 ring_id;
1595 	int i, ret;
1596 
1597 	/* TODO: Need to verify the HTT setup for QCN9224 */
1598 	ring_id = dp->rx_refill_buf_ring.refill_buf_ring.ring_id;
1599 	ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, 0, HAL_RXDMA_BUF);
1600 	if (ret) {
1601 		ath12k_warn(ab, "failed to configure rx_refill_buf_ring %d\n",
1602 			    ret);
1603 		return ret;
1604 	}
1605 
1606 	if (ab->hw_params->rx_mac_buf_ring) {
1607 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1608 			ring_id = dp->rx_mac_buf_ring[i].ring_id;
1609 			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
1610 							  i, HAL_RXDMA_BUF);
1611 			if (ret) {
1612 				ath12k_warn(ab, "failed to configure rx_mac_buf_ring%d %d\n",
1613 					    i, ret);
1614 				return ret;
1615 			}
1616 		}
1617 	}
1618 
1619 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
1620 		ring_id = dp->rxdma_err_dst_ring[i].ring_id;
1621 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
1622 						  i, HAL_RXDMA_DST);
1623 		if (ret) {
1624 			ath12k_warn(ab, "failed to configure rxdma_err_dest_ring%d %d\n",
1625 				    i, ret);
1626 			return ret;
1627 		}
1628 	}
1629 
1630 	if (ab->hw_params->rxdma1_enable) {
1631 		ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
1632 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
1633 						  0, HAL_RXDMA_MONITOR_BUF);
1634 		if (ret) {
1635 			ath12k_warn(ab, "failed to configure rxdma_mon_buf_ring %d\n",
1636 				    ret);
1637 			return ret;
1638 		}
1639 	} else {
1640 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1641 			ring_id =
1642 				dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
1643 			ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id, i,
1644 							  HAL_RXDMA_MONITOR_STATUS);
1645 			if (ret) {
1646 				ath12k_warn(ab,
1647 					    "failed to configure mon_status_refill_ring%d %d\n",
1648 					    i, ret);
1649 				return ret;
1650 			}
1651 		}
1652 	}
1653 
1654 	ret = ab->hw_params->hw_ops->rxdma_ring_sel_config(ab);
1655 	if (ret) {
1656 		ath12k_warn(ab, "failed to setup rxdma ring selection config\n");
1657 		return ret;
1658 	}
1659 
1660 	return 0;
1661 }
1662 
1663 int ath12k_dp_rx_alloc(struct ath12k_base *ab)
1664 {
1665 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
1666 	struct dp_srng *srng;
1667 	int i, ret;
1668 
1669 	idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
1670 	spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
1671 
1672 	ret = ath12k_dp_srng_setup(ab,
1673 				   &dp->rx_refill_buf_ring.refill_buf_ring,
1674 				   HAL_RXDMA_BUF, 0, 0,
1675 				   DP_RXDMA_BUF_RING_SIZE);
1676 	if (ret) {
1677 		ath12k_warn(ab, "failed to setup rx_refill_buf_ring\n");
1678 		return ret;
1679 	}
1680 
1681 	if (ab->hw_params->rx_mac_buf_ring) {
1682 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1683 			ret = ath12k_dp_srng_setup(ab,
1684 						   &dp->rx_mac_buf_ring[i],
1685 						   HAL_RXDMA_BUF, 1,
1686 						   i, DP_RX_MAC_BUF_RING_SIZE);
1687 			if (ret) {
1688 				ath12k_warn(ab, "failed to setup rx_mac_buf_ring %d\n",
1689 					    i);
1690 				return ret;
1691 			}
1692 		}
1693 	}
1694 
1695 	for (i = 0; i < ab->hw_params->num_rxdma_dst_ring; i++) {
1696 		ret = ath12k_dp_srng_setup(ab, &dp->rxdma_err_dst_ring[i],
1697 					   HAL_RXDMA_DST, 0, i,
1698 					   DP_RXDMA_ERR_DST_RING_SIZE);
1699 		if (ret) {
1700 			ath12k_warn(ab, "failed to setup rxdma_err_dst_ring %d\n", i);
1701 			return ret;
1702 		}
1703 	}
1704 
1705 	if (ab->hw_params->rxdma1_enable) {
1706 		ret = ath12k_dp_srng_setup(ab,
1707 					   &dp->rxdma_mon_buf_ring.refill_buf_ring,
1708 					   HAL_RXDMA_MONITOR_BUF, 0, 0,
1709 					   DP_RXDMA_MONITOR_BUF_RING_SIZE(ab));
1710 		if (ret) {
1711 			ath12k_warn(ab, "failed to setup HAL_RXDMA_MONITOR_BUF\n");
1712 			return ret;
1713 		}
1714 	} else {
1715 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1716 			idr_init(&dp->rx_mon_status_refill_ring[i].bufs_idr);
1717 			spin_lock_init(&dp->rx_mon_status_refill_ring[i].idr_lock);
1718 		}
1719 
1720 		for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1721 			srng = &dp->rx_mon_status_refill_ring[i].refill_buf_ring;
1722 			ret = ath12k_dp_srng_setup(ab, srng,
1723 						   HAL_RXDMA_MONITOR_STATUS, 0, i,
1724 						   DP_RXDMA_MON_STATUS_RING_SIZE);
1725 			if (ret) {
1726 				ath12k_warn(ab, "failed to setup mon status ring %d\n",
1727 					    i);
1728 				return ret;
1729 			}
1730 		}
1731 	}
1732 
1733 	ret = ath12k_dp_rxdma_buf_setup(ab);
1734 	if (ret) {
1735 		ath12k_warn(ab, "failed to setup rxdma ring\n");
1736 		return ret;
1737 	}
1738 
1739 	return 0;
1740 }
1741 
1742 int ath12k_dp_rx_pdev_alloc(struct ath12k_base *ab, int mac_id)
1743 {
1744 	struct ath12k *ar = ab->pdevs[mac_id].ar;
1745 	struct ath12k_pdev_dp *dp = &ar->dp;
1746 	u32 ring_id;
1747 	int i;
1748 	int ret;
1749 
1750 	if (!ab->hw_params->rxdma1_enable)
1751 		goto out;
1752 
1753 	ret = ath12k_dp_rx_pdev_srng_alloc(ar);
1754 	if (ret) {
1755 		ath12k_warn(ab, "failed to setup rx srngs\n");
1756 		return ret;
1757 	}
1758 
1759 	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1760 		ring_id = dp->rxdma_mon_dst_ring[i].ring_id;
1761 		ret = ath12k_dp_tx_htt_srng_setup(ab, ring_id,
1762 						  mac_id + i,
1763 						  HAL_RXDMA_MONITOR_DST);
1764 		if (ret) {
1765 			ath12k_warn(ab,
1766 				    "failed to configure rxdma_mon_dst_ring %d %d\n",
1767 				    i, ret);
1768 			return ret;
1769 		}
1770 	}
1771 out:
1772 	return 0;
1773 }
1774 
1775 static int ath12k_dp_rx_pdev_mon_status_attach(struct ath12k *ar)
1776 {
1777 	struct ath12k_pdev_dp *dp = &ar->dp;
1778 	struct ath12k_mon_data *pmon = (struct ath12k_mon_data *)&dp->mon_data;
1779 
1780 	skb_queue_head_init(&pmon->rx_status_q);
1781 
1782 	pmon->mon_ppdu_status = DP_PPDU_STATUS_START;
1783 
1784 	memset(&pmon->rx_mon_stats, 0,
1785 	       sizeof(pmon->rx_mon_stats));
1786 	return 0;
1787 }
1788 
1789 int ath12k_dp_rx_pdev_mon_attach(struct ath12k *ar)
1790 {
1791 	struct ath12k_pdev_dp *dp = &ar->dp;
1792 	struct ath12k_mon_data *pmon = &dp->mon_data;
1793 	int ret = 0;
1794 
1795 	ret = ath12k_dp_rx_pdev_mon_status_attach(ar);
1796 	if (ret) {
1797 		ath12k_warn(ar->ab, "pdev_mon_status_attach() failed");
1798 		return ret;
1799 	}
1800 
1801 	pmon->mon_last_linkdesc_paddr = 0;
1802 	pmon->mon_last_buf_cookie = DP_RX_DESC_COOKIE_MAX + 1;
1803 	spin_lock_init(&pmon->mon_lock);
1804 
1805 	if (!ar->ab->hw_params->rxdma1_enable)
1806 		return 0;
1807 
1808 	INIT_LIST_HEAD(&pmon->dp_rx_mon_mpdu_list);
1809 	pmon->mon_mpdu = NULL;
1810 
1811 	return 0;
1812 }
1813