// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#if defined(__FreeBSD__)
#include <asm/io.h>
#endif
#include <crypto/hash.h>
#include <linux/export.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
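	/* Bit 0 of the routing parameter enables hash based steering
	 * (DP_RX_HASH_ENABLE); the REO destination ring id is carried in
	 * the bits above it, hence the shift by one below.
	 */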
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d for peer %pM vdev_id %d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rx tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to delete rx tids\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
				     ring->paddr_unaligned, DMA_FROM_DEVICE);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d, ring_num: %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

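	/* Compose the 64-bit MSI address from its low/high halves and map
	 * the group onto the available vectors; groups beyond
	 * msi_data_count wrap around and share a vector (warned above).
	 */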
	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = {};
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

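	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the ring base
	 * can be aligned up below without running past the buffer.
	 */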
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}
	}

	if (cached)
		ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
							      &ring->paddr_unaligned,
							      DMA_FROM_DEVICE,
							      GFP_KERNEL);
	else
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

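	/* Align the CPU address and shift the DMA address by the same
	 * offset so both refer to the aligned ring base.
	 */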
	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp: %d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up sw2wbm_release ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring: %d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring: %d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d): %d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d): %d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring: %d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring: %d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring: %d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring: %d\n", ret);
		goto err;
	}

	/* When hash based routing of rx packets is enabled, 32 entries
	 * mapping the hash values to the REO destination rings are
	 * configured.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

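	/* Walk every link descriptor bank and record each descriptor's DMA
	 * address in the scatter buffers, moving on to the next scatter
	 * buffer once the current one is full.
	 */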
	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			     HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

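	/* Byte offset of the first unused entry in the last scatter
	 * buffer, passed to the HAL when setting up the idle list.
	 */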
	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;

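	/* Round the descriptor count up to the next power of two when it
	 * is not one already (n & (n - 1) is zero only for powers of two).
	 */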
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

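	/* Split the total into banks of at most
	 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes; each full bank loses
	 * HAL_LINK_DESC_ALIGN bytes to alignment slack.
	 */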
	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup the scatter desc list when the total memory requirement
	 * exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH
	 */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

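	/* A TX completion ring is serviced when the bit matching its WBM
	 * ring number is set in this group's tx mask.
	 */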
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
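		/* Service the highest-numbered REO destination ring set in
		 * this group's rx mask (fls() returns the last set bit).
		 */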
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxdma_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
					BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxdma_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	timer_delete_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring, unlike the tx ring which is mapped to different ACs */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id %d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

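	/* Per-ring software completion buffer, sized to hold one status
	 * entry for every descriptor in the hw tx completion ring.
	 */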
	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer,
									 t,
									 timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX
	 * happened during the interval. If it did, the timer is re-armed;
	 * only when there were no TX operations does the handler update
	 * the HP and stop the timer. The timer is started again when TX
	 * resumes.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

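	/* Count this TX operation; the timer handler compares tx_num
	 * against its last snapshot to detect an idle interval.
	 */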
	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	timer_delete_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}
1208