1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include <crypto/hash.h>
8 #include "core.h"
9 #include "dp_tx.h"
10 #include "hal_tx.h"
11 #include "hif.h"
12 #include "debug.h"
13 #include "dp_rx.h"
14 #include "peer.h"
15
16 static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
17 struct sk_buff *skb)
18 {
19 dev_kfree_skb_any(skb);
20 }
21
22 void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
23 {
24 struct ath11k_base *ab = ar->ab;
25 struct ath11k_peer *peer;
26
27 /* TODO: Any other peer specific DP cleanup */
28
29 spin_lock_bh(&ab->base_lock);
30 peer = ath11k_peer_find(ab, vdev_id, addr);
31 if (!peer) {
32 ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
33 addr, vdev_id);
34 spin_unlock_bh(&ab->base_lock);
35 return;
36 }
37
38 ath11k_peer_rx_tid_cleanup(ar, peer);
39 peer->dp_setup_done = false;
40 crypto_free_shash(peer->tfm_mmic);
41 spin_unlock_bh(&ab->base_lock);
42 }
43
44 int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
45 {
46 struct ath11k_base *ab = ar->ab;
47 struct ath11k_peer *peer;
48 u32 reo_dest;
49 int ret = 0, tid;
50
51 /* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
52 reo_dest = ar->dp.mac_id + 1;
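/* The routing value built below packs the hash-enable flag in bit 0 and
 * the REO destination ring number in the bits above it.
 */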
53 ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
54 WMI_PEER_SET_DEFAULT_ROUTING,
55 DP_RX_HASH_ENABLE | (reo_dest << 1));
56
57 if (ret) {
58 ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
59 ret, addr, vdev_id);
60 return ret;
61 }
62
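/* Set up an RX reorder (TID) queue for every TID; the loop is inclusive
 * of IEEE80211_NUM_TIDS, so one extra queue beyond the 16 QoS TIDs is
 * created for non-QoS traffic.
 */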
63 for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
64 ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
65 HAL_PN_TYPE_NONE);
66 if (ret) {
67 ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
68 tid, ret);
69 goto peer_clean;
70 }
71 }
72
73 ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
74 if (ret) {
75 ath11k_warn(ab, "failed to setup rx defrag context\n");
76 tid--;
77 goto peer_clean;
78 }
79
80 /* TODO: Setup other peer specific resource used in data path */
81
82 return 0;
83
84 peer_clean:
85 spin_lock_bh(&ab->base_lock);
86
87 peer = ath11k_peer_find(ab, vdev_id, addr);
88 if (!peer) {
89 ath11k_warn(ab, "failed to find the peer to del rx tid\n");
90 spin_unlock_bh(&ab->base_lock);
91 return -ENOENT;
92 }
93
94 for (; tid >= 0; tid--)
95 ath11k_peer_rx_tid_delete(ar, peer, tid);
96
97 spin_unlock_bh(&ab->base_lock);
98
99 return ret;
100 }
101
102 void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
103 {
104 if (!ring->vaddr_unaligned)
105 return;
106
107 if (ring->cached)
108 dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
109 ring->paddr_unaligned, DMA_FROM_DEVICE);
110 else
111 dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
112 ring->paddr_unaligned);
113
114 ring->vaddr_unaligned = NULL;
115 }
116
117 static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
118 {
119 int ext_group_num;
120 u8 mask = 1 << ring_num;
121
122 for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
123 ext_group_num++) {
124 if (mask & grp_mask[ext_group_num])
125 return ext_group_num;
126 }
127
128 return -ENOENT;
129 }
130
131 static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
132 enum hal_ring_type type, int ring_num)
133 {
134 const u8 *grp_mask;
135
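/* Map the ring type (and, for WBM2SW release, the ring number) to the
 * per-ext-group interrupt mask that services it; ring types not handled
 * through ext interrupt groups return -ENOENT.
 */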
136 switch (type) {
137 case HAL_WBM2SW_RELEASE:
138 if (ring_num == DP_RX_RELEASE_RING_NUM) {
139 grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
140 ring_num = 0;
141 } else {
142 grp_mask = &ab->hw_params.ring_mask->tx[0];
143 }
144 break;
145 case HAL_REO_EXCEPTION:
146 grp_mask = &ab->hw_params.ring_mask->rx_err[0];
147 break;
148 case HAL_REO_DST:
149 grp_mask = &ab->hw_params.ring_mask->rx[0];
150 break;
151 case HAL_REO_STATUS:
152 grp_mask = &ab->hw_params.ring_mask->reo_status[0];
153 break;
154 case HAL_RXDMA_MONITOR_STATUS:
155 case HAL_RXDMA_MONITOR_DST:
156 grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
157 break;
158 case HAL_RXDMA_DST:
159 grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
160 break;
161 case HAL_RXDMA_BUF:
162 grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
163 break;
164 case HAL_RXDMA_MONITOR_BUF:
165 case HAL_TCL_DATA:
166 case HAL_TCL_CMD:
167 case HAL_REO_CMD:
168 case HAL_SW2WBM_RELEASE:
169 case HAL_WBM_IDLE_LINK:
170 case HAL_TCL_STATUS:
171 case HAL_REO_REINJECT:
172 case HAL_CE_SRC:
173 case HAL_CE_DST:
174 case HAL_CE_DST_STATUS:
175 default:
176 return -ENOENT;
177 }
178
179 return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
180 }
181
182 static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
183 struct hal_srng_params *ring_params,
184 enum hal_ring_type type, int ring_num)
185 {
186 int msi_group_number, msi_data_count;
187 u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
188 int ret;
189
190 ret = ath11k_get_user_msi_vector(ab, "DP",
191 &msi_data_count, &msi_data_start,
192 &msi_irq_start);
193 if (ret)
194 return;
195
196 msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
197 ring_num);
198 if (msi_group_number < 0) {
199 ath11k_dbg(ab, ATH11K_DBG_PCI,
200 "ring not part of an ext_group; ring_type: %d,ring_num %d",
201 type, ring_num);
202 ring_params->msi_addr = 0;
203 ring_params->msi_data = 0;
204 return;
205 }
206
207 if (msi_group_number > msi_data_count) {
208 ath11k_dbg(ab, ATH11K_DBG_PCI,
209 "multiple msi_groups share one msi, msi_group_num %d",
210 msi_group_number);
211 }
212
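/* Compose the 64-bit MSI address from the low/high words and wrap the
 * group number into the number of available MSI vectors to pick msi_data.
 */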
213 ath11k_get_msi_address(ab, &addr_lo, &addr_hi);
214
215 ring_params->msi_addr = addr_lo;
216 ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
217 ring_params->msi_data = (msi_group_number % msi_data_count)
218 + msi_data_start;
219 ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
220 }
221
222 int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
223 enum hal_ring_type type, int ring_num,
224 int mac_id, int num_entries)
225 {
226 struct hal_srng_params params = { 0 };
227 int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
228 int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
229 int ret;
230 bool cached = false;
231
232 if (max_entries < 0 || entry_sz < 0)
233 return -EINVAL;
234
235 if (num_entries > max_entries)
236 num_entries = max_entries;
237
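/* Reserve alignment slack so the ring base can later be aligned to
 * HAL_RING_BASE_ALIGN within the allocation.
 */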
238 ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
239
240 if (ab->hw_params.alloc_cacheable_memory) {
241 /* Allocate the reo dst and tx completion rings from cacheable memory */
242 switch (type) {
243 case HAL_REO_DST:
244 case HAL_WBM2SW_RELEASE:
245 cached = true;
246 break;
247 default:
248 cached = false;
249 }
250 }
251
252 if (cached)
253 ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
254 &ring->paddr_unaligned,
255 DMA_FROM_DEVICE,
256 GFP_KERNEL);
257 else
258 ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
259 &ring->paddr_unaligned,
260 GFP_KERNEL);
261
262 if (!ring->vaddr_unaligned)
263 return -ENOMEM;
264
265 ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
266 ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
267 (unsigned long)ring->vaddr_unaligned);
268
269 params.ring_base_vaddr = ring->vaddr;
270 params.ring_base_paddr = ring->paddr;
271 params.num_entries = num_entries;
272 ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
273
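/* Configure interrupt mitigation per ring type: a batch counter threshold
 * and/or a timer threshold must expire before the SRNG raises an interrupt.
 */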
274 switch (type) {
275 case HAL_REO_DST:
276 params.intr_batch_cntr_thres_entries =
277 HAL_SRNG_INT_BATCH_THRESHOLD_RX;
278 params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
279 break;
280 case HAL_RXDMA_BUF:
281 case HAL_RXDMA_MONITOR_BUF:
282 case HAL_RXDMA_MONITOR_STATUS:
283 params.low_threshold = num_entries >> 3;
284 params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
285 params.intr_batch_cntr_thres_entries = 0;
286 params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
287 break;
288 case HAL_WBM2SW_RELEASE:
289 if (ring_num < 3) {
290 params.intr_batch_cntr_thres_entries =
291 HAL_SRNG_INT_BATCH_THRESHOLD_TX;
292 params.intr_timer_thres_us =
293 HAL_SRNG_INT_TIMER_THRESHOLD_TX;
294 break;
295 }
296 /* follow through when ring_num >= 3 */
297 fallthrough;
298 case HAL_REO_EXCEPTION:
299 case HAL_REO_REINJECT:
300 case HAL_REO_CMD:
301 case HAL_REO_STATUS:
302 case HAL_TCL_DATA:
303 case HAL_TCL_CMD:
304 case HAL_TCL_STATUS:
305 case HAL_WBM_IDLE_LINK:
306 case HAL_SW2WBM_RELEASE:
307 case HAL_RXDMA_DST:
308 case HAL_RXDMA_MONITOR_DST:
309 case HAL_RXDMA_MONITOR_DESC:
310 params.intr_batch_cntr_thres_entries =
311 HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
312 params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
313 break;
314 case HAL_RXDMA_DIR_BUF:
315 break;
316 default:
317 ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
318 return -EINVAL;
319 }
320
321 if (cached) {
322 params.flags |= HAL_SRNG_FLAGS_CACHED;
323 ring->cached = 1;
324 }
325
326 ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
327 if (ret < 0) {
328 ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
329 ret, ring_num);
330 return ret;
331 }
332
333 ring->ring_id = ret;
334
335 return 0;
336 }
337
338 void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
339 {
340 int i;
341
342 if (!ab->hw_params.supports_shadow_regs)
343 return;
344
345 for (i = 0; i < ab->hw_params.max_tx_ring; i++)
346 ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);
347
348 ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
349 }
350
351 static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
352 {
353 struct ath11k_dp *dp = &ab->dp;
354 int i;
355
356 ath11k_dp_stop_shadow_timers(ab);
357 ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
358 ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
359 ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
360 for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
361 ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
362 ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
363 }
364 ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
365 ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
366 ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
367 ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
368 ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
369 }
370
371 static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
372 {
373 struct ath11k_dp *dp = &ab->dp;
374 struct hal_srng *srng;
375 int i, ret;
376 u8 tcl_num, wbm_num;
377
378 ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
379 HAL_SW2WBM_RELEASE, 0, 0,
380 DP_WBM_RELEASE_RING_SIZE);
381 if (ret) {
382 ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
383 ret);
384 goto err;
385 }
386
387 ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
388 DP_TCL_CMD_RING_SIZE);
389 if (ret) {
390 ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
391 goto err;
392 }
393
394 ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
395 0, 0, DP_TCL_STATUS_RING_SIZE);
396 if (ret) {
397 ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
398 goto err;
399 }
400
401 for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
402 tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
403 wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;
404
405 ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
406 HAL_TCL_DATA, tcl_num, 0,
407 ab->hw_params.tx_ring_size);
408 if (ret) {
409 ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
410 i, ret);
411 goto err;
412 }
413
414 ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
415 HAL_WBM2SW_RELEASE, wbm_num, 0,
416 DP_TX_COMP_RING_SIZE);
417 if (ret) {
418 ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
419 i, ret);
420 goto err;
421 }
422
423 srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
424 ath11k_hal_tx_init_data_ring(ab, srng);
425
426 ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
427 ATH11K_SHADOW_DP_TIMER_INTERVAL,
428 dp->tx_ring[i].tcl_data_ring.ring_id);
429 }
430
431 ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
432 0, 0, DP_REO_REINJECT_RING_SIZE);
433 if (ret) {
434 ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
435 ret);
436 goto err;
437 }
438
439 ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
440 DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
441 if (ret) {
442 ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
443 goto err;
444 }
445
446 ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
447 0, 0, DP_REO_EXCEPTION_RING_SIZE);
448 if (ret) {
449 ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
450 ret);
451 goto err;
452 }
453
454 ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
455 0, 0, DP_REO_CMD_RING_SIZE);
456 if (ret) {
457 ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
458 goto err;
459 }
460
461 srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
462 ath11k_hal_reo_init_cmd_ring(ab, srng);
463
464 ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
465 ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
466 dp->reo_cmd_ring.ring_id);
467
468 ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
469 0, 0, DP_REO_STATUS_RING_SIZE);
470 if (ret) {
471 ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
472 goto err;
473 }
474
475 /* When hash based routing of rx packets is enabled, 32 entries that map
476 * the hash values to the REO destination rings will be configured.
477 */
478 ab->hw_params.hw_ops->reo_setup(ab);
479
480 return 0;
481
482 err:
483 ath11k_dp_srng_common_cleanup(ab);
484
485 return ret;
486 }
487
488 static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
489 {
490 struct ath11k_dp *dp = &ab->dp;
491 struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
492 int i;
493
494 for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
495 if (!slist[i].vaddr)
496 continue;
497
498 dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
499 slist[i].vaddr, slist[i].paddr);
500 slist[i].vaddr = NULL;
501 }
502 }
503
504 static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
505 int size,
506 u32 n_link_desc_bank,
507 u32 n_link_desc,
508 u32 last_bank_sz)
509 {
510 struct ath11k_dp *dp = &ab->dp;
511 struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
512 struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
513 u32 n_entries_per_buf;
514 int num_scatter_buf, scatter_idx;
515 struct hal_wbm_link_desc *scatter_buf;
516 int align_bytes, n_entries;
517 dma_addr_t paddr;
518 int rem_entries;
519 int i;
520 int ret = 0;
521 u32 end_offset;
522
523 n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
524 ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
525 num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
526
527 if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
528 return -EINVAL;
529
530 for (i = 0; i < num_scatter_buf; i++) {
531 slist[i].vaddr = dma_alloc_coherent(ab->dev,
532 HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
533 &slist[i].paddr, GFP_KERNEL);
534 if (!slist[i].vaddr) {
535 ret = -ENOMEM;
536 goto err;
537 }
538 }
539
540 scatter_idx = 0;
541 scatter_buf = slist[scatter_idx].vaddr;
542 rem_entries = n_entries_per_buf;
543
544 for (i = 0; i < n_link_desc_bank; i++) {
545 align_bytes = link_desc_banks[i].vaddr -
546 link_desc_banks[i].vaddr_unaligned;
547 n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
548 HAL_LINK_DESC_SIZE;
549 paddr = link_desc_banks[i].paddr;
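/* Fill the current scatter buffer with link descriptor addresses and
 * switch to the next scatter buffer once its entry count is exhausted.
 */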
550 while (n_entries) {
551 ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
552 n_entries--;
553 paddr += HAL_LINK_DESC_SIZE;
554 if (rem_entries) {
555 rem_entries--;
556 scatter_buf++;
557 continue;
558 }
559
560 rem_entries = n_entries_per_buf;
561 scatter_idx++;
562 scatter_buf = slist[scatter_idx].vaddr;
563 }
564 }
565
566 end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
567 sizeof(struct hal_wbm_link_desc);
568 ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
569 n_link_desc, end_offset);
570
571 return 0;
572
573 err:
574 ath11k_dp_scatter_idle_link_desc_cleanup(ab);
575
576 return ret;
577 }
578
579 static void
580 ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
581 struct dp_link_desc_bank *link_desc_banks)
582 {
583 int i;
584
585 for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
586 if (link_desc_banks[i].vaddr_unaligned) {
587 dma_free_coherent(ab->dev,
588 link_desc_banks[i].size,
589 link_desc_banks[i].vaddr_unaligned,
590 link_desc_banks[i].paddr_unaligned);
591 link_desc_banks[i].vaddr_unaligned = NULL;
592 }
593 }
594 }
595
596 static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
597 struct dp_link_desc_bank *desc_bank,
598 int n_link_desc_bank,
599 int last_bank_sz)
600 {
601 struct ath11k_dp *dp = &ab->dp;
602 int i;
603 int ret = 0;
604 int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
605
606 for (i = 0; i < n_link_desc_bank; i++) {
607 if (i == (n_link_desc_bank - 1) && last_bank_sz)
608 desc_sz = last_bank_sz;
609
610 desc_bank[i].vaddr_unaligned =
611 dma_alloc_coherent(ab->dev, desc_sz,
612 &desc_bank[i].paddr_unaligned,
613 GFP_KERNEL);
614 if (!desc_bank[i].vaddr_unaligned) {
615 ret = -ENOMEM;
616 goto err;
617 }
618
619 desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
620 HAL_LINK_DESC_ALIGN);
621 desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
622 ((unsigned long)desc_bank[i].vaddr -
623 (unsigned long)desc_bank[i].vaddr_unaligned);
624 desc_bank[i].size = desc_sz;
625 }
626
627 return 0;
628
629 err:
630 ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
631
632 return ret;
633 }
634
635 void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
636 struct dp_link_desc_bank *desc_bank,
637 u32 ring_type, struct dp_srng *ring)
638 {
639 ath11k_dp_link_desc_bank_free(ab, desc_bank);
640
641 if (ring_type != HAL_RXDMA_MONITOR_DESC) {
642 ath11k_dp_srng_cleanup(ab, ring);
643 ath11k_dp_scatter_idle_link_desc_cleanup(ab);
644 }
645 }
646
647 static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
648 {
649 struct ath11k_dp *dp = &ab->dp;
650 u32 n_mpdu_link_desc, n_mpdu_queue_desc;
651 u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
652 int ret = 0;
653
654 n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
655 HAL_NUM_MPDUS_PER_LINK_DESC;
656
657 n_mpdu_queue_desc = n_mpdu_link_desc /
658 HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
659
660 n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
661 DP_AVG_MSDUS_PER_FLOW) /
662 HAL_NUM_TX_MSDUS_PER_LINK_DESC;
663
664 n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
665 DP_AVG_MSDUS_PER_MPDU) /
666 HAL_NUM_RX_MSDUS_PER_LINK_DESC;
667
668 *n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
669 n_tx_msdu_link_desc + n_rx_msdu_link_desc;
670
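/* Round the total link descriptor count up to the next power of two if
 * it is not one already.
 */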
671 if (*n_link_desc & (*n_link_desc - 1))
672 *n_link_desc = 1 << fls(*n_link_desc);
673
674 ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
675 HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
676 if (ret) {
677 ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
678 return ret;
679 }
680 return ret;
681 }
682
683 int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
684 struct dp_link_desc_bank *link_desc_banks,
685 u32 ring_type, struct hal_srng *srng,
686 u32 n_link_desc)
687 {
688 u32 tot_mem_sz;
689 u32 n_link_desc_bank, last_bank_sz;
690 u32 entry_sz, align_bytes, n_entries;
691 u32 paddr;
692 u32 *desc;
693 int i, ret;
694
695 tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
696 tot_mem_sz += HAL_LINK_DESC_ALIGN;
697
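/* Split the total link descriptor memory into banks of at most
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes (minus alignment headroom), with a
 * smaller final bank holding the remainder.
 */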
698 if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
699 n_link_desc_bank = 1;
700 last_bank_sz = tot_mem_sz;
701 } else {
702 n_link_desc_bank = tot_mem_sz /
703 (DP_LINK_DESC_ALLOC_SIZE_THRESH -
704 HAL_LINK_DESC_ALIGN);
705 last_bank_sz = tot_mem_sz %
706 (DP_LINK_DESC_ALLOC_SIZE_THRESH -
707 HAL_LINK_DESC_ALIGN);
708
709 if (last_bank_sz)
710 n_link_desc_bank += 1;
711 }
712
713 if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
714 return -EINVAL;
715
716 ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
717 n_link_desc_bank, last_bank_sz);
718 if (ret)
719 return ret;
720
721 /* Setup link desc idle list for HW internal usage */
722 entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
723 tot_mem_sz = entry_sz * n_link_desc;
724
725 /* Set up a scatter descriptor list when the total memory required exceeds DP_LINK_DESC_ALLOC_SIZE_THRESH */
726 if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
727 ring_type != HAL_RXDMA_MONITOR_DESC) {
728 ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
729 n_link_desc_bank,
730 n_link_desc,
731 last_bank_sz);
732 if (ret) {
733 ath11k_warn(ab, "failed to setup scattered idle list descriptor :%d\n",
734 ret);
735 goto fail_desc_bank_free;
736 }
737
738 return 0;
739 }
740
741 spin_lock_bh(&srng->lock);
742
743 ath11k_hal_srng_access_begin(ab, srng);
744
745 for (i = 0; i < n_link_desc_bank; i++) {
746 align_bytes = link_desc_banks[i].vaddr -
747 link_desc_banks[i].vaddr_unaligned;
748 n_entries = (link_desc_banks[i].size - align_bytes) /
749 HAL_LINK_DESC_SIZE;
750 paddr = link_desc_banks[i].paddr;
751 while (n_entries &&
752 (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
753 ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
754 i, paddr);
755 n_entries--;
756 paddr += HAL_LINK_DESC_SIZE;
757 }
758 }
759
760 ath11k_hal_srng_access_end(ab, srng);
761
762 spin_unlock_bh(&srng->lock);
763
764 return 0;
765
766 fail_desc_bank_free:
767 ath11k_dp_link_desc_bank_free(ab, link_desc_banks);
768
769 return ret;
770 }
771
772 int ath11k_dp_service_srng(struct ath11k_base *ab,
773 struct ath11k_ext_irq_grp *irq_grp,
774 int budget)
775 {
776 struct napi_struct *napi = &irq_grp->napi;
777 const struct ath11k_hw_hal_params *hal_params;
778 int grp_id = irq_grp->grp_id;
779 int work_done = 0;
780 int i, j;
781 int tot_work_done = 0;
782
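/* Service every ring mapped to this ext interrupt group, consuming at
 * most the NAPI budget across the rx processing paths.
 */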
783 for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
784 if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
785 ab->hw_params.ring_mask->tx[grp_id])
786 ath11k_dp_tx_completion_handler(ab, i);
787 }
788
789 if (ab->hw_params.ring_mask->rx_err[grp_id]) {
790 work_done = ath11k_dp_process_rx_err(ab, napi, budget);
791 budget -= work_done;
792 tot_work_done += work_done;
793 if (budget <= 0)
794 goto done;
795 }
796
797 if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
798 work_done = ath11k_dp_rx_process_wbm_err(ab,
799 napi,
800 budget);
801 budget -= work_done;
802 tot_work_done += work_done;
803
804 if (budget <= 0)
805 goto done;
806 }
807
808 if (ab->hw_params.ring_mask->rx[grp_id]) {
809 i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
810 work_done = ath11k_dp_process_rx(ab, i, napi,
811 budget);
812 budget -= work_done;
813 tot_work_done += work_done;
814 if (budget <= 0)
815 goto done;
816 }
817
818 if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
819 for (i = 0; i < ab->num_radios; i++) {
820 for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
821 int id = i * ab->hw_params.num_rxdma_per_pdev + j;
822
823 if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
824 BIT(id)) {
825 work_done =
826 ath11k_dp_rx_process_mon_rings(ab,
827 id,
828 napi, budget);
829 budget -= work_done;
830 tot_work_done += work_done;
831
832 if (budget <= 0)
833 goto done;
834 }
835 }
836 }
837 }
838
839 if (ab->hw_params.ring_mask->reo_status[grp_id])
840 ath11k_dp_process_reo_status(ab);
841
842 for (i = 0; i < ab->num_radios; i++) {
843 for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
844 int id = i * ab->hw_params.num_rxdma_per_pdev + j;
845
846 if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
847 work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
848 budget -= work_done;
849 tot_work_done += work_done;
850 }
851
852 if (budget <= 0)
853 goto done;
854
855 if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
856 struct ath11k *ar = ath11k_ab_to_ar(ab, id);
857 struct ath11k_pdev_dp *dp = &ar->dp;
858 struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
859
860 hal_params = ab->hw_params.hal_params;
861 ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
862 hal_params->rx_buf_rbm);
863 }
864 }
865 }
866 /* TODO: Implement handler for other interrupts */
867
868 done:
869 return tot_work_done;
870 }
871 EXPORT_SYMBOL(ath11k_dp_service_srng);
872
873 void ath11k_dp_pdev_free(struct ath11k_base *ab)
874 {
875 struct ath11k *ar;
876 int i;
877
878 del_timer_sync(&ab->mon_reap_timer);
879
880 for (i = 0; i < ab->num_radios; i++) {
881 ar = ab->pdevs[i].ar;
882 ath11k_dp_rx_pdev_free(ab, i);
883 ath11k_debugfs_unregister(ar);
884 ath11k_dp_rx_pdev_mon_detach(ar);
885 }
886 }
887
888 void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
889 {
890 struct ath11k *ar;
891 struct ath11k_pdev_dp *dp;
892 int i;
893 int j;
894
895 for (i = 0; i < ab->num_radios; i++) {
896 ar = ab->pdevs[i].ar;
897 dp = &ar->dp;
898 dp->mac_id = i;
899 idr_init(&dp->rx_refill_buf_ring.bufs_idr);
900 spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
901 atomic_set(&dp->num_tx_pending, 0);
902 init_waitqueue_head(&dp->tx_empty_waitq);
903 for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
904 idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
905 spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
906 }
907 idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
908 spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
909 }
910 }
911
912 int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
913 {
914 struct ath11k *ar;
915 int ret;
916 int i;
917
918 /* TODO:Per-pdev rx ring unlike tx ring which is mapped to different AC's */
919 for (i = 0; i < ab->num_radios; i++) {
920 ar = ab->pdevs[i].ar;
921 ret = ath11k_dp_rx_pdev_alloc(ab, i);
922 if (ret) {
923 ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
924 i);
925 goto err;
926 }
927 ret = ath11k_dp_rx_pdev_mon_attach(ar);
928 if (ret) {
929 ath11k_warn(ab, "failed to initialize mon pdev %d\n",
930 i);
931 goto err;
932 }
933 }
934
935 return 0;
936
937 err:
938 ath11k_dp_pdev_free(ab);
939
940 return ret;
941 }
942
943 int ath11k_dp_htt_connect(struct ath11k_dp *dp)
944 {
945 struct ath11k_htc_svc_conn_req conn_req;
946 struct ath11k_htc_svc_conn_resp conn_resp;
947 int status;
948
949 memset(&conn_req, 0, sizeof(conn_req));
950 memset(&conn_resp, 0, sizeof(conn_resp));
951
952 conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
953 conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;
954
955 /* connect to control service */
956 conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;
957
958 status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
959 &conn_resp);
960
961 if (status)
962 return status;
963
964 dp->eid = conn_resp.eid;
965
966 return 0;
967 }
968
969 static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
970 {
971 /* When v2_map_support is true: for STA mode, enable the address
972 * search index; TCL uses the ast_hash value in the descriptor.
973 * When v2_map_support is false: for STA mode, don't enable the
974 * address search index.
975 */
976 switch (arvif->vdev_type) {
977 case WMI_VDEV_TYPE_STA:
978 if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
979 arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
980 arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
981 } else {
982 arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
983 arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
984 }
985 break;
986 case WMI_VDEV_TYPE_AP:
987 case WMI_VDEV_TYPE_IBSS:
988 arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
989 arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
990 break;
991 case WMI_VDEV_TYPE_MONITOR:
992 default:
993 return;
994 }
995 }
996
997 void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
998 {
999 arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
1000 FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
1001 arvif->vdev_id) |
1002 FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
1003 ar->pdev->pdev_id);
1004
1005 /* set HTT extension valid bit to 0 by default */
1006 arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1007
1008 ath11k_dp_update_vdev_search(arvif);
1009 }
1010
1011 static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
1012 {
1013 struct ath11k_base *ab = ctx;
1014 struct sk_buff *msdu = skb;
1015
1016 dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
1017 DMA_TO_DEVICE);
1018
1019 dev_kfree_skb_any(msdu);
1020
1021 return 0;
1022 }
1023
1024 void ath11k_dp_free(struct ath11k_base *ab)
1025 {
1026 struct ath11k_dp *dp = &ab->dp;
1027 int i;
1028
1029 ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1030 HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1031
1032 ath11k_dp_srng_common_cleanup(ab);
1033
1034 ath11k_dp_reo_cmd_list_cleanup(ab);
1035
1036 for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1037 spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
1038 idr_for_each(&dp->tx_ring[i].txbuf_idr,
1039 ath11k_dp_tx_pending_cleanup, ab);
1040 idr_destroy(&dp->tx_ring[i].txbuf_idr);
1041 spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
1042 kfree(dp->tx_ring[i].tx_status);
1043 }
1044
1045 /* Deinit any SOC level resource */
1046 }
1047
1048 int ath11k_dp_alloc(struct ath11k_base *ab)
1049 {
1050 struct ath11k_dp *dp = &ab->dp;
1051 struct hal_srng *srng = NULL;
1052 size_t size = 0;
1053 u32 n_link_desc = 0;
1054 int ret;
1055 int i;
1056
1057 dp->ab = ab;
1058
1059 INIT_LIST_HEAD(&dp->reo_cmd_list);
1060 INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1061 INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
1062 spin_lock_init(&dp->reo_cmd_lock);
1063
1064 dp->reo_cmd_cache_flush_count = 0;
1065
1066 ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
1067 if (ret) {
1068 ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1069 return ret;
1070 }
1071
1072 srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1073
1074 ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
1075 HAL_WBM_IDLE_LINK, srng, n_link_desc);
1076 if (ret) {
1077 ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
1078 return ret;
1079 }
1080
1081 ret = ath11k_dp_srng_common_setup(ab);
1082 if (ret)
1083 goto fail_link_desc_cleanup;
1084
1085 size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;
1086
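/* Per TCL data ring: an IDR tracks in-flight tx buffers and tx_status is
 * a circular buffer of DP_TX_COMP_RING_SIZE completion descriptors.
 */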
1087 for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
1088 idr_init(&dp->tx_ring[i].txbuf_idr);
1089 spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
1090 dp->tx_ring[i].tcl_data_ring_id = i;
1091
1092 dp->tx_ring[i].tx_status_head = 0;
1093 dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1094 dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1095 if (!dp->tx_ring[i].tx_status) {
1096 ret = -ENOMEM;
1097 goto fail_cmn_srng_cleanup;
1098 }
1099 }
1100
1101 for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1102 ath11k_hal_tx_set_dscp_tid_map(ab, i);
1103
1104 /* Init any SOC level resource for DP */
1105
1106 return 0;
1107
1108 fail_cmn_srng_cleanup:
1109 ath11k_dp_srng_common_cleanup(ab);
1110
1111 fail_link_desc_cleanup:
1112 ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1113 HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1114
1115 return ret;
1116 }
1117
1118 static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
1119 {
1120 struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
1121 t, timer);
1122 struct ath11k_base *ab = update_timer->ab;
1123 struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];
1124
1125 spin_lock_bh(&srng->lock);
1126
1127 /* When the timer fires, the handler checks whether new TX has
1128 * happened. The handler updates the HP only when there were no TX
1129 * operations during the timeout interval, and then stops the
1130 * timer. The timer is started again when TX happens again.
1131 */
1132 if (update_timer->timer_tx_num != update_timer->tx_num) {
1133 update_timer->timer_tx_num = update_timer->tx_num;
1134 mod_timer(&update_timer->timer, jiffies +
1135 msecs_to_jiffies(update_timer->interval));
1136 } else {
1137 update_timer->started = false;
1138 ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
1139 }
1140
1141 spin_unlock_bh(&srng->lock);
1142 }
1143
1144 void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
1145 struct hal_srng *srng,
1146 struct ath11k_hp_update_timer *update_timer)
1147 {
1148 lockdep_assert_held(&srng->lock);
1149
1150 if (!ab->hw_params.supports_shadow_regs)
1151 return;
1152
1153 update_timer->tx_num++;
1154
1155 if (update_timer->started)
1156 return;
1157
1158 update_timer->started = true;
1159 update_timer->timer_tx_num = update_timer->tx_num;
1160 mod_timer(&update_timer->timer, jiffies +
1161 msecs_to_jiffies(update_timer->interval));
1162 }
1163
1164 void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
1165 struct ath11k_hp_update_timer *update_timer)
1166 {
1167 if (!ab->hw_params.supports_shadow_regs)
1168 return;
1169
1170 if (!update_timer->init)
1171 return;
1172
1173 del_timer_sync(&update_timer->timer);
1174 }
1175
1176 void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
1177 struct ath11k_hp_update_timer *update_timer,
1178 u32 interval, u32 ring_id)
1179 {
1180 if (!ab->hw_params.supports_shadow_regs)
1181 return;
1182
1183 update_timer->tx_num = 0;
1184 update_timer->timer_tx_num = 0;
1185 update_timer->ab = ab;
1186 update_timer->ring_id = ring_id;
1187 update_timer->interval = interval;
1188 update_timer->init = true;
1189 timer_setup(&update_timer->timer,
1190 ath11k_dp_shadow_timer_handler, 0);
1191 }
1192