// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <crypto/hash.h>
#include <linux/export.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
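	/* A note on the routing value passed below (derived from the
	 * expression itself, not from firmware documentation): bit 0
	 * (DP_RX_HASH_ENABLE) requests hash-based REO routing, and the REO
	 * destination ring id is carried in the bits above it, e.g. mac_id 0
	 * maps to reo_dest ring 1.
	 */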
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

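	/* Set up an RX reorder (TID) queue for every TID from 0 through
	 * IEEE80211_NUM_TIDS, i.e. one extra queue beyond the QoS TIDs.
	 */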
	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		dma_free_noncoherent(ab->dev, ring->size, ring->vaddr_unaligned,
				     ring->paddr_unaligned, DMA_FROM_DEVICE);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d,ring_num %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

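	/* Program the MSI address/data this ring should fire. When there are
	 * more MSI groups than vectors, groups wrap around and share vectors
	 * (the debug message above flags that case).
	 */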
	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = {};
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

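	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so that the ring
	 * base address can be aligned to HAL_RING_BASE_ALIGN further below.
	 */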
	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}
	}

	if (cached)
		ring->vaddr_unaligned = dma_alloc_noncoherent(ab->dev, ring->size,
							      &ring->paddr_unaligned,
							      DMA_FROM_DEVICE,
							      GFP_KERNEL);
	else
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
			HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
				HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
				HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
			HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash-based routing of rx packets is enabled, 32 entries are
	 * configured to map the hash values to the rings.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
		ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

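	/* Walk each link descriptor bank and program its descriptor addresses
	 * into the scatter buffers, moving on to the next scatter buffer once
	 * the current one has been filled.
	 */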
	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			link_desc_banks[i].vaddr_unaligned;
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
			dma_alloc_coherent(ab->dev, desc_sz,
					   &desc_bank[i].paddr_unaligned,
					   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
		HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
		HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
		HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
		HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

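	/* If the total is not already a power of two, round it up to the next
	 * power of two before sizing the WBM idle link ring.
	 */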
	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

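	/* Split the total into banks of at most DP_LINK_DESC_ALLOC_SIZE_THRESH
	 * bytes each; any remainder goes into a final, smaller bank.
	 */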
	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
			(DP_LINK_DESC_ALLOC_SIZE_THRESH -
			 HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			(DP_LINK_DESC_ALLOC_SIZE_THRESH -
			 HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scattering idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
		align_bytes = link_desc_banks[i].vaddr -
			link_desc_banks[i].vaddr_unaligned;
		n_entries = (link_desc_banks[i].size - align_bytes) /
			HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

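	/* Service each ring type whose bit is set for this interrupt group,
	 * trimming the NAPI budget as work completes and bailing out once the
	 * budget is exhausted.
	 */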
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxdma_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
				    BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxdma_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	timer_delete_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxdma_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO:Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable the address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable the
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

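		/* Per-ring buffer for pending TX completion statuses, tracked
		 * with head/tail indices and sized to DP_TX_COMP_RING_SIZE
		 * entries (the size computed above).
		 */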
		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = timer_container_of(update_timer,
									 t,
									 timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether any new TX has
	 * happened. It updates the HP only when there were no TX operations
	 * during the timeout interval, and then stops the timer. The timer is
	 * started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	timer_delete_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}