// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#if defined(__FreeBSD__)
#include <asm/io.h>
#endif
#include <crypto/hash.h>
#include "core.h"
#include "dp_tx.h"
#include "hal_tx.h"
#include "hif.h"
#include "debug.h"
#include "dp_rx.h"
#include "peer.h"

static void ath11k_dp_htt_htc_tx_complete(struct ath11k_base *ab,
					  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

void ath11k_dp_peer_cleanup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;

	/* TODO: Any other peer specific DP cleanup */

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
			    addr, vdev_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	ath11k_peer_rx_tid_cleanup(ar, peer);
	peer->dp_setup_done = false;
	crypto_free_shash(peer->tfm_mmic);
	spin_unlock_bh(&ab->base_lock);
}

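/* Peer data-path setup: program the default REO destination ring for the
 * peer via WMI, then create an RX TID queue for each of the
 * IEEE80211_NUM_TIDS QoS TIDs plus one extra entry (the loop below runs
 * tid = 0..IEEE80211_NUM_TIDS inclusive), and finally set up the RX
 * fragment reassembly context. On any failure the TID queues created so
 * far are torn down under base_lock.
 */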
int ath11k_dp_peer_setup(struct ath11k *ar, int vdev_id, const u8 *addr)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_peer *peer;
	u32 reo_dest;
	int ret = 0, tid;

	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
	reo_dest = ar->dp.mac_id + 1;
	ret = ath11k_wmi_set_peer_param(ar, addr, vdev_id,
					WMI_PEER_SET_DEFAULT_ROUTING,
					DP_RX_HASH_ENABLE | (reo_dest << 1));

	if (ret) {
		ath11k_warn(ab, "failed to set default routing %d peer :%pM vdev_id :%d\n",
			    ret, addr, vdev_id);
		return ret;
	}

	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
		ret = ath11k_peer_rx_tid_setup(ar, addr, vdev_id, tid, 1, 0,
					       HAL_PN_TYPE_NONE);
		if (ret) {
			ath11k_warn(ab, "failed to setup rxd tid queue for tid %d: %d\n",
				    tid, ret);
			goto peer_clean;
		}
	}

	ret = ath11k_peer_rx_frag_setup(ar, addr, vdev_id);
	if (ret) {
		ath11k_warn(ab, "failed to setup rx defrag context\n");
		tid--;
		goto peer_clean;
	}

	/* TODO: Setup other peer specific resource used in data path */

	return 0;

peer_clean:
	spin_lock_bh(&ab->base_lock);

	peer = ath11k_peer_find(ab, vdev_id, addr);
	if (!peer) {
		ath11k_warn(ab, "failed to find the peer to del rx tid\n");
		spin_unlock_bh(&ab->base_lock);
		return -ENOENT;
	}

	for (; tid >= 0; tid--)
		ath11k_peer_rx_tid_delete(ar, peer, tid);

	spin_unlock_bh(&ab->base_lock);

	return ret;
}

void ath11k_dp_srng_cleanup(struct ath11k_base *ab, struct dp_srng *ring)
{
	if (!ring->vaddr_unaligned)
		return;

	if (ring->cached)
		kfree(ring->vaddr_unaligned);
	else
		dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
				  ring->paddr_unaligned);

	ring->vaddr_unaligned = NULL;
}

static int ath11k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
{
	int ext_group_num;
	u8 mask = 1 << ring_num;

	for (ext_group_num = 0; ext_group_num < ATH11K_EXT_IRQ_GRP_NUM_MAX;
	     ext_group_num++) {
		if (mask & grp_mask[ext_group_num])
			return ext_group_num;
	}

	return -ENOENT;
}

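/* Map a (ring type, ring number) pair to the external interrupt group
 * that services it. Each ring type selects a per-group bitmask from
 * hw_params.ring_mask in which bit ring_num marks the owning group; ring
 * types that are never serviced through an ext group return -ENOENT.
 */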
static int ath11k_dp_srng_calculate_msi_group(struct ath11k_base *ab,
					      enum hal_ring_type type, int ring_num)
{
	const u8 *grp_mask;

	switch (type) {
	case HAL_WBM2SW_RELEASE:
		if (ring_num == DP_RX_RELEASE_RING_NUM) {
			grp_mask = &ab->hw_params.ring_mask->rx_wbm_rel[0];
			ring_num = 0;
		} else {
			grp_mask = &ab->hw_params.ring_mask->tx[0];
		}
		break;
	case HAL_REO_EXCEPTION:
		grp_mask = &ab->hw_params.ring_mask->rx_err[0];
		break;
	case HAL_REO_DST:
		grp_mask = &ab->hw_params.ring_mask->rx[0];
		break;
	case HAL_REO_STATUS:
		grp_mask = &ab->hw_params.ring_mask->reo_status[0];
		break;
	case HAL_RXDMA_MONITOR_STATUS:
	case HAL_RXDMA_MONITOR_DST:
		grp_mask = &ab->hw_params.ring_mask->rx_mon_status[0];
		break;
	case HAL_RXDMA_DST:
		grp_mask = &ab->hw_params.ring_mask->rxdma2host[0];
		break;
	case HAL_RXDMA_BUF:
		grp_mask = &ab->hw_params.ring_mask->host2rxdma[0];
		break;
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_REO_CMD:
	case HAL_SW2WBM_RELEASE:
	case HAL_WBM_IDLE_LINK:
	case HAL_TCL_STATUS:
	case HAL_REO_REINJECT:
	case HAL_CE_SRC:
	case HAL_CE_DST:
	case HAL_CE_DST_STATUS:
	default:
		return -ENOENT;
	}

	return ath11k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
}

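/* Program the MSI address/data for a ring. The MSI vector range reserved
 * for "DP" is queried from the interface layer; the ring's ext interrupt
 * group picks a vector within that range (groups beyond the vector count
 * wrap around and share a vector, hence the modulo below). Rings with no
 * ext group are left with msi_addr/msi_data cleared so no MSI is raised.
 */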
static void ath11k_dp_srng_msi_setup(struct ath11k_base *ab,
				     struct hal_srng_params *ring_params,
				     enum hal_ring_type type, int ring_num)
{
	int msi_group_number, msi_data_count;
	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
	int ret;

	ret = ath11k_get_user_msi_vector(ab, "DP",
					 &msi_data_count, &msi_data_start,
					 &msi_irq_start);
	if (ret)
		return;

	msi_group_number = ath11k_dp_srng_calculate_msi_group(ab, type,
							      ring_num);
	if (msi_group_number < 0) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "ring not part of an ext_group; ring_type: %d, ring_num: %d",
			   type, ring_num);
		ring_params->msi_addr = 0;
		ring_params->msi_data = 0;
		return;
	}

	if (msi_group_number > msi_data_count) {
		ath11k_dbg(ab, ATH11K_DBG_PCI,
			   "multiple msi_groups share one msi, msi_group_num %d",
			   msi_group_number);
	}

	ath11k_get_msi_address(ab, &addr_lo, &addr_hi);

	ring_params->msi_addr = addr_lo;
	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
	ring_params->msi_data = (msi_group_number % msi_data_count)
		+ msi_data_start;
	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
}

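/* Allocate backing memory for a service ring and register it with HAL.
 * The allocation is padded by HAL_RING_BASE_ALIGN - 1 bytes so the base
 * can be aligned afterwards; REO destination and TX completion rings may
 * come from cacheable memory when the hw supports it, everything else is
 * DMA-coherent. Interrupt batch/timer thresholds are then chosen per
 * ring type. Typical usage, as seen later in this file:
 *
 *	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
 *				   0, 0, DP_REO_CMD_RING_SIZE);
 */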
int ath11k_dp_srng_setup(struct ath11k_base *ab, struct dp_srng *ring,
			 enum hal_ring_type type, int ring_num,
			 int mac_id, int num_entries)
{
	struct hal_srng_params params = { 0 };
	int entry_sz = ath11k_hal_srng_get_entrysize(ab, type);
	int max_entries = ath11k_hal_srng_get_max_entries(ab, type);
	int ret;
	bool cached = false;

	if (max_entries < 0 || entry_sz < 0)
		return -EINVAL;

	if (num_entries > max_entries)
		num_entries = max_entries;

	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;

	if (ab->hw_params.alloc_cacheable_memory) {
		/* Allocate the reo dst and tx completion rings from cacheable memory */
		switch (type) {
		case HAL_REO_DST:
		case HAL_WBM2SW_RELEASE:
			cached = true;
			break;
		default:
			cached = false;
		}

		if (cached) {
			ring->vaddr_unaligned = kzalloc(ring->size, GFP_KERNEL);
			ring->paddr_unaligned = virt_to_phys(ring->vaddr_unaligned);
		}
	}

	if (!cached)
		ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
							   &ring->paddr_unaligned,
							   GFP_KERNEL);

	if (!ring->vaddr_unaligned)
		return -ENOMEM;

	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
		      (unsigned long)ring->vaddr_unaligned);

	params.ring_base_vaddr = ring->vaddr;
	params.ring_base_paddr = ring->paddr;
	params.num_entries = num_entries;
	ath11k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);

	switch (type) {
	case HAL_REO_DST:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_RXDMA_BUF:
	case HAL_RXDMA_MONITOR_BUF:
	case HAL_RXDMA_MONITOR_STATUS:
		params.low_threshold = num_entries >> 3;
		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
		params.intr_batch_cntr_thres_entries = 0;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
		break;
	case HAL_WBM2SW_RELEASE:
		if (ring_num < 3) {
			params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
			params.intr_timer_thres_us =
					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
			break;
		}
		/* fall through when ring_num >= 3 */
		fallthrough;
	case HAL_REO_EXCEPTION:
	case HAL_REO_REINJECT:
	case HAL_REO_CMD:
	case HAL_REO_STATUS:
	case HAL_TCL_DATA:
	case HAL_TCL_CMD:
	case HAL_TCL_STATUS:
	case HAL_WBM_IDLE_LINK:
	case HAL_SW2WBM_RELEASE:
	case HAL_RXDMA_DST:
	case HAL_RXDMA_MONITOR_DST:
	case HAL_RXDMA_MONITOR_DESC:
		params.intr_batch_cntr_thres_entries =
					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
		break;
	case HAL_RXDMA_DIR_BUF:
		break;
	default:
		ath11k_warn(ab, "Not a valid ring type in dp :%d\n", type);
		return -EINVAL;
	}

	if (cached) {
		params.flags |= HAL_SRNG_FLAGS_CACHED;
		ring->cached = 1;
	}

	ret = ath11k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
	if (ret < 0) {
		ath11k_warn(ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		return ret;
	}

	ring->ring_id = ret;

	return 0;
}

void ath11k_dp_stop_shadow_timers(struct ath11k_base *ab)
{
	int i;

	if (!ab->hw_params.supports_shadow_regs)
		return;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++)
		ath11k_dp_shadow_stop_timer(ab, &ab->dp.tx_ring_timer[i]);

	ath11k_dp_shadow_stop_timer(ab, &ab->dp.reo_cmd_timer);
}

static void ath11k_dp_srng_common_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_stop_shadow_timers(ab);
	ath11k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
		ath11k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
	}
	ath11k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
	ath11k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_except_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
	ath11k_dp_srng_cleanup(ab, &dp->reo_status_ring);
}

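/* Bring up the SoC-level rings shared by all pdevs: WBM descriptor
 * release, TCL command/status, one TCL data + WBM completion ring pair
 * per hardware TX ring (with a shadow-register update timer each), then
 * the REO reinject/release/exception/command/status rings. Any failure
 * unwinds everything set up so far via ath11k_dp_srng_common_cleanup().
 */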
static int ath11k_dp_srng_common_setup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng;
	int i, ret;
	u8 tcl_num, wbm_num;

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
				   HAL_SW2WBM_RELEASE, 0, 0,
				   DP_WBM_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up wbm2sw_release ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
				   DP_TCL_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
				   0, 0, DP_TCL_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
		goto err;
	}

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		tcl_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].tcl_ring_num;
		wbm_num = ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num;

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
					   HAL_TCL_DATA, tcl_num, 0,
					   ab->hw_params.tx_ring_size);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		ret = ath11k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
					   HAL_WBM2SW_RELEASE, wbm_num, 0,
					   DP_TX_COMP_RING_SIZE);
		if (ret) {
			ath11k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
				    i, ret);
			goto err;
		}

		srng = &ab->hal.srng_list[dp->tx_ring[i].tcl_data_ring.ring_id];
		ath11k_hal_tx_init_data_ring(ab, srng);

		ath11k_dp_shadow_init_timer(ab, &dp->tx_ring_timer[i],
					    ATH11K_SHADOW_DP_TIMER_INTERVAL,
					    dp->tx_ring[i].tcl_data_ring.ring_id);
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
				   0, 0, DP_REO_REINJECT_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_reinject ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
				   DP_RX_RELEASE_RING_NUM, 0, DP_RX_RELEASE_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_exception ring :%d\n",
			    ret);
		goto err;
	}

	ret = ath11k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
				   0, 0, DP_REO_CMD_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
		goto err;
	}

	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	ath11k_hal_reo_init_cmd_ring(ab, srng);

	ath11k_dp_shadow_init_timer(ab, &dp->reo_cmd_timer,
				    ATH11K_SHADOW_CTRL_TIMER_INTERVAL,
				    dp->reo_cmd_ring.ring_id);

	ret = ath11k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
				   0, 0, DP_REO_STATUS_RING_SIZE);
	if (ret) {
		ath11k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
		goto err;
	}

	/* When hash-based routing of rx packets is enabled, 32 entries are
	 * configured to map the hash values to the rings.
	 */
	ab->hw_params.hw_ops->reo_setup(ab);

	return 0;

err:
	ath11k_dp_srng_common_cleanup(ab);

	return ret;
}

static void ath11k_dp_scatter_idle_link_desc_cleanup(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	int i;

	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
		if (!slist[i].vaddr)
			continue;

		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
				  slist[i].vaddr, slist[i].paddr);
		slist[i].vaddr = NULL;
	}
}

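/* When the idle link descriptor list is too large for a single ring, the
 * descriptors are published to the hardware through a scatter list
 * instead: each scatter buffer holds n_entries_per_buf link descriptor
 * addresses, and the walk below copies every link descriptor bank into
 * consecutive scatter buffers before handing the list to HAL.
 */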
static int ath11k_dp_scatter_idle_link_desc_setup(struct ath11k_base *ab,
						  int size,
						  u32 n_link_desc_bank,
						  u32 n_link_desc,
						  u32 last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
	u32 n_entries_per_buf;
	int num_scatter_buf, scatter_idx;
	struct hal_wbm_link_desc *scatter_buf;
	int align_bytes, n_entries;
	dma_addr_t paddr;
	int rem_entries;
	int i;
	int ret = 0;
	u32 end_offset;

	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
			    ath11k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);

	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
		return -EINVAL;

	for (i = 0; i < num_scatter_buf; i++) {
		slist[i].vaddr = dma_alloc_coherent(ab->dev,
						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
						    &slist[i].paddr, GFP_KERNEL);
		if (!slist[i].vaddr) {
			ret = -ENOMEM;
			goto err;
		}
	}

	scatter_idx = 0;
	scatter_buf = slist[scatter_idx].vaddr;
	rem_entries = n_entries_per_buf;

	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries) {
			ath11k_hal_set_link_desc_addr(scatter_buf, i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
			if (rem_entries) {
				rem_entries--;
				scatter_buf++;
				continue;
			}

			rem_entries = n_entries_per_buf;
			scatter_idx++;
			scatter_buf = slist[scatter_idx].vaddr;
		}
	}

	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
		     sizeof(struct hal_wbm_link_desc);
	ath11k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
					n_link_desc, end_offset);

	return 0;

err:
	ath11k_dp_scatter_idle_link_desc_cleanup(ab);

	return ret;
}

static void
ath11k_dp_link_desc_bank_free(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks)
{
	int i;

	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
		if (link_desc_banks[i].vaddr_unaligned) {
			dma_free_coherent(ab->dev,
					  link_desc_banks[i].size,
					  link_desc_banks[i].vaddr_unaligned,
					  link_desc_banks[i].paddr_unaligned);
			link_desc_banks[i].vaddr_unaligned = NULL;
		}
	}
}

static int ath11k_dp_link_desc_bank_alloc(struct ath11k_base *ab,
					  struct dp_link_desc_bank *desc_bank,
					  int n_link_desc_bank,
					  int last_bank_sz)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;
	int ret = 0;
	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;

	for (i = 0; i < n_link_desc_bank; i++) {
		if (i == (n_link_desc_bank - 1) && last_bank_sz)
			desc_sz = last_bank_sz;

		desc_bank[i].vaddr_unaligned =
					dma_alloc_coherent(ab->dev, desc_sz,
							   &desc_bank[i].paddr_unaligned,
							   GFP_KERNEL);
		if (!desc_bank[i].vaddr_unaligned) {
			ret = -ENOMEM;
			goto err;
		}

		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
					       HAL_LINK_DESC_ALIGN);
		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
				     ((unsigned long)desc_bank[i].vaddr -
				      (unsigned long)desc_bank[i].vaddr_unaligned);
		desc_bank[i].size = desc_sz;
	}

	return 0;

err:
	ath11k_dp_link_desc_bank_free(ab, dp->link_desc_banks);

	return ret;
}

void ath11k_dp_link_desc_cleanup(struct ath11k_base *ab,
				 struct dp_link_desc_bank *desc_bank,
				 u32 ring_type, struct dp_srng *ring)
{
	ath11k_dp_link_desc_bank_free(ab, desc_bank);

	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
		ath11k_dp_srng_cleanup(ab, ring);
		ath11k_dp_scatter_idle_link_desc_cleanup(ab);
	}
}

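/* Size the WBM idle link ring: the required number of link descriptors
 * is the sum of the MPDU link, MPDU queue, TX MSDU link and RX MSDU link
 * descriptor estimates derived from the DP_* averages. The total is then
 * rounded up to the next power of two when it is not one already, e.g. a
 * computed total of 9216 (fls(9216) == 14) becomes 1 << 14 == 16384.
 */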
static int ath11k_wbm_idle_ring_setup(struct ath11k_base *ab, u32 *n_link_desc)
{
	struct ath11k_dp *dp = &ab->dp;
	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
	int ret = 0;

	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
			   HAL_NUM_MPDUS_PER_LINK_DESC;

	n_mpdu_queue_desc = n_mpdu_link_desc /
			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;

	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
			       DP_AVG_MSDUS_PER_FLOW) /
			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;

	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
			       DP_AVG_MSDUS_PER_MPDU) /
			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;

	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
		       n_tx_msdu_link_desc + n_rx_msdu_link_desc;

	if (*n_link_desc & (*n_link_desc - 1))
		*n_link_desc = 1 << fls(*n_link_desc);

	ret = ath11k_dp_srng_setup(ab, &dp->wbm_idle_ring,
				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}
	return ret;
}

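/* Allocate the link descriptor banks and publish them to the hardware.
 * The total descriptor memory is split into banks of at most
 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes (each bank keeps
 * HAL_LINK_DESC_ALIGN spare bytes for alignment, hence the reduced
 * per-bank capacity in the division below). If the idle list exceeds one
 * bank threshold it is handed over through the scatter mechanism above;
 * otherwise the descriptors are written directly into the WBM idle link
 * ring under srng->lock.
 */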
int ath11k_dp_link_desc_setup(struct ath11k_base *ab,
			      struct dp_link_desc_bank *link_desc_banks,
			      u32 ring_type, struct hal_srng *srng,
			      u32 n_link_desc)
{
	u32 tot_mem_sz;
	u32 n_link_desc_bank, last_bank_sz;
	u32 entry_sz, align_bytes, n_entries;
	u32 paddr;
	u32 *desc;
	int i, ret;

	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
	tot_mem_sz += HAL_LINK_DESC_ALIGN;

	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
		n_link_desc_bank = 1;
		last_bank_sz = tot_mem_sz;
	} else {
		n_link_desc_bank = tot_mem_sz /
				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				    HAL_LINK_DESC_ALIGN);
		last_bank_sz = tot_mem_sz %
			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
				HAL_LINK_DESC_ALIGN);

		if (last_bank_sz)
			n_link_desc_bank += 1;
	}

	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
		return -EINVAL;

	ret = ath11k_dp_link_desc_bank_alloc(ab, link_desc_banks,
					     n_link_desc_bank, last_bank_sz);
	if (ret)
		return ret;

	/* Setup link desc idle list for HW internal usage */
	entry_sz = ath11k_hal_srng_get_entrysize(ab, ring_type);
	tot_mem_sz = entry_sz * n_link_desc;

	/* Setup scatter desc list when the total memory requirement is more */
	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
	    ring_type != HAL_RXDMA_MONITOR_DESC) {
		ret = ath11k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
							     n_link_desc_bank,
							     n_link_desc,
							     last_bank_sz);
		if (ret) {
			ath11k_warn(ab, "failed to setup scatter idle list descriptor :%d\n",
				    ret);
			goto fail_desc_bank_free;
		}

		return 0;
	}

	spin_lock_bh(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	for (i = 0; i < n_link_desc_bank; i++) {
#if defined(__linux__)
		align_bytes = link_desc_banks[i].vaddr -
			      link_desc_banks[i].vaddr_unaligned;
#elif defined(__FreeBSD__)
		align_bytes = (uintptr_t)link_desc_banks[i].vaddr -
			      (uintptr_t)link_desc_banks[i].vaddr_unaligned;
#endif
		n_entries = (link_desc_banks[i].size - align_bytes) /
			    HAL_LINK_DESC_SIZE;
		paddr = link_desc_banks[i].paddr;
		while (n_entries &&
		       (desc = ath11k_hal_srng_src_get_next_entry(ab, srng))) {
			ath11k_hal_set_link_desc_addr((struct hal_wbm_link_desc *)desc,
						      i, paddr);
			n_entries--;
			paddr += HAL_LINK_DESC_SIZE;
		}
	}

	ath11k_hal_srng_access_end(ab, srng);

	spin_unlock_bh(&srng->lock);

	return 0;

fail_desc_bank_free:
	ath11k_dp_link_desc_bank_free(ab, link_desc_banks);

	return ret;
}

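/* NAPI poll entry point for one external interrupt group. Ring classes
 * are serviced in a fixed order: TX completions, REO exceptions, WBM
 * errors, REO destination RX, monitor status rings, REO status,
 * rxdma2host errors and host2rxdma refill, with the remaining budget
 * decremented after each RX-type handler. Servicing stops as soon as the
 * budget is exhausted and the work done so far is reported back to NAPI.
 */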
int ath11k_dp_service_srng(struct ath11k_base *ab,
			   struct ath11k_ext_irq_grp *irq_grp,
			   int budget)
{
	struct napi_struct *napi = &irq_grp->napi;
	const struct ath11k_hw_hal_params *hal_params;
	int grp_id = irq_grp->grp_id;
	int work_done = 0;
	int i, j;
	int tot_work_done = 0;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		if (BIT(ab->hw_params.hal_params->tcl2wbm_rbm_map[i].wbm_ring_num) &
		    ab->hw_params.ring_mask->tx[grp_id])
			ath11k_dp_tx_completion_handler(ab, i);
	}

	if (ab->hw_params.ring_mask->rx_err[grp_id]) {
		work_done = ath11k_dp_process_rx_err(ab, napi, budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_wbm_rel[grp_id]) {
		work_done = ath11k_dp_rx_process_wbm_err(ab,
							 napi,
							 budget);
		budget -= work_done;
		tot_work_done += work_done;

		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx[grp_id]) {
		i = fls(ab->hw_params.ring_mask->rx[grp_id]) - 1;
		work_done = ath11k_dp_process_rx(ab, i, napi,
						 budget);
		budget -= work_done;
		tot_work_done += work_done;
		if (budget <= 0)
			goto done;
	}

	if (ab->hw_params.ring_mask->rx_mon_status[grp_id]) {
		for (i = 0; i < ab->num_radios; i++) {
			for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
				int id = i * ab->hw_params.num_rxmda_per_pdev + j;

				if (ab->hw_params.ring_mask->rx_mon_status[grp_id] &
				    BIT(id)) {
					work_done =
					ath11k_dp_rx_process_mon_rings(ab,
								       id,
								       napi, budget);
					budget -= work_done;
					tot_work_done += work_done;

					if (budget <= 0)
						goto done;
				}
			}
		}
	}

	if (ab->hw_params.ring_mask->reo_status[grp_id])
		ath11k_dp_process_reo_status(ab);

	for (i = 0; i < ab->num_radios; i++) {
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			int id = i * ab->hw_params.num_rxmda_per_pdev + j;

			if (ab->hw_params.ring_mask->rxdma2host[grp_id] & BIT(id)) {
				work_done = ath11k_dp_process_rxdma_err(ab, id, budget);
				budget -= work_done;
				tot_work_done += work_done;
			}

			if (budget <= 0)
				goto done;

			if (ab->hw_params.ring_mask->host2rxdma[grp_id] & BIT(id)) {
				struct ath11k *ar = ath11k_ab_to_ar(ab, id);
				struct ath11k_pdev_dp *dp = &ar->dp;
				struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;

				hal_params = ab->hw_params.hal_params;
				ath11k_dp_rxbufs_replenish(ab, id, rx_ring, 0,
							   hal_params->rx_buf_rbm);
			}
		}
	}
	/* TODO: Implement handler for other interrupts */

done:
	return tot_work_done;
}
EXPORT_SYMBOL(ath11k_dp_service_srng);

void ath11k_dp_pdev_free(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int i;

	del_timer_sync(&ab->mon_reap_timer);

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ath11k_dp_rx_pdev_free(ab, i);
		ath11k_debugfs_unregister(ar);
		ath11k_dp_rx_pdev_mon_detach(ar);
	}
}

void ath11k_dp_pdev_pre_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	struct ath11k_pdev_dp *dp;
	int i;
	int j;

	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		dp = &ar->dp;
		dp->mac_id = i;
		idr_init(&dp->rx_refill_buf_ring.bufs_idr);
		spin_lock_init(&dp->rx_refill_buf_ring.idr_lock);
		atomic_set(&dp->num_tx_pending, 0);
		init_waitqueue_head(&dp->tx_empty_waitq);
		for (j = 0; j < ab->hw_params.num_rxmda_per_pdev; j++) {
			idr_init(&dp->rx_mon_status_refill_ring[j].bufs_idr);
			spin_lock_init(&dp->rx_mon_status_refill_ring[j].idr_lock);
		}
		idr_init(&dp->rxdma_mon_buf_ring.bufs_idr);
		spin_lock_init(&dp->rxdma_mon_buf_ring.idr_lock);
	}
}

int ath11k_dp_pdev_alloc(struct ath11k_base *ab)
{
	struct ath11k *ar;
	int ret;
	int i;

	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
	for (i = 0; i < ab->num_radios; i++) {
		ar = ab->pdevs[i].ar;
		ret = ath11k_dp_rx_pdev_alloc(ab, i);
		if (ret) {
			ath11k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
				    i);
			goto err;
		}
		ret = ath11k_dp_rx_pdev_mon_attach(ar);
		if (ret) {
			ath11k_warn(ab, "failed to initialize mon pdev %d\n",
				    i);
			goto err;
		}
	}

	return 0;

err:
	ath11k_dp_pdev_free(ab);

	return ret;
}

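/* Connect the HTT endpoint over HTC: register the TX-complete and T2H
 * message handlers, request the ATH11K_HTC_SVC_ID_HTT_DATA_MSG service,
 * and store the endpoint id from the connect response for later HTT
 * sends.
 */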
int ath11k_dp_htt_connect(struct ath11k_dp *dp)
{
	struct ath11k_htc_svc_conn_req conn_req;
	struct ath11k_htc_svc_conn_resp conn_resp;
	int status;

	memset(&conn_req, 0, sizeof(conn_req));
	memset(&conn_resp, 0, sizeof(conn_resp));

	conn_req.ep_ops.ep_tx_complete = ath11k_dp_htt_htc_tx_complete;
	conn_req.ep_ops.ep_rx_complete = ath11k_dp_htt_htc_t2h_msg_handler;

	/* connect to control service */
	conn_req.service_id = ATH11K_HTC_SVC_ID_HTT_DATA_MSG;

	status = ath11k_htc_connect_service(&dp->ab->htc, &conn_req,
					    &conn_resp);

	if (status)
		return status;

	dp->eid = conn_resp.eid;

	return 0;
}

static void ath11k_dp_update_vdev_search(struct ath11k_vif *arvif)
{
	/* When v2_map_support is true: for STA mode, enable address
	 * search index; TCL uses the ast_hash value in the descriptor.
	 * When v2_map_support is false: for STA mode, don't enable
	 * address search index.
	 */
	switch (arvif->vdev_type) {
	case WMI_VDEV_TYPE_STA:
		if (arvif->ar->ab->hw_params.htt_peer_map_v2) {
			arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_INDEX;
		} else {
			arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
			arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		}
		break;
	case WMI_VDEV_TYPE_AP:
	case WMI_VDEV_TYPE_IBSS:
		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
		break;
	case WMI_VDEV_TYPE_MONITOR:
	default:
		return;
	}
}

void ath11k_dp_vdev_tx_attach(struct ath11k *ar, struct ath11k_vif *arvif)
{
	arvif->tcl_metadata |= FIELD_PREP(HTT_TCL_META_DATA_TYPE, 1) |
			       FIELD_PREP(HTT_TCL_META_DATA_VDEV_ID,
					  arvif->vdev_id) |
			       FIELD_PREP(HTT_TCL_META_DATA_PDEV_ID,
					  ar->pdev->pdev_id);

	/* set HTT extension valid bit to 0 by default */
	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;

	ath11k_dp_update_vdev_search(arvif);
}

static int ath11k_dp_tx_pending_cleanup(int buf_id, void *skb, void *ctx)
{
	struct ath11k_base *ab = (struct ath11k_base *)ctx;
	struct sk_buff *msdu = skb;

	dma_unmap_single(ab->dev, ATH11K_SKB_CB(msdu)->paddr, msdu->len,
			 DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	return 0;
}

void ath11k_dp_free(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	int i;

	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	ath11k_dp_srng_common_cleanup(ab);

	ath11k_dp_reo_cmd_list_cleanup(ab);

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		spin_lock_bh(&dp->tx_ring[i].tx_idr_lock);
		idr_for_each(&dp->tx_ring[i].txbuf_idr,
			     ath11k_dp_tx_pending_cleanup, ab);
		idr_destroy(&dp->tx_ring[i].txbuf_idr);
		spin_unlock_bh(&dp->tx_ring[i].tx_idr_lock);
		kfree(dp->tx_ring[i].tx_status);
	}

	/* Deinit any SOC level resource */
}

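/* SoC-level DP init: size and create the WBM idle link ring, populate it
 * with link descriptors, bring up the common rings, and allocate one TX
 * buffer IDR plus a DP_TX_COMP_RING_SIZE deep status FIFO per hardware
 * TX ring before programming the DSCP-to-TID map tables. Failures unwind
 * in reverse order.
 */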
int ath11k_dp_alloc(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct hal_srng *srng = NULL;
	size_t size = 0;
	u32 n_link_desc = 0;
	int ret;
	int i;

	dp->ab = ab;

	INIT_LIST_HEAD(&dp->reo_cmd_list);
	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
	INIT_LIST_HEAD(&dp->dp_full_mon_mpdu_list);
	spin_lock_init(&dp->reo_cmd_lock);

	dp->reo_cmd_cache_flush_count = 0;

	ret = ath11k_wbm_idle_ring_setup(ab, &n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
		return ret;
	}

	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];

	ret = ath11k_dp_link_desc_setup(ab, dp->link_desc_banks,
					HAL_WBM_IDLE_LINK, srng, n_link_desc);
	if (ret) {
		ath11k_warn(ab, "failed to setup link desc: %d\n", ret);
		return ret;
	}

	ret = ath11k_dp_srng_common_setup(ab);
	if (ret)
		goto fail_link_desc_cleanup;

	size = sizeof(struct hal_wbm_release_ring) * DP_TX_COMP_RING_SIZE;

	for (i = 0; i < ab->hw_params.max_tx_ring; i++) {
		idr_init(&dp->tx_ring[i].txbuf_idr);
		spin_lock_init(&dp->tx_ring[i].tx_idr_lock);
		dp->tx_ring[i].tcl_data_ring_id = i;

		dp->tx_ring[i].tx_status_head = 0;
		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
		if (!dp->tx_ring[i].tx_status) {
			ret = -ENOMEM;
			goto fail_cmn_srng_cleanup;
		}
	}

	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
		ath11k_hal_tx_set_dscp_tid_map(ab, i);

	/* Init any SOC level resource for DP */

	return 0;

fail_cmn_srng_cleanup:
	ath11k_dp_srng_common_cleanup(ab);

fail_link_desc_cleanup:
	ath11k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);

	return ret;
}

static void ath11k_dp_shadow_timer_handler(struct timer_list *t)
{
	struct ath11k_hp_update_timer *update_timer = from_timer(update_timer,
								 t, timer);
	struct ath11k_base *ab = update_timer->ab;
	struct hal_srng *srng = &ab->hal.srng_list[update_timer->ring_id];

	spin_lock_bh(&srng->lock);

	/* When the timer fires, the handler checks whether new TX has
	 * happened. It updates the HP only when there were no TX
	 * operations during the timeout interval, and then stops the
	 * timer. The timer is started again when TX happens again.
	 */
	if (update_timer->timer_tx_num != update_timer->tx_num) {
		update_timer->timer_tx_num = update_timer->tx_num;
		mod_timer(&update_timer->timer, jiffies +
			  msecs_to_jiffies(update_timer->interval));
	} else {
		update_timer->started = false;
		ath11k_hal_srng_shadow_update_hp_tp(ab, srng);
	}

	spin_unlock_bh(&srng->lock);
}

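/* Shadow-register head pointer update timers: on targets that write ring
 * pointers through shadow registers, every TX bumps tx_num and (re)arms
 * the per-ring timer; the handler above only flushes the head/tail
 * pointers to hardware once a full interval passes with no new TX.
 * Callers must hold srng->lock, as asserted below.
 */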
void ath11k_dp_shadow_start_timer(struct ath11k_base *ab,
				  struct hal_srng *srng,
				  struct ath11k_hp_update_timer *update_timer)
{
	lockdep_assert_held(&srng->lock);

	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num++;

	if (update_timer->started)
		return;

	update_timer->started = true;
	update_timer->timer_tx_num = update_timer->tx_num;
	mod_timer(&update_timer->timer, jiffies +
		  msecs_to_jiffies(update_timer->interval));
}

void ath11k_dp_shadow_stop_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	if (!update_timer->init)
		return;

	del_timer_sync(&update_timer->timer);
}

void ath11k_dp_shadow_init_timer(struct ath11k_base *ab,
				 struct ath11k_hp_update_timer *update_timer,
				 u32 interval, u32 ring_id)
{
	if (!ab->hw_params.supports_shadow_regs)
		return;

	update_timer->tx_num = 0;
	update_timer->timer_tx_num = 0;
	update_timer->ab = ab;
	update_timer->ring_id = ring_id;
	update_timer->interval = interval;
	update_timer->init = true;
	timer_setup(&update_timer->timer,
		    ath11k_dp_shadow_timer_handler, 0);
}