1d8899132SKalle Valo // SPDX-License-Identifier: BSD-3-Clause-Clear
2d8899132SKalle Valo /*
3d8899132SKalle Valo * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4*05205b95SJeff Johnson * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5d8899132SKalle Valo */
6d8899132SKalle Valo
7d8899132SKalle Valo #include "core.h"
8d8899132SKalle Valo #include "debug.h"
9d8899132SKalle Valo
/* Replenish a single buffer into the direct-buffer refill SRNG.
 *
 * Maps the aligned payload of @buff for device DMA, registers the buffer
 * in the ring's IDR, and posts its address plus a lookup cookie to the
 * hardware ring so the firmware can fill it.
 *
 * Caller must hold srng->lock (asserted below).
 *
 * Returns 0 on success or a negative errno; on failure the buffer is left
 * unmapped and untracked, so ownership of @buff stays with the caller.
 */
static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
					struct ath12k_dbring *ring,
					struct ath12k_dbring_element *buff,
					gfp_t gfp)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	/* The payload area is over-allocated by (buf_align - 1) bytes so the
	 * address handed to the hardware can be aligned to ring->buf_align.
	 */
	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	/* Track the buffer under a short id so the firmware's buffer-release
	 * event can find it again via the cookie.
	 */
	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		/* Ring is full; undo the IDR allocation and DMA mapping. */
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	/* Cookie encodes both the pdev index and the IDR buffer id. */
	cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
		 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

	ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath12k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	/* access_end is still required to release the begun SRNG access. */
	ath12k_hal_srng_access_end(ab, srng);
	return ret;
}
74d8899132SKalle Valo
/* Top up the refill ring with as many buffers as it currently has free
 * entries for (capped at ring->bufs_max).
 *
 * Returns the number of entries that could NOT be filled; 0 means the
 * ring was fully replenished.
 */
static int ath12k_dbring_fill_bufs(struct ath12k *ar,
				   struct ath12k_dbring *ring,
				   gfp_t gfp)
{
	struct ath12k_dbring_element *buff;
	struct hal_srng *srng;
	struct ath12k_base *ab = ar->ab;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	/* Each element carries header + payload + alignment slack. */
	align = ring->buf_align;
	size = sizeof(*buff) + ring->buf_sz + align - 1;

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);

	for (num_remain = req_entries; num_remain > 0; num_remain--) {
		buff = kzalloc(size, gfp);
		if (!buff)
			break;

		ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
		if (ret) {
			ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff);
			break;
		}
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}
115d8899132SKalle Valo
ath12k_dbring_wmi_cfg_setup(struct ath12k * ar,struct ath12k_dbring * ring,enum wmi_direct_buffer_module id)116d8899132SKalle Valo int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
117d8899132SKalle Valo struct ath12k_dbring *ring,
118d8899132SKalle Valo enum wmi_direct_buffer_module id)
119d8899132SKalle Valo {
120d8899132SKalle Valo struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
121d8899132SKalle Valo int ret;
122d8899132SKalle Valo
123d8899132SKalle Valo if (id >= WMI_DIRECT_BUF_MAX)
124d8899132SKalle Valo return -EINVAL;
125d8899132SKalle Valo
126d8899132SKalle Valo arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
127d8899132SKalle Valo arg.module_id = id;
128d8899132SKalle Valo arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
129d8899132SKalle Valo arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
130d8899132SKalle Valo arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
131d8899132SKalle Valo arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
132d8899132SKalle Valo arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
133d8899132SKalle Valo arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
134d8899132SKalle Valo arg.num_elems = ring->bufs_max;
135d8899132SKalle Valo arg.buf_size = ring->buf_sz;
136d8899132SKalle Valo arg.num_resp_per_event = ring->num_resp_per_event;
137d8899132SKalle Valo arg.event_timeout_ms = ring->event_timeout_ms;
138d8899132SKalle Valo
139d8899132SKalle Valo ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
140d8899132SKalle Valo if (ret) {
141d8899132SKalle Valo ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
142d8899132SKalle Valo return ret;
143d8899132SKalle Valo }
144d8899132SKalle Valo
145d8899132SKalle Valo return 0;
146d8899132SKalle Valo }
147d8899132SKalle Valo
/* Record the event pacing parameters and the per-buffer callback on the
 * ring.  @handler is invoked from the buffer-release event path for each
 * buffer the firmware hands back.
 */
int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath12k *,
					 struct ath12k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->handler = handler;
	ring->event_timeout_ms = event_timeout_ms;
	ring->num_resp_per_event = num_resp_per_event;

	return 0;
}
162d8899132SKalle Valo
ath12k_dbring_buf_setup(struct ath12k * ar,struct ath12k_dbring * ring,struct ath12k_dbring_cap * db_cap)163d8899132SKalle Valo int ath12k_dbring_buf_setup(struct ath12k *ar,
164d8899132SKalle Valo struct ath12k_dbring *ring,
165d8899132SKalle Valo struct ath12k_dbring_cap *db_cap)
166d8899132SKalle Valo {
167d8899132SKalle Valo struct ath12k_base *ab = ar->ab;
168d8899132SKalle Valo struct hal_srng *srng;
169d8899132SKalle Valo int ret;
170d8899132SKalle Valo
171d8899132SKalle Valo srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
172d8899132SKalle Valo ring->bufs_max = ring->refill_srng.size /
173d8899132SKalle Valo ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);
174d8899132SKalle Valo
175d8899132SKalle Valo ring->buf_sz = db_cap->min_buf_sz;
176d8899132SKalle Valo ring->buf_align = db_cap->min_buf_align;
177d8899132SKalle Valo ring->pdev_id = db_cap->pdev_id;
178d8899132SKalle Valo ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
179d8899132SKalle Valo ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
180d8899132SKalle Valo
181d8899132SKalle Valo ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);
182d8899132SKalle Valo
183d8899132SKalle Valo return ret;
184d8899132SKalle Valo }
185d8899132SKalle Valo
ath12k_dbring_srng_setup(struct ath12k * ar,struct ath12k_dbring * ring,int ring_num,int num_entries)186d8899132SKalle Valo int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
187d8899132SKalle Valo int ring_num, int num_entries)
188d8899132SKalle Valo {
189d8899132SKalle Valo int ret;
190d8899132SKalle Valo
191d8899132SKalle Valo ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
192d8899132SKalle Valo ring_num, ar->pdev_idx, num_entries);
193d8899132SKalle Valo if (ret < 0) {
194d8899132SKalle Valo ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
195d8899132SKalle Valo ret, ring_num);
196d8899132SKalle Valo goto err;
197d8899132SKalle Valo }
198d8899132SKalle Valo
199d8899132SKalle Valo return 0;
200d8899132SKalle Valo err:
201d8899132SKalle Valo ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
202d8899132SKalle Valo return ret;
203d8899132SKalle Valo }
204d8899132SKalle Valo
/* Look up the firmware-advertised direct-buffer capability matching the
 * given pdev index and module id, copying it into @db_cap on success.
 *
 * Returns 0 if found, -EINVAL for an out-of-range module id, or -ENOENT
 * when no capability table exists or no entry matches.
 */
int ath12k_dbring_get_cap(struct ath12k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath12k_dbring_cap *db_cap)
{
	struct ath12k_dbring_cap *cap;
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		cap = &ab->db_caps[i];
		if (cap->pdev_id == pdev_idx && cap->id == id) {
			*db_cap = *cap;
			return 0;
		}
	}

	return -ENOENT;
}
229d8899132SKalle Valo
/* Handle a WMI direct-buffer release event from firmware.
 *
 * For each released entry: recover the buffer from the IDR via the cookie,
 * unmap it, hand the (aligned) data to the module's registered handler,
 * then re-post the same element back to the refill ring.
 *
 * Returns 0 on success or a negative errno for a malformed event or an
 * inactive/unsupported pdev/module.
 */
int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
				       struct ath12k_dbring_buf_release_event *ev)
{
	struct ath12k_dbring *ring = NULL;
	struct hal_srng *srng;
	struct ath12k *ar;
	struct ath12k_dbring_element *buff;
	struct ath12k_dbring_data handler_data;
	struct ath12k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = le32_to_cpu(ev->fixed.pdev_id);

	if (pdev_idx >= ab->num_radios) {
		ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	/* Buffer and metadata entries are consumed pairwise below, so their
	 * counts must agree.
	 */
	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	/* RCU section guards against the pdev going away mid-processing. */
	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	/* Only spectral currently delivers buffers this way; the spectral
	 * ring lookup appears to be handled elsewhere, leaving ring NULL
	 * here — NOTE(review): confirm whether a ring assignment is missing
	 * for WMI_DIRECT_BUF_SPECTRAL in this chunk.
	 */
	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		break;
	default:
		ring = NULL;
		ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
	/* Full element size, matching the allocation in the fill path. */
	size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

		/* Remove from the IDR before touching the buffer so no other
		 * path can find it while we process it.
		 */
		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		if (ring->handler) {
			/* Hand the handler the same aligned pointer the
			 * device DMA'd into.
			 */
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		/* Recycle the element: clear and re-post it to the ring.
		 * GFP_ATOMIC since we hold srng->lock (bh-disabled).
		 */
		memset(buff, 0, size);
		ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}
336d8899132SKalle Valo
ath12k_dbring_srng_cleanup(struct ath12k * ar,struct ath12k_dbring * ring)337d8899132SKalle Valo void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
338d8899132SKalle Valo {
339d8899132SKalle Valo ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
340d8899132SKalle Valo }
341d8899132SKalle Valo
ath12k_dbring_buf_cleanup(struct ath12k * ar,struct ath12k_dbring * ring)342d8899132SKalle Valo void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
343d8899132SKalle Valo {
344d8899132SKalle Valo struct ath12k_dbring_element *buff;
345d8899132SKalle Valo int buf_id;
346d8899132SKalle Valo
347d8899132SKalle Valo spin_lock_bh(&ring->idr_lock);
348d8899132SKalle Valo idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
349d8899132SKalle Valo idr_remove(&ring->bufs_idr, buf_id);
350d8899132SKalle Valo dma_unmap_single(ar->ab->dev, buff->paddr,
351d8899132SKalle Valo ring->buf_sz, DMA_FROM_DEVICE);
352d8899132SKalle Valo kfree(buff);
353d8899132SKalle Valo }
354d8899132SKalle Valo
355d8899132SKalle Valo idr_destroy(&ring->bufs_idr);
356d8899132SKalle Valo spin_unlock_bh(&ring->idr_lock);
357d8899132SKalle Valo }
358