// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "debug.h"

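/*
 * Map one direct-buffer element for DMA and post it to the refill SRNG.
 * The payload is aligned to ring->buf_align, the element is tracked in the
 * ring's IDR, and the pdev index plus buf_id are encoded into the cookie
 * written to the ring descriptor.  The caller must hold srng->lock.
 */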
static int ath12k_dbring_bufs_replenish(struct ath12k *ar,
					struct ath12k_dbring *ring,
					struct ath12k_dbring_element *buff,
					gfp_t gfp)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath12k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, gfp);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath12k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = u32_encode_bits(ar->pdev_idx, DP_RXDMA_BUF_COOKIE_PDEV_ID) |
		 u32_encode_bits(buf_id, DP_RXDMA_BUF_COOKIE_BUF_ID);

	ath12k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath12k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath12k_hal_srng_access_end(ab, srng);
	return ret;
}

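/*
 * Allocate and post as many buffers as the refill SRNG currently has free
 * entries for (bounded by ring->bufs_max).  Returns the number of entries
 * that could not be replenished.
 */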
static int ath12k_dbring_fill_bufs(struct ath12k *ar,
				   struct ath12k_dbring *ring,
				   gfp_t gfp)
{
	struct ath12k_dbring_element *buff;
	struct hal_srng *srng;
	struct ath12k_base *ab = ar->ab;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath12k_hal_srng_src_num_free(ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = sizeof(*buff) + ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(size, gfp);
		if (!buff)
			break;

		ret = ath12k_dbring_bufs_replenish(ar, ring, buff, gfp);
		if (ret) {
			ath12k_warn(ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

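/*
 * Hand the DMA ring configuration (base address, head/tail index addresses,
 * element count, buffer size and event moderation parameters) to the
 * firmware via ath12k_wmi_pdev_dma_ring_cfg().
 */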
int ath12k_dbring_wmi_cfg_setup(struct ath12k *ar,
				struct ath12k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath12k_wmi_pdev_dma_ring_cfg_arg arg = {0};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	arg.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
	arg.module_id = id;
	arg.base_paddr_lo = lower_32_bits(ring->refill_srng.paddr);
	arg.base_paddr_hi = upper_32_bits(ring->refill_srng.paddr);
	arg.head_idx_paddr_lo = lower_32_bits(ring->hp_addr);
	arg.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	arg.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	arg.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	arg.num_elems = ring->bufs_max;
	arg.buf_size = ring->buf_sz;
	arg.num_resp_per_event = ring->num_resp_per_event;
	arg.event_timeout_ms = ring->event_timeout_ms;

	ret = ath12k_wmi_pdev_dma_ring_cfg(ar, &arg);
	if (ret) {
		ath12k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

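/*
 * Store the per-event response count, event timeout and the completion
 * handler invoked for each released buffer.
 */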
int ath12k_dbring_set_cfg(struct ath12k *ar, struct ath12k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath12k *,
					 struct ath12k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

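/*
 * Derive the ring parameters (element count, buffer size/alignment,
 * head/tail pointer addresses) from the refill SRNG and the advertised
 * capability, then pre-fill the ring with buffers.
 */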
int ath12k_dbring_buf_setup(struct ath12k *ar,
			    struct ath12k_dbring *ring,
			    struct ath12k_dbring_cap *db_cap)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath12k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	ring->tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dbring_fill_bufs(ar, ring, GFP_KERNEL);

	return ret;
}

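/* Allocate the HAL_RXDMA_DIR_BUF refill SRNG backing this direct-buffer ring. */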
int ath12k_dbring_srng_setup(struct ath12k *ar, struct ath12k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath12k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath12k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

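/*
 * Look up the direct-buffer capability advertised by firmware for the given
 * pdev/module combination.
 */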
int ath12k_dbring_get_cap(struct ath12k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath12k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

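/*
 * Handle a WMI direct-buffer release event: for each released entry, look up
 * the buffer by the cookie's buf_id, unmap it, pass the aligned payload and
 * metadata to the registered handler, then re-post the buffer to the refill
 * SRNG.
 */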
int ath12k_dbring_buffer_release_event(struct ath12k_base *ab,
				       struct ath12k_dbring_buf_release_event *ev)
{
	struct ath12k_dbring *ring = NULL;
	struct hal_srng *srng;
	struct ath12k *ar;
	struct ath12k_dbring_element *buff;
	struct ath12k_dbring_data handler_data;
	struct ath12k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = le32_to_cpu(ev->fixed.pdev_id);

	if (pdev_idx >= ab->num_radios) {
		ath12k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath12k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		break;
	default:
		ring = NULL;
		ath12k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = le32_to_cpu(ev->fixed.num_buf_release_entry);
	size = sizeof(*buff) + ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath12k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = u32_get_bits(cookie, DP_RXDMA_BUF_COOKIE_BUF_ID);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		memset(buff, 0, size);
		ath12k_dbring_bufs_replenish(ar, ring, buff, GFP_ATOMIC);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

void ath12k_dbring_srng_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
	ath12k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

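/*
 * Release every buffer still tracked in the ring's IDR: unmap it for DMA,
 * free it, and finally destroy the IDR itself.
 */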
void ath12k_dbring_buf_cleanup(struct ath12k *ar, struct ath12k_dbring *ring)
{
	struct ath12k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}