// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf

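/* Scan a received buffer one u32 word at a time (size is given in bytes,
 * hence the >> 2 below) and return -EINVAL if any word still holds
 * ATH11K_DB_MAGIC_VALUE, i.e. part of the pattern written by
 * ath11k_dbring_fill_magic_value() was never overwritten with real data.
 */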
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}

static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	/* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE and
	 * expects size to be the number of u32 values to store, not the
	 * number of bytes.
	 */
	size = size / sizeof(u32);

	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}

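/* Post one buffer element to the refill SRNG: refill the payload with the
 * magic pattern, DMA-map it, stash the element in the ring IDR and write the
 * resulting cookie and physical address into the next source ring descriptor.
 * The caller must hold srng->lock (see the lockdep assertion below).
 */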
static int ath11k_dbring_bufs_replenish(struct ath11k *ar,
					struct ath11k_dbring *ring,
					struct ath11k_dbring_element *buff,
					enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}

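/* Allocate and post as many buffer elements as the refill SRNG currently has
 * free entries for (capped at ring->bufs_max). Returns the number of entries
 * that could not be filled, so 0 means the ring is fully replenished.
 */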
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc(sizeof(*buff), GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

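/* Tell firmware about the DMA ring: send a WMI DMA ring configuration request
 * carrying the ring base, head and tail index addresses along with the
 * element count, buffer size and event moderation parameters configured for
 * this ring.
 */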
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
	int ret;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.pdev_id		= DP_SW2HW_MACID(ring->pdev_id);
	param.module_id		= id;
	param.base_paddr_lo	= lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi	= upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo	= lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems		= ring->bufs_max;
	param.buf_size		= ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms	= ring->event_timeout_ms;

	ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
	if (ret) {
		ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
		return ret;
	}

	return 0;
}

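/* Store the per-ring event moderation parameters and the callback that
 * ath11k_dbring_buffer_release_event() invokes for each released buffer.
 * A user such as the spectral scan module typically registers its handler
 * here before the ring is configured via ath11k_dbring_wmi_cfg_setup().
 */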
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

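/* Derive the ring geometry (element count, buffer size and alignment) from
 * the refill SRNG and the capability reported by firmware, record the
 * head/tail pointer addresses and then fill the ring with DMA-mapped buffers.
 */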
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

	return ret;
}

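/* Allocate the HAL_RXDMA_DIR_BUF refill SRNG that backs this direct buffer
 * ring.
 */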
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

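/* Look up the direct buffer ring capability that firmware advertised for the
 * given pdev and module id; on success the matching entry is copied into
 * @db_cap.
 */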
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

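/* Handle a WMI buffer release event from firmware: for every released entry,
 * translate the cookie back to the IDR entry, unmap the buffer, hand the
 * aligned payload to the registered handler and immediately replenish the
 * element back into the refill SRNG.
 */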
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;

			ring->handler(ar, &handler_data);
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

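/* Free the refill SRNG allocated by ath11k_dbring_srng_setup(). */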
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

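/* Release every buffer still tracked in the ring IDR: unmap its DMA address,
 * free the payload and the element itself, then destroy the IDR.
 */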
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}