/* drivers/net/wireless/ath/ath11k/dbring.c (revision 8934827db5403eae57d4537114a9ff88b0a8460f) */
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "debug.h"

#define ATH11K_DB_MAGIC_VALUE 0xdeadbeaf

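/* Validate a buffer returned by the device. The payload is pre-filled
 * with ATH11K_DB_MAGIC_VALUE before it is handed to the device (see
 * ath11k_dbring_fill_magic_value()), so any u32 word that still holds
 * the magic value suggests the device did not completely overwrite the
 * buffer, and the data is rejected with -EINVAL.
 */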
int ath11k_dbring_validate_buffer(struct ath11k *ar, void *buffer, u32 size)
{
	u32 *temp;
	int idx;

	size = size >> 2;

	for (idx = 0, temp = buffer; idx < size; idx++, temp++) {
		if (*temp == ATH11K_DB_MAGIC_VALUE)
			return -EINVAL;
	}

	return 0;
}

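/* Pre-fill the aligned payload with magic words so that
 * ath11k_dbring_validate_buffer() can later detect whether the device
 * actually wrote the buffer.
 */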
static void ath11k_dbring_fill_magic_value(struct ath11k *ar,
					   void *buffer, u32 size)
{
	/* memset32() fills the buffer payload with ATH11K_DB_MAGIC_VALUE;
	 * its size argument is the number of u32 values to store, not the
	 * number of bytes, hence the conversion below.
	 */
	size = size / sizeof(u32);

	memset32(buffer, ATH11K_DB_MAGIC_VALUE, size);
}

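/* Post one preallocated element to the refill SRNG: align and
 * magic-fill the payload, DMA-map it, track it in the ring's IDR and
 * write its address into the next free ring descriptor. The IDR slot
 * is encoded together with the pdev index into the cookie that the
 * target echoes back on buffer release. Must be called with
 * srng->lock held.
 */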
int ath11k_dbring_bufs_replenish(struct ath11k *ar,
				 struct ath11k_dbring *ring,
				 struct ath11k_dbring_element *buff,
				 enum wmi_direct_buffer_module id)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	dma_addr_t paddr;
	void *ptr_aligned, *ptr_unaligned, *desc;
	int ret;
	int buf_id;
	u32 cookie;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];

	lockdep_assert_held(&srng->lock);

	ath11k_hal_srng_access_begin(ab, srng);

	ptr_unaligned = buff->payload;
	ptr_aligned = PTR_ALIGN(ptr_unaligned, ring->buf_align);
	ath11k_dbring_fill_magic_value(ar, ptr_aligned, ring->buf_sz);
	paddr = dma_map_single(ab->dev, ptr_aligned, ring->buf_sz,
			       DMA_FROM_DEVICE);

	ret = dma_mapping_error(ab->dev, paddr);
	if (ret)
		goto err;

	spin_lock_bh(&ring->idr_lock);
	buf_id = idr_alloc(&ring->bufs_idr, buff, 0, ring->bufs_max, GFP_ATOMIC);
	spin_unlock_bh(&ring->idr_lock);
	if (buf_id < 0) {
		ret = -ENOBUFS;
		goto err_dma_unmap;
	}

	desc = ath11k_hal_srng_src_get_next_entry(ab, srng);
	if (!desc) {
		ret = -ENOENT;
		goto err_idr_remove;
	}

	if (id == WMI_DIRECT_BUF_CFR)
		ath11k_cfr_lut_update_paddr(ar, paddr, buf_id);

	buff->paddr = paddr;

	cookie = FIELD_PREP(DP_RXDMA_BUF_COOKIE_PDEV_ID, ar->pdev_idx) |
		 FIELD_PREP(DP_RXDMA_BUF_COOKIE_BUF_ID, buf_id);

	ath11k_hal_rx_buf_addr_info_set(desc, paddr, cookie, 0);

	ath11k_debugfs_add_dbring_entry(ar, id, ATH11K_DBG_DBR_EVENT_REPLENISH, srng);
	ath11k_hal_srng_access_end(ab, srng);

	return 0;

err_idr_remove:
	spin_lock_bh(&ring->idr_lock);
	idr_remove(&ring->bufs_idr, buf_id);
	spin_unlock_bh(&ring->idr_lock);
err_dma_unmap:
	dma_unmap_single(ab->dev, paddr, ring->buf_sz,
			 DMA_FROM_DEVICE);
err:
	ath11k_hal_srng_access_end(ab, srng);
	return ret;
}

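/* Allocate and post as many buffers as the refill SRNG currently has
 * free entries for, capped at bufs_max. Each element is oversized by
 * (buf_align - 1) bytes so the payload can be aligned in place.
 * Returns the number of entries that could not be filled, i.e. 0 on
 * complete success.
 */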
static int ath11k_dbring_fill_bufs(struct ath11k *ar,
				   struct ath11k_dbring *ring,
				   enum wmi_direct_buffer_module id)
{
	struct ath11k_dbring_element *buff;
	struct hal_srng *srng;
	int num_remain, req_entries, num_free;
	u32 align;
	int size, ret;

	srng = &ar->ab->hal.srng_list[ring->refill_srng.ring_id];

	spin_lock_bh(&srng->lock);

	num_free = ath11k_hal_srng_src_num_free(ar->ab, srng, true);
	req_entries = min(num_free, ring->bufs_max);
	num_remain = req_entries;
	align = ring->buf_align;
	size = ring->buf_sz + align - 1;

	while (num_remain > 0) {
		buff = kzalloc_obj(*buff, GFP_ATOMIC);
		if (!buff)
			break;

		buff->payload = kzalloc(size, GFP_ATOMIC);
		if (!buff->payload) {
			kfree(buff);
			break;
		}
		ret = ath11k_dbring_bufs_replenish(ar, ring, buff, id);
		if (ret) {
			ath11k_warn(ar->ab, "failed to replenish db ring num_remain %d req_ent %d\n",
				    num_remain, req_entries);
			kfree(buff->payload);
			kfree(buff);
			break;
		}
		num_remain--;
	}

	spin_unlock_bh(&srng->lock);

	return num_remain;
}

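/* Advertise the ring to firmware via a WMI DMA ring configuration
 * request carrying the ring's base address and head/tail index
 * addresses. On single-pdev devices the same ring is configured once
 * for every target pdev, since the 2 GHz and 5 GHz bands share one
 * DBR.
 */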
int ath11k_dbring_wmi_cfg_setup(struct ath11k *ar,
				struct ath11k_dbring *ring,
				enum wmi_direct_buffer_module id)
{
	struct ath11k_wmi_pdev_dma_ring_cfg_req_cmd param = {};
	int ret, i;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	param.module_id		= id;
	param.base_paddr_lo	= lower_32_bits(ring->refill_srng.paddr);
	param.base_paddr_hi	= upper_32_bits(ring->refill_srng.paddr);
	param.head_idx_paddr_lo	= lower_32_bits(ring->hp_addr);
	param.head_idx_paddr_hi = upper_32_bits(ring->hp_addr);
	param.tail_idx_paddr_lo = lower_32_bits(ring->tp_addr);
	param.tail_idx_paddr_hi = upper_32_bits(ring->tp_addr);
	param.num_elems		= ring->bufs_max;
	param.buf_size		= ring->buf_sz;
	param.num_resp_per_event = ring->num_resp_per_event;
	param.event_timeout_ms	= ring->event_timeout_ms;

	/* For single pdev, 2 GHz and 5 GHz use one DBR. */
	if (ar->ab->hw_params.single_pdev_only) {
		for (i = 0; i < ar->ab->target_pdev_count; i++) {
			param.pdev_id = ar->ab->target_pdev_ids[i].pdev_id;
			ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
			if (ret) {
				ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
				return ret;
			}
		}
	} else {
		param.pdev_id = DP_SW2HW_MACID(ring->pdev_id);
		ret = ath11k_wmi_pdev_dma_ring_cfg(ar, &param);
		if (ret) {
			ath11k_warn(ar->ab, "failed to setup db ring cfg\n");
			return ret;
		}
	}

	return 0;
}

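/* Record the event batching parameters and the per-module handler that
 * ath11k_dbring_buffer_release_event() will invoke for each released
 * buffer. Callers such as the spectral module register their
 * processing callback here before configuring the ring via WMI.
 */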
int ath11k_dbring_set_cfg(struct ath11k *ar, struct ath11k_dbring *ring,
			  u32 num_resp_per_event, u32 event_timeout_ms,
			  int (*handler)(struct ath11k *,
					 struct ath11k_dbring_data *))
{
	if (WARN_ON(!ring))
		return -EINVAL;

	ring->num_resp_per_event = num_resp_per_event;
	ring->event_timeout_ms = event_timeout_ms;
	ring->handler = handler;

	return 0;
}

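/* Derive the ring geometry from the refill SRNG and the firmware
 * capability (buffer size, alignment, pdev), record the head/tail
 * pointer addresses for the WMI configuration, and fill the ring with
 * freshly allocated buffers.
 */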
int ath11k_dbring_buf_setup(struct ath11k *ar,
			    struct ath11k_dbring *ring,
			    struct ath11k_dbring_cap *db_cap)
{
	struct ath11k_base *ab = ar->ab;
	struct hal_srng *srng;
	int ret;

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	ring->bufs_max = ring->refill_srng.size /
		ath11k_hal_srng_get_entrysize(ab, HAL_RXDMA_DIR_BUF);

	ring->buf_sz = db_cap->min_buf_sz;
	ring->buf_align = db_cap->min_buf_align;
	ring->pdev_id = db_cap->pdev_id;
	ring->hp_addr = ath11k_hal_srng_get_hp_addr(ar->ab, srng);
	ring->tp_addr = ath11k_hal_srng_get_tp_addr(ar->ab, srng);

	ret = ath11k_dbring_fill_bufs(ar, ring, db_cap->id);

	return ret;
}

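/* Allocate the HAL RXDMA direct-buffer SRNG that backs this ring's
 * refill queue.
 */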
int ath11k_dbring_srng_setup(struct ath11k *ar, struct ath11k_dbring *ring,
			     int ring_num, int num_entries)
{
	int ret;

	ret = ath11k_dp_srng_setup(ar->ab, &ring->refill_srng, HAL_RXDMA_DIR_BUF,
				   ring_num, ar->pdev_idx, num_entries);
	if (ret < 0) {
		ath11k_warn(ar->ab, "failed to setup srng: %d ring_id %d\n",
			    ret, ring_num);
		goto err;
	}

	return 0;
err:
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
	return ret;
}

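/* Look up the DMA ring capability that firmware advertised for the
 * given pdev/module combination. Returns -ENOENT when no capabilities
 * were reported or none match.
 */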
int ath11k_dbring_get_cap(struct ath11k_base *ab,
			  u8 pdev_idx,
			  enum wmi_direct_buffer_module id,
			  struct ath11k_dbring_cap *db_cap)
{
	int i;

	if (!ab->num_db_cap || !ab->db_caps)
		return -ENOENT;

	if (id >= WMI_DIRECT_BUF_MAX)
		return -EINVAL;

	for (i = 0; i < ab->num_db_cap; i++) {
		if (pdev_idx == ab->db_caps[i].pdev_id &&
		    id == ab->db_caps[i].id) {
			*db_cap = ab->db_caps[i];

			return 0;
		}
	}

	return -ENOENT;
}

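/* Handle a WMI buffer-release event from firmware. Each released entry
 * is looked up by the IDR index carried in its cookie, unmapped, and
 * its aligned payload is handed to the module handler registered via
 * ath11k_dbring_set_cfg(). Unless the handler asks to hold on to the
 * buffer (ATH11K_CORRELATE_STATUS_HOLD), the payload is cleared and
 * the buffer is immediately replenished back to the ring.
 */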
int ath11k_dbring_buffer_release_event(struct ath11k_base *ab,
				       struct ath11k_dbring_buf_release_event *ev)
{
	struct ath11k_dbring *ring;
	struct hal_srng *srng;
	struct ath11k *ar;
	struct ath11k_dbring_element *buff;
	struct ath11k_dbring_data handler_data;
	struct ath11k_buffer_addr desc;
	u8 *vaddr_unalign;
	u32 num_entry, num_buff_reaped;
	u8 pdev_idx, rbm, module_id;
	u32 cookie;
	int buf_id;
	int size;
	dma_addr_t paddr;
	int ret = 0;
	int status;

	pdev_idx = ev->fixed.pdev_id;
	module_id = ev->fixed.module_id;

	if (ab->hw_params.single_pdev_only &&
	    pdev_idx < ab->target_pdev_count)
		pdev_idx = 0;

	if (pdev_idx >= ab->num_radios) {
		ath11k_warn(ab, "Invalid pdev id %d\n", pdev_idx);
		return -EINVAL;
	}

	if (ev->fixed.num_buf_release_entry !=
	    ev->fixed.num_meta_data_entry) {
		ath11k_warn(ab, "Buffer entry %d mismatch meta entry %d\n",
			    ev->fixed.num_buf_release_entry,
			    ev->fixed.num_meta_data_entry);
		return -EINVAL;
	}

	ar = ab->pdevs[pdev_idx].ar;

	rcu_read_lock();
	if (!rcu_dereference(ab->pdevs_active[pdev_idx])) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	switch (ev->fixed.module_id) {
	case WMI_DIRECT_BUF_SPECTRAL:
		ring = ath11k_spectral_get_dbring(ar);
		break;
	case WMI_DIRECT_BUF_CFR:
		ring = ath11k_cfr_get_dbring(ar);
		break;
	default:
		ring = NULL;
		ath11k_warn(ab, "Recv dma buffer release ev on unsupp module %d\n",
			    ev->fixed.module_id);
		break;
	}

	if (!ring) {
		ret = -EINVAL;
		goto rcu_unlock;
	}

	srng = &ab->hal.srng_list[ring->refill_srng.ring_id];
	num_entry = ev->fixed.num_buf_release_entry;
	size = ring->buf_sz + ring->buf_align - 1;
	num_buff_reaped = 0;

	spin_lock_bh(&srng->lock);

	while (num_buff_reaped < num_entry) {
		desc.info0 = ev->buf_entry[num_buff_reaped].paddr_lo;
		desc.info1 = ev->buf_entry[num_buff_reaped].paddr_hi;
		handler_data.meta = ev->meta_data[num_buff_reaped];

		num_buff_reaped++;

		ath11k_hal_rx_buf_addr_info_get(&desc, &paddr, &cookie, &rbm);

		buf_id = FIELD_GET(DP_RXDMA_BUF_COOKIE_BUF_ID, cookie);

		spin_lock_bh(&ring->idr_lock);
		buff = idr_find(&ring->bufs_idr, buf_id);
		if (!buff) {
			spin_unlock_bh(&ring->idr_lock);
			continue;
		}
		idr_remove(&ring->bufs_idr, buf_id);
		spin_unlock_bh(&ring->idr_lock);

		dma_unmap_single(ab->dev, buff->paddr, ring->buf_sz,
				 DMA_FROM_DEVICE);

		ath11k_debugfs_add_dbring_entry(ar, module_id,
						ATH11K_DBG_DBR_EVENT_RX, srng);

		if (ring->handler) {
			vaddr_unalign = buff->payload;
			handler_data.data = PTR_ALIGN(vaddr_unalign,
						      ring->buf_align);
			handler_data.data_sz = ring->buf_sz;
			handler_data.buff = buff;
			handler_data.buf_id = buf_id;

			status = ring->handler(ar, &handler_data);
			if (status == ATH11K_CORRELATE_STATUS_HOLD)
				continue;
		}

		buff->paddr = 0;
		memset(buff->payload, 0, size);
		ath11k_dbring_bufs_replenish(ar, ring, buff, module_id);
	}

	spin_unlock_bh(&srng->lock);

rcu_unlock:
	rcu_read_unlock();

	return ret;
}

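/* Release the HAL SRNG backing the ring's refill queue. */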
void ath11k_dbring_srng_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	ath11k_dp_srng_cleanup(ar->ab, &ring->refill_srng);
}

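/* Unmap and free every buffer still tracked in the ring's IDR, then
 * destroy the IDR itself.
 */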
void ath11k_dbring_buf_cleanup(struct ath11k *ar, struct ath11k_dbring *ring)
{
	struct ath11k_dbring_element *buff;
	int buf_id;

	spin_lock_bh(&ring->idr_lock);
	idr_for_each_entry(&ring->bufs_idr, buff, buf_id) {
		idr_remove(&ring->bufs_idr, buf_id);
		dma_unmap_single(ar->ab->dev, buff->paddr,
				 ring->buf_sz, DMA_FROM_DEVICE);
		kfree(buff->payload);
		kfree(buff);
	}

	idr_destroy(&ring->bufs_idr);
	spin_unlock_bh(&ring->idr_lock);
}
428