xref: /freebsd/sys/contrib/dev/athk/ath11k/hal.c (revision c95ea407b322379bcb3c013a2dca9a18072c1df8)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
6  */
7 #if defined(__FreeBSD__)
8 #include <asm/io.h>
9 #endif
10 #include <linux/dma-mapping.h>
11 #include <linux/export.h>
12 #include "hal_tx.h"
13 #include "debug.h"
14 #include "hal_desc.h"
15 #include "hif.h"
16 
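/* Note on units: entry_size below is expressed in 32-bit words (hence the
 * ">> 2" applied to the descriptor struct sizes), and max_size is a word
 * count as well; ath11k_hal_srng_get_entrysize() converts back to bytes
 * with "<< 2" and ath11k_hal_srng_get_max_entries() divides max_size by
 * entry_size.
 */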
17 static const struct hal_srng_config hw_srng_config_template[] = {
18 	/* TODO: max_rings can be populated by querying HW capabilities */
19 	{ /* REO_DST */
20 		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
21 		.max_rings = 4,
22 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
23 		.lmac_ring = false,
24 		.ring_dir = HAL_SRNG_DIR_DST,
25 		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
26 	},
27 	{ /* REO_EXCEPTION */
28 		/* Designating REO2TCL ring as exception ring. This ring is
29 		 * similar to other REO2SW rings though it is named as REO2TCL.
30 		 * Any of the REO2SW rings can be used as the exception ring.
31 		 */
32 		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
33 		.max_rings = 1,
34 		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
35 		.lmac_ring = false,
36 		.ring_dir = HAL_SRNG_DIR_DST,
37 		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
38 	},
39 	{ /* REO_REINJECT */
40 		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
41 		.max_rings = 1,
42 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
43 		.lmac_ring = false,
44 		.ring_dir = HAL_SRNG_DIR_SRC,
45 		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
46 	},
47 	{ /* REO_CMD */
48 		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
49 		.max_rings = 1,
50 		.entry_size = (sizeof(struct hal_tlv_hdr) +
51 			sizeof(struct hal_reo_get_queue_stats)) >> 2,
52 		.lmac_ring = false,
53 		.ring_dir = HAL_SRNG_DIR_SRC,
54 		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
55 	},
56 	{ /* REO_STATUS */
57 		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
58 		.max_rings = 1,
59 		.entry_size = (sizeof(struct hal_tlv_hdr) +
60 			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
61 		.lmac_ring = false,
62 		.ring_dir = HAL_SRNG_DIR_DST,
63 		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
64 	},
65 	{ /* TCL_DATA */
66 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
67 		.max_rings = 3,
68 		.entry_size = (sizeof(struct hal_tlv_hdr) +
69 			     sizeof(struct hal_tcl_data_cmd)) >> 2,
70 		.lmac_ring = false,
71 		.ring_dir = HAL_SRNG_DIR_SRC,
72 		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
73 	},
74 	{ /* TCL_CMD */
75 		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
76 		.max_rings = 1,
77 		.entry_size = (sizeof(struct hal_tlv_hdr) +
78 			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
79 		.lmac_ring = false,
80 		.ring_dir = HAL_SRNG_DIR_SRC,
81 		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
82 	},
83 	{ /* TCL_STATUS */
84 		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
85 		.max_rings = 1,
86 		.entry_size = (sizeof(struct hal_tlv_hdr) +
87 			     sizeof(struct hal_tcl_status_ring)) >> 2,
88 		.lmac_ring = false,
89 		.ring_dir = HAL_SRNG_DIR_DST,
90 		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
91 	},
92 	{ /* CE_SRC */
93 		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
94 		.max_rings = 12,
95 		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
96 		.lmac_ring = false,
97 		.ring_dir = HAL_SRNG_DIR_SRC,
98 		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
99 	},
100 	{ /* CE_DST */
101 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
102 		.max_rings = 12,
103 		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
104 		.lmac_ring = false,
105 		.ring_dir = HAL_SRNG_DIR_SRC,
106 		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
107 	},
108 	{ /* CE_DST_STATUS */
109 		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
110 		.max_rings = 12,
111 		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
112 		.lmac_ring = false,
113 		.ring_dir = HAL_SRNG_DIR_DST,
114 		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
115 	},
116 	{ /* WBM_IDLE_LINK */
117 		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
118 		.max_rings = 1,
119 		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
120 		.lmac_ring = false,
121 		.ring_dir = HAL_SRNG_DIR_SRC,
122 		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
123 	},
124 	{ /* SW2WBM_RELEASE */
125 		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
126 		.max_rings = 1,
127 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
128 		.lmac_ring = false,
129 		.ring_dir = HAL_SRNG_DIR_SRC,
130 		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
131 	},
132 	{ /* WBM2SW_RELEASE */
133 		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
134 		.max_rings = 5,
135 		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
136 		.lmac_ring = false,
137 		.ring_dir = HAL_SRNG_DIR_DST,
138 		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
139 	},
140 	{ /* RXDMA_BUF */
141 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
142 		.max_rings = 2,
143 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
144 		.lmac_ring = true,
145 		.ring_dir = HAL_SRNG_DIR_SRC,
146 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
147 	},
148 	{ /* RXDMA_DST */
149 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
150 		.max_rings = 1,
151 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
152 		.lmac_ring = true,
153 		.ring_dir = HAL_SRNG_DIR_DST,
154 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
155 	},
156 	{ /* RXDMA_MONITOR_BUF */
157 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
158 		.max_rings = 1,
159 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
160 		.lmac_ring = true,
161 		.ring_dir = HAL_SRNG_DIR_SRC,
162 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
163 	},
164 	{ /* RXDMA_MONITOR_STATUS */
165 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
166 		.max_rings = 1,
167 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
168 		.lmac_ring = true,
169 		.ring_dir = HAL_SRNG_DIR_SRC,
170 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
171 	},
172 	{ /* RXDMA_MONITOR_DST */
173 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
174 		.max_rings = 1,
175 		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
176 		.lmac_ring = true,
177 		.ring_dir = HAL_SRNG_DIR_DST,
178 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
179 	},
180 	{ /* RXDMA_MONITOR_DESC */
181 		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
182 		.max_rings = 1,
183 		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
184 		.lmac_ring = true,
185 		.ring_dir = HAL_SRNG_DIR_SRC,
186 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
187 	},
188 	{ /* RXDMA DIR BUF */
189 		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
190 		.max_rings = 1,
191 		.entry_size = 8 >> 2, /* TODO: Define the struct */
192 		.lmac_ring = true,
193 		.ring_dir = HAL_SRNG_DIR_SRC,
194 		.max_size = HAL_RXDMA_RING_MAX_SIZE,
195 	},
196 };
197 
198 static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
199 {
200 	struct ath11k_hal *hal = &ab->hal;
201 	size_t size;
202 
203 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
204 	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
205 					    GFP_KERNEL);
206 	if (!hal->rdp.vaddr)
207 		return -ENOMEM;
208 
209 	return 0;
210 }
211 
212 static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
213 {
214 	struct ath11k_hal *hal = &ab->hal;
215 	size_t size;
216 
217 	if (!hal->rdp.vaddr)
218 		return;
219 
220 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
221 	dma_free_coherent(ab->dev, size,
222 			  hal->rdp.vaddr, hal->rdp.paddr);
223 	hal->rdp.vaddr = NULL;
224 }
225 
226 static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
227 {
228 	struct ath11k_hal *hal = &ab->hal;
229 	size_t size;
230 
231 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
232 	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
233 					    GFP_KERNEL);
234 	if (!hal->wrp.vaddr)
235 		return -ENOMEM;
236 
237 	return 0;
238 }
239 
240 static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
241 {
242 	struct ath11k_hal *hal = &ab->hal;
243 	size_t size;
244 
245 	if (!hal->wrp.vaddr)
246 		return;
247 
248 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
249 	dma_free_coherent(ab->dev, size,
250 			  hal->wrp.vaddr, hal->wrp.paddr);
251 	hal->wrp.vaddr = NULL;
252 }
253 
254 static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
255 				    struct hal_srng *srng, int ring_num)
256 {
257 	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
258 	u32 addr;
259 	u32 val;
260 
261 	addr = HAL_CE_DST_RING_CTRL +
262 	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
263 	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
264 
265 	val = ath11k_hif_read32(ab, addr);
266 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
267 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
268 			  srng->u.dst_ring.max_buffer_length);
269 	ath11k_hif_write32(ab, addr, val);
270 }
271 
272 static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
273 					struct hal_srng *srng)
274 {
275 	struct ath11k_hal *hal = &ab->hal;
276 	u32 val;
277 	u64 hp_addr;
278 	u32 reg_base;
279 
280 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
281 
282 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
283 		ath11k_hif_write32(ab, reg_base +
284 				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
285 				   srng->msi_addr);
286 
287 		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
288 				 ((u64)srng->msi_addr >>
289 				  HAL_ADDR_MSB_REG_SHIFT)) |
290 		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
291 		ath11k_hif_write32(ab, reg_base +
292 				       HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
293 
294 		ath11k_hif_write32(ab,
295 				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
296 				   srng->msi_data);
297 	}
298 
299 	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
300 
301 	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
302 			 ((u64)srng->ring_base_paddr >>
303 			  HAL_ADDR_MSB_REG_SHIFT)) |
304 	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
305 			 (srng->entry_size * srng->num_entries));
306 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
307 
308 	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
309 	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
310 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
311 
312 	/* interrupt setup */
313 	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
314 			 (srng->intr_timer_thres_us >> 3));
315 
316 	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
317 			  (srng->intr_batch_cntr_thres_entries *
318 			   srng->entry_size));
319 
320 	ath11k_hif_write32(ab,
321 			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
322 			   val);
323 
324 	hp_addr = hal->rdp.paddr +
325 		  ((unsigned long)srng->u.dst_ring.hp_addr -
326 		   (unsigned long)hal->rdp.vaddr);
327 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
328 			   hp_addr & HAL_ADDR_LSB_REG_MASK);
329 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
330 			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
331 
332 	/* Initialize head and tail pointers to indicate ring is empty */
333 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
334 	ath11k_hif_write32(ab, reg_base, 0);
335 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
336 	*srng->u.dst_ring.hp_addr = 0;
337 
338 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
339 	val = 0;
340 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
341 		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
342 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
343 		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
344 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
345 		val |= HAL_REO1_RING_MISC_MSI_SWAP;
346 	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
347 
348 	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
349 }
350 
351 static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
352 					struct hal_srng *srng)
353 {
354 	struct ath11k_hal *hal = &ab->hal;
355 	u32 val;
356 	u64 tp_addr;
357 	u32 reg_base;
358 
359 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
360 
361 	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
362 		ath11k_hif_write32(ab, reg_base +
363 				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
364 				   srng->msi_addr);
365 
366 		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
367 				 ((u64)srng->msi_addr >>
368 				  HAL_ADDR_MSB_REG_SHIFT)) |
369 		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
370 		ath11k_hif_write32(ab, reg_base +
371 				       HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
372 				   val);
373 
374 		ath11k_hif_write32(ab, reg_base +
375 				       HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
376 				   srng->msi_data);
377 	}
378 
379 	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
380 
381 	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
382 			 ((u64)srng->ring_base_paddr >>
383 			  HAL_ADDR_MSB_REG_SHIFT)) |
384 	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
385 			 (srng->entry_size * srng->num_entries));
386 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
387 
388 	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
389 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
390 
391 	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
392 		ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
393 		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
394 				 ((u64)srng->ring_base_paddr >>
395 				 HAL_ADDR_MSB_REG_SHIFT)) |
396 			FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
397 				   (srng->entry_size * srng->num_entries));
398 		ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
399 	}
400 
401 	/* interrupt setup */
402 	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in units
403 	 * of 8 usecs instead of 1 usec (as required by v1).
404 	 */
405 	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
406 			 srng->intr_timer_thres_us);
407 
408 	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
409 			  (srng->intr_batch_cntr_thres_entries *
410 			   srng->entry_size));
411 
412 	ath11k_hif_write32(ab,
413 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
414 			   val);
415 
416 	val = 0;
417 	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
418 		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
419 				  srng->u.src_ring.low_threshold);
420 	}
421 	ath11k_hif_write32(ab,
422 			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
423 			   val);
424 
425 	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
426 		tp_addr = hal->rdp.paddr +
427 			  ((unsigned long)srng->u.src_ring.tp_addr -
428 			   (unsigned long)hal->rdp.vaddr);
429 		ath11k_hif_write32(ab,
430 				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
431 				   tp_addr & HAL_ADDR_LSB_REG_MASK);
432 		ath11k_hif_write32(ab,
433 				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
434 				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
435 	}
436 
437 	/* Initialize head and tail pointers to indicate ring is empty */
438 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
439 	ath11k_hif_write32(ab, reg_base, 0);
440 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
441 	*srng->u.src_ring.tp_addr = 0;
442 
443 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
444 	val = 0;
445 	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
446 		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
447 	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
448 		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
449 	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
450 		val |= HAL_TCL1_RING_MISC_MSI_SWAP;
451 
452 	/* Loop count is not used for SRC rings */
453 	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
454 
455 	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
456 
457 	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
458 }
459 
460 static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
461 				    struct hal_srng *srng)
462 {
463 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
464 		ath11k_hal_srng_src_hw_init(ab, srng);
465 	else
466 		ath11k_hal_srng_dst_hw_init(ab, srng);
467 }
468 
469 static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
470 				       enum hal_ring_type type,
471 				       int ring_num, int mac_id)
472 {
473 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
474 	int ring_id;
475 
476 	if (ring_num >= srng_config->max_rings) {
477 		ath11k_warn(ab, "invalid ring number: %d\n", ring_num);
478 		return -EINVAL;
479 	}
480 
481 	ring_id = srng_config->start_ring_id + ring_num;
482 	if (srng_config->lmac_ring)
483 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
484 
485 	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
486 		return -EINVAL;
487 
488 	return ring_id;
489 }
490 
491 int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
492 {
493 	struct hal_srng_config *srng_config;
494 
495 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
496 		return -EINVAL;
497 
498 	srng_config = &ab->hal.srng_config[ring_type];
499 
500 	return (srng_config->entry_size << 2);
501 }
502 
503 int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
504 {
505 	struct hal_srng_config *srng_config;
506 
507 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
508 		return -EINVAL;
509 
510 	srng_config = &ab->hal.srng_config[ring_type];
511 
512 	return (srng_config->max_size / srng_config->entry_size);
513 }
514 
515 void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
516 				struct hal_srng_params *params)
517 {
518 	params->ring_base_paddr = srng->ring_base_paddr;
519 	params->ring_base_vaddr = srng->ring_base_vaddr;
520 	params->num_entries = srng->num_entries;
521 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
522 	params->intr_batch_cntr_thres_entries =
523 		srng->intr_batch_cntr_thres_entries;
524 	params->low_threshold = srng->u.src_ring.low_threshold;
525 	params->msi_addr = srng->msi_addr;
526 	params->msi_data = srng->msi_data;
527 	params->flags = srng->flags;
528 }
529 
530 dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
531 				       struct hal_srng *srng)
532 {
533 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
534 		return 0;
535 
536 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
537 		return ab->hal.wrp.paddr +
538 		       ((unsigned long)srng->u.src_ring.hp_addr -
539 			(unsigned long)ab->hal.wrp.vaddr);
540 	else
541 		return ab->hal.rdp.paddr +
542 		       ((unsigned long)srng->u.dst_ring.hp_addr -
543 			 (unsigned long)ab->hal.rdp.vaddr);
544 }
545 
546 dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
547 				       struct hal_srng *srng)
548 {
549 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
550 		return 0;
551 
552 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
553 		return ab->hal.rdp.paddr +
554 		       ((unsigned long)srng->u.src_ring.tp_addr -
555 			(unsigned long)ab->hal.rdp.vaddr);
556 	else
557 		return ab->hal.wrp.paddr +
558 		       ((unsigned long)srng->u.dst_ring.tp_addr -
559 			(unsigned long)ab->hal.wrp.vaddr);
560 }
561 
562 u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
563 {
564 	switch (type) {
565 	case HAL_CE_DESC_SRC:
566 		return sizeof(struct hal_ce_srng_src_desc);
567 	case HAL_CE_DESC_DST:
568 		return sizeof(struct hal_ce_srng_dest_desc);
569 	case HAL_CE_DESC_DST_STATUS:
570 		return sizeof(struct hal_ce_srng_dst_status_desc);
571 	}
572 
573 	return 0;
574 }
575 
576 void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
577 				u8 byte_swap_data)
578 {
579 	struct hal_ce_srng_src_desc *desc = buf;
580 
581 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
582 	desc->buffer_addr_info =
583 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
584 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
585 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
586 			   byte_swap_data) |
587 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
588 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
589 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
590 }
591 
592 void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
593 {
594 	struct hal_ce_srng_dest_desc *desc = buf;
595 
596 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
597 	desc->buffer_addr_info =
598 		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
599 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
600 }
601 
602 u32 ath11k_hal_ce_dst_status_get_length(void *buf)
603 {
604 	struct hal_ce_srng_dst_status_desc *desc = buf;
605 	u32 len;
606 
607 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
608 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
609 
610 	return len;
611 }
612 
613 void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
614 				   dma_addr_t paddr)
615 {
616 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
617 					       (paddr & HAL_ADDR_LSB_REG_MASK));
618 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
619 					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
620 				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
621 				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
622 }
623 
624 u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
625 {
626 	lockdep_assert_held(&srng->lock);
627 
628 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
629 		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
630 
631 	return NULL;
632 }
633 
634 static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
635 					      struct hal_srng *srng, dma_addr_t *paddr)
636 {
637 	lockdep_assert_held(&srng->lock);
638 
639 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
640 		*paddr = srng->ring_base_paddr +
641 			  sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
642 		return srng->ring_base_vaddr + srng->u.dst_ring.tp;
643 	}
644 
645 	return NULL;
646 }
647 
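/* Descriptive note: HAL_SRNG_FLAGS_CACHED marks rings whose descriptors
 * sit in cacheable host memory, so before handing the next descriptor to
 * the caller it is synced for CPU access and prefetched to hide the
 * cache-miss latency.
 */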
648 static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
649 					  struct hal_srng *srng)
650 {
651 	dma_addr_t desc_paddr;
652 	u32 *desc;
653 
654 	/* prefetch only if desc is available */
655 	desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
656 	if (likely(desc)) {
657 		dma_sync_single_for_cpu(ab->dev, desc_paddr,
658 					(srng->entry_size * sizeof(u32)),
659 					DMA_FROM_DEVICE);
660 		prefetch(desc);
661 	}
662 }
663 
664 u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
665 					struct hal_srng *srng)
666 {
667 	u32 *desc;
668 
669 	lockdep_assert_held(&srng->lock);
670 
671 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
672 		return NULL;
673 
674 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
675 
676 	srng->u.dst_ring.tp += srng->entry_size;
677 
678 	/* wrap around to start of ring */
679 	if (srng->u.dst_ring.tp == srng->ring_size)
680 		srng->u.dst_ring.tp = 0;
681 
682 	/* Try to prefetch the next descriptor in the ring */
683 	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
684 		ath11k_hal_srng_prefetch_desc(ab, srng);
685 
686 	return desc;
687 }
688 
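/* Returns how many entries the hardware has produced on a destination ring
 * that the host has not yet consumed.  The hp/tp offsets are in units of
 * 32-bit words, so the distance is divided by entry_size; the second branch
 * handles head-pointer wraparound.  Worked example (illustrative values
 * only): ring_size = 32 words, entry_size = 8, tp = 24, hp = 8 gives
 * (32 - 24 + 8) / 8 = 2 entries.
 */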
689 int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
690 				 bool sync_hw_ptr)
691 {
692 	u32 tp, hp;
693 
694 	lockdep_assert_held(&srng->lock);
695 
696 	tp = srng->u.dst_ring.tp;
697 
698 	if (sync_hw_ptr) {
699 		hp = *srng->u.dst_ring.hp_addr;
700 		srng->u.dst_ring.cached_hp = hp;
701 	} else {
702 		hp = srng->u.dst_ring.cached_hp;
703 	}
704 
705 	if (hp >= tp)
706 		return (hp - tp) / srng->entry_size;
707 	else
708 		return (srng->ring_size - tp + hp) / srng->entry_size;
709 }
710 
711 /* Returns number of available entries in src ring */
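/* One slot is deliberately kept unused (the "- 1" below), so a completely
 * full source ring never wraps hp onto tp and hp == tp can always be read
 * as "ring empty".
 */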
712 int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
713 				 bool sync_hw_ptr)
714 {
715 	u32 tp, hp;
716 
717 	lockdep_assert_held(&srng->lock);
718 
719 	hp = srng->u.src_ring.hp;
720 
721 	if (sync_hw_ptr) {
722 		tp = *srng->u.src_ring.tp_addr;
723 		srng->u.src_ring.cached_tp = tp;
724 	} else {
725 		tp = srng->u.src_ring.cached_tp;
726 	}
727 
728 	if (tp > hp)
729 		return ((tp - hp) / srng->entry_size) - 1;
730 	else
731 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
732 }
733 
734 u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
735 					struct hal_srng *srng)
736 {
737 	u32 *desc;
738 	u32 next_hp;
739 
740 	lockdep_assert_held(&srng->lock);
741 
742 	/* TODO: Using % is expensive, but we have to do this since the size of
743 	 * some SRNG rings is not a power of 2 (due to descriptor sizes). Need to
744 	 * see if a separate function can be defined for rings with a power-of-2
745 	 * ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid
746 	 * the overhead of % by using a mask (with &).
747 	 */
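	/* For rings whose ring_size happens to be a power of two, the modulo
	 * below could be replaced by a mask, e.g. (sketch only):
	 *
	 *   next_hp = (srng->u.src_ring.hp + srng->entry_size) &
	 *	       (srng->ring_size - 1);
	 *
	 * This does not hold for the non-power-of-2 rings mentioned above.
	 */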
748 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
749 
750 	if (next_hp == srng->u.src_ring.cached_tp)
751 		return NULL;
752 
753 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
754 	srng->u.src_ring.hp = next_hp;
755 
756 	/* TODO: Reap functionality is not used by all rings. If a particular
757 	 * ring does not use the reap functionality, we need not update reap_hp
758 	 * with the next_hp pointer. Make sure a separate function is used
759 	 * before doing any optimization that removes the code below updating
760 	 * reap_hp.
761 	 */
762 	srng->u.src_ring.reap_hp = next_hp;
763 
764 	return desc;
765 }
766 
767 u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
768 				   struct hal_srng *srng)
769 {
770 	u32 *desc;
771 	u32 next_reap_hp;
772 
773 	lockdep_assert_held(&srng->lock);
774 
775 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
776 		       srng->ring_size;
777 
778 	if (next_reap_hp == srng->u.src_ring.cached_tp)
779 		return NULL;
780 
781 	desc = srng->ring_base_vaddr + next_reap_hp;
782 	srng->u.src_ring.reap_hp = next_reap_hp;
783 
784 	return desc;
785 }
786 
787 u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
788 					 struct hal_srng *srng)
789 {
790 	u32 *desc;
791 
792 	lockdep_assert_held(&srng->lock);
793 
794 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
795 		return NULL;
796 
797 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
798 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
799 			      srng->ring_size;
800 
801 	return desc;
802 }
803 
804 u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
805 {
806 	u32 next_hp;
807 
808 	lockdep_assert_held(&srng->lock);
809 
810 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
811 
812 	if (next_hp != srng->u.src_ring.cached_tp)
813 		return srng->ring_base_vaddr + next_hp;
814 
815 	return NULL;
816 }
817 
818 u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
819 {
820 	lockdep_assert_held(&srng->lock);
821 
822 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
823 	    srng->u.src_ring.cached_tp)
824 		return NULL;
825 
826 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
827 }
828 
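/* Begin a ring access under srng->lock: snapshot the pointer owned by the
 * hardware (the tail pointer for source rings, the head pointer for
 * destination rings) into the cached SW state so that the peek/get_next
 * helpers above operate on a consistent view until
 * ath11k_hal_srng_access_end() publishes the host-side pointer.
 */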
829 void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
830 {
831 	u32 hp;
832 
833 	lockdep_assert_held(&srng->lock);
834 
835 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
836 		srng->u.src_ring.cached_tp =
837 			*(volatile u32 *)srng->u.src_ring.tp_addr;
838 	} else {
839 		hp = READ_ONCE(*srng->u.dst_ring.hp_addr);
840 
841 		if (hp != srng->u.dst_ring.cached_hp) {
842 			srng->u.dst_ring.cached_hp = hp;
843 			/* Make sure descriptor is read after the head
844 			 * pointer.
845 			 */
846 			dma_rmb();
847 		}
848 
849 		/* Try to prefetch the next descriptor in the ring */
850 		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
851 			ath11k_hal_srng_prefetch_desc(ab, srng);
852 	}
853 }
854 
855 /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
856  * should have been called before this.
857  */
858 void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
859 {
860 	lockdep_assert_held(&srng->lock);
861 
862 	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
863 		/* For LMAC rings, ring pointer updates are done through FW and
864 		 * hence written to a shared memory location that is read by FW
865 		 */
866 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
867 			srng->u.src_ring.last_tp =
868 				*(volatile u32 *)srng->u.src_ring.tp_addr;
869 			/* Make sure descriptor is written before updating the
870 			 * head pointer.
871 			 */
872 			dma_wmb();
873 			WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
874 		} else {
875 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
876 			/* Make sure descriptor is read before updating the
877 			 * tail pointer.
878 			 */
879 			dma_mb();
880 			WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
881 		}
882 	} else {
883 		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
884 			srng->u.src_ring.last_tp =
885 				*(volatile u32 *)srng->u.src_ring.tp_addr;
886 			/* Assume the implementation uses an MMIO write accessor
887 			 * which has the required wmb() so that the descriptor
888 			 * is written before updating the head pointer.
889 			 */
890 			ath11k_hif_write32(ab,
891 					   (unsigned long)srng->u.src_ring.hp_addr -
892 					   (unsigned long)ab->mem,
893 					   srng->u.src_ring.hp);
894 		} else {
895 			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
896 			/* Make sure descriptor is read before updating the
897 			 * tail pointer.
898 			 */
899 			mb();
900 			ath11k_hif_write32(ab,
901 					   (unsigned long)srng->u.dst_ring.tp_addr -
902 					   (unsigned long)ab->mem,
903 					   srng->u.dst_ring.tp);
904 		}
905 	}
906 
907 	srng->timestamp = jiffies;
908 }
909 
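/* The WBM idle-link list is handed to HW as a chain of scatter buffers:
 * the loop below writes, at the tail of scatter buffer i - 1, a buffer
 * address descriptor pointing at scatter buffer i, and the register writes
 * that follow tell WBM the scatter buffer size, the total list size, where
 * the chain starts and where the initial head/tail of the idle list sit.
 */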
910 void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
911 				     struct hal_wbm_idle_scatter_list *sbuf,
912 				     u32 nsbufs, u32 tot_link_desc,
913 				     u32 end_offset)
914 {
915 	struct ath11k_buffer_addr *link_addr;
916 	int i;
917 	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
918 
919 #if defined(__linux__)
920 	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
921 #elif defined(__FreeBSD__)
922 	link_addr = (void *)((uintptr_t)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE);
923 #endif
924 
925 	for (i = 1; i < nsbufs; i++) {
926 		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
927 		link_addr->info1 = FIELD_PREP(
928 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
929 				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
930 				FIELD_PREP(
931 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
932 				BASE_ADDR_MATCH_TAG_VAL);
933 
934 #if defined(__linux__)
935 		link_addr = (void *)sbuf[i].vaddr +
936 			     HAL_WBM_IDLE_SCATTER_BUF_SIZE;
937 #elif defined(__FreeBSD__)
938 		link_addr = (void *)((uintptr_t)sbuf[i].vaddr +
939 			     HAL_WBM_IDLE_SCATTER_BUF_SIZE);
940 #endif
941 	}
942 
943 	ath11k_hif_write32(ab,
944 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
945 			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
946 			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
947 	ath11k_hif_write32(ab,
948 			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
949 			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
950 				      reg_scatter_buf_sz * nsbufs));
951 	ath11k_hif_write32(ab,
952 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
953 			   HAL_WBM_SCATTERED_RING_BASE_LSB,
954 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
955 				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
956 	ath11k_hif_write32(ab,
957 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
958 			   HAL_WBM_SCATTERED_RING_BASE_MSB,
959 			   FIELD_PREP(
960 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
961 				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
962 				FIELD_PREP(
963 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
964 				BASE_ADDR_MATCH_TAG_VAL));
965 
966 	/* Setup head and tail pointers for the idle list */
967 	ath11k_hif_write32(ab,
968 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
969 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
970 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
971 				      sbuf[nsbufs - 1].paddr));
972 	ath11k_hif_write32(ab,
973 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
974 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
975 			   FIELD_PREP(
976 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
977 				((u64)sbuf[nsbufs - 1].paddr >>
978 				 HAL_ADDR_MSB_REG_SHIFT)) |
979 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
980 				      (end_offset >> 2)));
981 	ath11k_hif_write32(ab,
982 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
983 			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
984 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
985 				      sbuf[0].paddr));
986 
987 	ath11k_hif_write32(ab,
988 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
989 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
990 			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
991 				      sbuf[0].paddr));
992 	ath11k_hif_write32(ab,
993 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
994 			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
995 			   FIELD_PREP(
996 				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
997 				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
998 			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
999 				      0));
1000 	ath11k_hif_write32(ab,
1001 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
1002 			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
1003 			   2 * tot_link_desc);
1004 
1005 	/* Enable the SRNG */
1006 	ath11k_hif_write32(ab,
1007 			   HAL_SEQ_WCSS_UMAC_WBM_REG +
1008 			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
1009 }
1010 
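/* Common SRNG setup: resolve the ring id, populate the SW ring state from
 * the caller's params and the per-type template, then wire up the hp/tp
 * shadow addresses.  The pointer written by the HW lands in the host RDP
 * area; for LMAC rings the host-written pointer goes through the WRP area
 * shared with the firmware (and HW register programming is skipped, as
 * those rings are set up via the firmware instead), otherwise it is an
 * MMIO or shadow register write.
 */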
1011 int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
1012 			  int ring_num, int mac_id,
1013 			  struct hal_srng_params *params)
1014 {
1015 	struct ath11k_hal *hal = &ab->hal;
1016 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
1017 	struct hal_srng *srng;
1018 	int ring_id;
1019 	u32 lmac_idx;
1020 	int i;
1021 	u32 reg_base;
1022 
1023 	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
1024 	if (ring_id < 0)
1025 		return ring_id;
1026 
1027 	srng = &hal->srng_list[ring_id];
1028 
1029 	srng->ring_id = ring_id;
1030 	srng->ring_dir = srng_config->ring_dir;
1031 	srng->ring_base_paddr = params->ring_base_paddr;
1032 	srng->ring_base_vaddr = params->ring_base_vaddr;
1033 	srng->entry_size = srng_config->entry_size;
1034 	srng->num_entries = params->num_entries;
1035 	srng->ring_size = srng->entry_size * srng->num_entries;
1036 	srng->intr_batch_cntr_thres_entries =
1037 				params->intr_batch_cntr_thres_entries;
1038 	srng->intr_timer_thres_us = params->intr_timer_thres_us;
1039 	srng->flags = params->flags;
1040 	srng->msi_addr = params->msi_addr;
1041 	srng->msi_data = params->msi_data;
1042 	srng->initialized = 1;
1043 	spin_lock_init(&srng->lock);
1044 	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
1045 
1046 	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
1047 		srng->hwreg_base[i] = srng_config->reg_start[i] +
1048 				      (ring_num * srng_config->reg_size[i]);
1049 	}
1050 
1051 	memset(srng->ring_base_vaddr, 0,
1052 	       (srng->entry_size * srng->num_entries) << 2);
1053 
1054 	/* TODO: Add comments on these swap configurations */
1055 	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1056 		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
1057 			       HAL_SRNG_FLAGS_RING_PTR_SWAP;
1058 
1059 	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
1060 
1061 	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
1062 		srng->u.src_ring.hp = 0;
1063 		srng->u.src_ring.cached_tp = 0;
1064 		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
1065 		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
1066 		srng->u.src_ring.low_threshold = params->low_threshold *
1067 						 srng->entry_size;
1068 		if (srng_config->lmac_ring) {
1069 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1070 			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
1071 						   lmac_idx);
1072 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1073 		} else {
1074 			if (!ab->hw_params.supports_shadow_regs)
1075 				srng->u.src_ring.hp_addr =
1076 				(u32 *)((unsigned long)ab->mem + reg_base);
1077 			else
1078 				ath11k_dbg(ab, ATH11K_DBG_HAL,
1079 					   "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
1080 					   type, ring_num,
1081 					   reg_base,
1082 					   (unsigned long)srng->u.src_ring.hp_addr -
1083 					   (unsigned long)ab->mem);
1084 		}
1085 	} else {
1086 		/* During initialization loop count in all the descriptors
1087 		 * will be set to zero, and HW will set it to 1 on completing
1088 		 * descriptor update in first loop, and increments it by 1 on
1089 		 * subsequent loops (loop count wraps around after reaching
1090 		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1091 		 * loop count in descriptors updated by HW (to be processed
1092 		 * by SW).
1093 		 */
1094 		srng->u.dst_ring.loop_cnt = 1;
1095 		srng->u.dst_ring.tp = 0;
1096 		srng->u.dst_ring.cached_hp = 0;
1097 		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
1098 		if (srng_config->lmac_ring) {
1099 			/* For LMAC rings, tail pointer updates will be done
1100 			 * through FW by writing to a shared memory location
1101 			 */
1102 			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1103 			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
1104 						   lmac_idx);
1105 			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1106 		} else {
1107 			if (!ab->hw_params.supports_shadow_regs)
1108 				srng->u.dst_ring.tp_addr =
1109 				(u32 *)((unsigned long)ab->mem + reg_base +
1110 					(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
1111 			else
1112 				ath11k_dbg(ab, ATH11K_DBG_HAL,
1113 					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
1114 					   type, ring_num,
1115 					   reg_base + (HAL_REO1_RING_TP(ab) -
1116 						       HAL_REO1_RING_HP(ab)),
1117 					   (unsigned long)srng->u.dst_ring.tp_addr -
1118 					   (unsigned long)ab->mem);
1119 		}
1120 	}
1121 
1122 	if (srng_config->lmac_ring)
1123 		return ring_id;
1124 
1125 	ath11k_hal_srng_hw_init(ab, srng);
1126 
1127 	if (type == HAL_CE_DST) {
1128 		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
1129 		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
1130 	}
1131 
1132 	return ring_id;
1133 }
1134 
1135 static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
1136 					      int shadow_cfg_idx,
1137 					  enum hal_ring_type ring_type,
1138 					  int ring_num)
1139 {
1140 	struct hal_srng *srng;
1141 	struct ath11k_hal *hal = &ab->hal;
1142 	int ring_id;
1143 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1144 
1145 	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
1146 	if (ring_id < 0)
1147 		return;
1148 
1149 	srng = &hal->srng_list[ring_id];
1150 
1151 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1152 		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1153 						   (unsigned long)ab->mem);
1154 	else
1155 		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1156 						   (unsigned long)ab->mem);
1157 }
1158 
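/* Claim the next free shadow register slot and point it at the ring's
 * host-updated pointer register (HP for source rings, TP for destination
 * rings, hence the HAL_OFFSET_FROM_HP_TO_TP adjustment below), so the host
 * can move the ring pointer through a shadow register write rather than a
 * direct target register access.
 */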
1159 int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
1160 					 enum hal_ring_type ring_type,
1161 					 int ring_num)
1162 {
1163 	struct ath11k_hal *hal = &ab->hal;
1164 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1165 	int shadow_cfg_idx = hal->num_shadow_reg_configured;
1166 	u32 target_reg;
1167 
1168 	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
1169 		return -EINVAL;
1170 
1171 	hal->num_shadow_reg_configured++;
1172 
1173 	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
1174 	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
1175 		ring_num;
1176 
1177 	/* For destination ring, shadow the TP */
1178 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1179 		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
1180 
1181 	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
1182 
1183 	/* update hp/tp addr in the hal structure */
1184 	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
1185 					  ring_num);
1186 
1187 	ath11k_dbg(ab, ATH11K_DBG_HAL,
1188 		   "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
1189 		  target_reg,
1190 		  HAL_SHADOW_REG(ab, shadow_cfg_idx),
1191 		  shadow_cfg_idx,
1192 		  ring_type, ring_num);
1193 
1194 	return 0;
1195 }
1196 
1197 void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
1198 {
1199 	struct ath11k_hal *hal = &ab->hal;
1200 	int ring_type, ring_num;
1201 
1202 	/* update all the non-CE srngs. */
1203 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
1204 		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1205 
1206 		if (ring_type == HAL_CE_SRC ||
1207 		    ring_type == HAL_CE_DST ||
1208 			ring_type == HAL_CE_DST_STATUS)
1209 			continue;
1210 
1211 		if (srng_config->lmac_ring)
1212 			continue;
1213 
1214 		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
1215 			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
1216 	}
1217 }
1218 
1219 void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
1220 				       u32 **cfg, u32 *len)
1221 {
1222 	struct ath11k_hal *hal = &ab->hal;
1223 
1224 	*len = hal->num_shadow_reg_configured;
1225 	*cfg = hal->shadow_reg_addr;
1226 }
1227 
1228 void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
1229 					 struct hal_srng *srng)
1230 {
1231 	lockdep_assert_held(&srng->lock);
1232 
1233 	/* check whether the ring is empty. Update the shadow
1234 	 * HP only when then ring isn't empty.
1235 	 * HP only when the ring isn't empty.
1236 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
1237 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
1238 		ath11k_hal_srng_access_end(ab, srng);
1239 }
1240 
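/* The static template above only carries chip-independent fields; the
 * register start offsets and strides depend on hw_params, so they are
 * filled into a kmemdup'ed copy of the template here at runtime.
 */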
1241 static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
1242 {
1243 	struct ath11k_hal *hal = &ab->hal;
1244 	struct hal_srng_config *s;
1245 
1246 	hal->srng_config = kmemdup(hw_srng_config_template,
1247 				   sizeof(hw_srng_config_template),
1248 				   GFP_KERNEL);
1249 	if (!hal->srng_config)
1250 		return -ENOMEM;
1251 
1252 	s = &hal->srng_config[HAL_REO_DST];
1253 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
1254 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
1255 	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
1256 	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
1257 
1258 	s = &hal->srng_config[HAL_REO_EXCEPTION];
1259 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
1260 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
1261 
1262 	s = &hal->srng_config[HAL_REO_REINJECT];
1263 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
1264 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
1265 
1266 	s = &hal->srng_config[HAL_REO_CMD];
1267 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
1268 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
1269 
1270 	s = &hal->srng_config[HAL_REO_STATUS];
1271 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
1272 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
1273 
1274 	s = &hal->srng_config[HAL_TCL_DATA];
1275 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
1276 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
1277 	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
1278 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
1279 
1280 	s = &hal->srng_config[HAL_TCL_CMD];
1281 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
1282 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
1283 
1284 	s = &hal->srng_config[HAL_TCL_STATUS];
1285 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
1286 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
1287 
1288 	s = &hal->srng_config[HAL_CE_SRC];
1289 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1290 		ATH11K_CE_OFFSET(ab);
1291 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
1292 		ATH11K_CE_OFFSET(ab);
1293 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1294 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1295 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1296 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1297 
1298 	s = &hal->srng_config[HAL_CE_DST];
1299 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1300 		ATH11K_CE_OFFSET(ab);
1301 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
1302 		ATH11K_CE_OFFSET(ab);
1303 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1304 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1305 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1306 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1307 
1308 	s = &hal->srng_config[HAL_CE_DST_STATUS];
1309 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
1310 		HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
1311 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
1312 		ATH11K_CE_OFFSET(ab);
1313 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1314 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1315 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1316 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1317 
1318 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
1319 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
1320 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
1321 
1322 	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
1323 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
1324 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
1325 
1326 	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
1327 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1328 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
1329 	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
1330 		HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1331 	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
1332 
1333 	return 0;
1334 }
1335 
1336 static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
1337 {
1338 #if defined(__linux__)
1339 	struct ath11k_hal *hal = &ab->hal;
1340 	u32 ring_id;
1341 
1342 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1343 		lockdep_register_key(hal->srng_key + ring_id);
1344 #endif
1345 }
1346 
1347 static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
1348 {
1349 #if defined(__linux__)
1350 	struct ath11k_hal *hal = &ab->hal;
1351 	u32 ring_id;
1352 
1353 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1354 		lockdep_unregister_key(hal->srng_key + ring_id);
1355 #endif
1356 }
1357 
1358 int ath11k_hal_srng_init(struct ath11k_base *ab)
1359 {
1360 	struct ath11k_hal *hal = &ab->hal;
1361 	int ret;
1362 
1363 	memset(hal, 0, sizeof(*hal));
1364 
1365 	ret = ath11k_hal_srng_create_config(ab);
1366 	if (ret)
1367 		goto err_hal;
1368 
1369 	ret = ath11k_hal_alloc_cont_rdp(ab);
1370 	if (ret)
1371 		goto err_hal;
1372 
1373 	ret = ath11k_hal_alloc_cont_wrp(ab);
1374 	if (ret)
1375 		goto err_free_cont_rdp;
1376 
1377 	ath11k_hal_register_srng_key(ab);
1378 
1379 	return 0;
1380 
1381 err_free_cont_rdp:
1382 	ath11k_hal_free_cont_rdp(ab);
1383 
1384 err_hal:
1385 	return ret;
1386 }
1387 EXPORT_SYMBOL(ath11k_hal_srng_init);
1388 
1389 void ath11k_hal_srng_deinit(struct ath11k_base *ab)
1390 {
1391 	struct ath11k_hal *hal = &ab->hal;
1392 	int i;
1393 
1394 	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
1395 		ab->hal.srng_list[i].initialized = 0;
1396 
1397 	ath11k_hal_unregister_srng_key(ab);
1398 	ath11k_hal_free_cont_rdp(ab);
1399 	ath11k_hal_free_cont_wrp(ab);
1400 	kfree(hal->srng_config);
1401 	hal->srng_config = NULL;
1402 }
1403 EXPORT_SYMBOL(ath11k_hal_srng_deinit);
1404 
1405 void ath11k_hal_srng_clear(struct ath11k_base *ab)
1406 {
1407 	/* No need to memset rdp and wrp memory since each individual
1408 	 * segment would get cleared in ath11k_hal_srng_src_hw_init()
1409 	 * and ath11k_hal_srng_dst_hw_init().
1410 	 */
1411 	memset(ab->hal.srng_list, 0,
1412 	       sizeof(ab->hal.srng_list));
1413 	memset(ab->hal.shadow_reg_addr, 0,
1414 	       sizeof(ab->hal.shadow_reg_addr));
1415 	ab->hal.avail_blk_resource = 0;
1416 	ab->hal.current_blk_index = 0;
1417 	ab->hal.num_shadow_reg_configured = 0;
1418 }
1419 EXPORT_SYMBOL(ath11k_hal_srng_clear);
1420 
1421 void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
1422 {
1423 	struct hal_srng *srng;
1424 	struct ath11k_ext_irq_grp *irq_grp;
1425 	struct ath11k_ce_pipe *ce_pipe;
1426 	int i;
1427 
1428 	ath11k_err(ab, "Last interrupt received for each CE:\n");
1429 	for (i = 0; i < ab->hw_params.ce_count; i++) {
1430 		ce_pipe = &ab->ce.ce_pipe[i];
1431 
1432 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1433 			continue;
1434 
1435 #if defined(__linux__)
1436 		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
1437 			   i, ce_pipe->pipe_num,
1438 			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1439 #elif defined(__FreeBSD__)
1440 		ath11k_err(ab, "CE_id %d pipe_num %d %jums before\n",
1441 			   i, ce_pipe->pipe_num,
1442 			   (uintmax_t)jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1443 #endif
1444 	}
1445 
1446 	ath11k_err(ab, "\nLast interrupt received for each group:\n");
1447 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
1448 		irq_grp = &ab->ext_irq_grp[i];
1449 #if defined(__linux__)
1450 		ath11k_err(ab, "group_id %d %ums before\n",
1451 			   irq_grp->grp_id,
1452 			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
1453 #elif defined(__FreeBSD__)
1454 		ath11k_err(ab, "group_id %d %jums before\n",
1455 			   irq_grp->grp_id,
1456 			   (uintmax_t)jiffies_to_msecs(jiffies - irq_grp->timestamp));
1457 #endif
1458 	}
1459 
1460 	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
1461 		srng = &ab->hal.srng_list[i];
1462 
1463 		if (!srng->initialized)
1464 			continue;
1465 
1466 		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
1467 			ath11k_err(ab,
1468 #if defined(__linux__)
1469 				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
1470 #elif defined(__FreeBSD__)
1471 				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %jums\n",
1472 #endif
1473 				   srng->ring_id, srng->u.src_ring.hp,
1474 				   srng->u.src_ring.reap_hp,
1475 				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
1476 				   srng->u.src_ring.last_tp,
1477 #if defined(__linux__)
1478 				   jiffies_to_msecs(jiffies - srng->timestamp));
1479 #elif defined(__FreeBSD__)
1480 				   (uintmax_t)jiffies_to_msecs(jiffies - srng->timestamp));
1481 #endif
1482 		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
1483 			ath11k_err(ab,
1484 #if defined(__linux__)
1485 				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
1486 #elif defined(__FreeBSD__)
1487 				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %jums\n",
1488 #endif
1489 				   srng->ring_id, srng->u.dst_ring.tp,
1490 				   *srng->u.dst_ring.hp_addr,
1491 				   srng->u.dst_ring.cached_hp,
1492 				   srng->u.dst_ring.last_hp,
1493 #if defined(__linux__)
1494 				   jiffies_to_msecs(jiffies - srng->timestamp));
1495 #elif defined(__FreeBSD__)
1496 				   (uintmax_t)jiffies_to_msecs(jiffies - srng->timestamp));
1497 #endif
1498 	}
1499 }
1500