xref: /linux/drivers/net/wireless/ath/ath11k/hal.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 #include <linux/dma-mapping.h>
7 #include <linux/export.h>
8 #include "hal_tx.h"
9 #include "debug.h"
10 #include "hal_desc.h"
11 #include "hif.h"
12 
/* Template of per-ring-type SRNG properties. Each entry describes one
 * hal_ring_type: the first HW ring id of that type, how many rings of the
 * type exist, the descriptor size and the maximum ring size (both in units
 * of 32-bit words, hence the ">> 2" on the sizeof expressions), whether the
 * ring is owned by an LMAC (pointer updates go through shared memory/FW
 * rather than MMIO) and its direction as seen from the host.
 */
static const struct hal_srng_config hw_srng_config_template[] = {
	/* TODO: max_rings can be populated by querying HW capabilities */
	{ /* REO_DST */
		.start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
		.max_rings = 4,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_EXCEPTION */
		/* Designating REO2TCL ring as exception ring. This ring is
		 * similar to other REO2SW rings though it is named as REO2TCL.
		 * Any of the REO2SW rings can be used as exception ring.
		 */
		.start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_REINJECT */
		.start_ring_id = HAL_SRNG_RING_ID_SW2REO,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_CMD */
		/* Commands are TLV-wrapped; entry covers header + payload */
		.start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* REO_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_DATA */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
		.max_rings = 3,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_data_cmd)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_CMD */
		.start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_gse_cmd)) >> 2,
		.lmac_ring =  false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
	},
	{ /* TCL_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
		.max_rings = 1,
		.entry_size = (sizeof(struct hal_tlv_hdr) +
			     sizeof(struct hal_tcl_status_ring)) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_SRC */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
		.lmac_ring = false,
		/* NOTE(review): direction is SRC although this is the CE
		 * destination ring — presumably because the host produces
		 * the dest descriptors; confirm against HW documentation.
		 */
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
	},
	{ /* CE_DST_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
		.max_rings = 12,
		.entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM_IDLE_LINK */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
	},
	{ /* SW2WBM_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* WBM2SW_RELEASE */
		.start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
		.max_rings = 5,
		.entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
		.lmac_ring = false,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
	},
	{ /* RXDMA_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
		.max_rings = 2,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_BUF */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_STATUS */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DST */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_DST,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA_MONITOR_DESC */
		.start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
		.max_rings = 1,
		.entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
	{ /* RXDMA DIR BUF */
		.start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
		.max_rings = 2,
		.entry_size = 8 >> 2, /* TODO: Define the struct */
		.lmac_ring = true,
		.ring_dir = HAL_SRNG_DIR_SRC,
		.max_size = HAL_RXDMA_RING_MAX_SIZE,
	},
};
193 
194 static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
195 {
196 	struct ath11k_hal *hal = &ab->hal;
197 	size_t size;
198 
199 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
200 	hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
201 					    GFP_KERNEL);
202 	if (!hal->rdp.vaddr)
203 		return -ENOMEM;
204 
205 	return 0;
206 }
207 
208 static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
209 {
210 	struct ath11k_hal *hal = &ab->hal;
211 	size_t size;
212 
213 	if (!hal->rdp.vaddr)
214 		return;
215 
216 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
217 	dma_free_coherent(ab->dev, size,
218 			  hal->rdp.vaddr, hal->rdp.paddr);
219 	hal->rdp.vaddr = NULL;
220 }
221 
222 static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
223 {
224 	struct ath11k_hal *hal = &ab->hal;
225 	size_t size;
226 
227 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
228 	hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
229 					    GFP_KERNEL);
230 	if (!hal->wrp.vaddr)
231 		return -ENOMEM;
232 
233 	return 0;
234 }
235 
236 static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
237 {
238 	struct ath11k_hal *hal = &ab->hal;
239 	size_t size;
240 
241 	if (!hal->wrp.vaddr)
242 		return;
243 
244 	size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
245 	dma_free_coherent(ab->dev, size,
246 			  hal->wrp.vaddr, hal->wrp.paddr);
247 	hal->wrp.vaddr = NULL;
248 }
249 
250 static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
251 				    struct hal_srng *srng, int ring_num)
252 {
253 	struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
254 	u32 addr;
255 	u32 val;
256 
257 	addr = HAL_CE_DST_RING_CTRL +
258 	       srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
259 	       ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
260 
261 	val = ath11k_hif_read32(ab, addr);
262 	val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
263 	val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
264 			  srng->u.dst_ring.max_buffer_length);
265 	ath11k_hif_write32(ab, addr, val);
266 }
267 
/* Program a destination (REO-style) SRNG into hardware: MSI target,
 * ring base/size, ring id, interrupt thresholds, HP shadow address,
 * initial HP/TP, and finally the MISC enable. Register offsets use the
 * REO1 layout applied relative to this ring's R0/R2 register bases.
 */
static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 hp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* Optional MSI setup: target address (split LSB/MSB) and payload */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		/* MSB word also carries the MSI enable bit */
		val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				       HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);

		ath11k_hif_write32(ab,
				   reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	/* Ring base address: low 32 bits at reg_base, high bits + total
	 * ring size (in 32-bit words) in the BASE_MSB register.
	 */
	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
	      FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);

	/* interrupt setup */
	/* Timer threshold programmed in units of 8 usecs (>> 3); batch
	 * threshold is converted from entries to 32-bit words.
	 */
	val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
			 (srng->intr_timer_thres_us >> 3));

	val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
			   val);

	/* Physical address of this ring's HP slot inside the shared
	 * read-pointer block; HW updates the head pointer there.
	 */
	hp_addr = hal->rdp.paddr +
		  ((unsigned long)srng->u.dst_ring.hp_addr -
		   (unsigned long)hal->rdp.vaddr);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
			   hp_addr & HAL_ADDR_LSB_REG_MASK);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
			   hp_addr >> HAL_ADDR_MSB_REG_SHIFT);

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
	*srng->u.dst_ring.hp_addr = 0;

	/* MISC: swap options per ring flags, then enable the SRNG */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_REO1_RING_MISC_MSI_SWAP;
	val |= HAL_REO1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
}
346 
/* Program a source (TCL-style) SRNG into hardware: MSI target, ring
 * base/size, entry size, interrupt thresholds, TP shadow address,
 * initial HP/TP, and the MISC enable. Mirrors the dst variant but uses
 * the TCL1 register layout.
 */
static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
					struct hal_srng *srng)
{
	struct ath11k_hal *hal = &ab->hal;
	u32 val;
	u64 tp_addr;
	u32 reg_base;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];

	/* Optional MSI setup: target address (LSB/MSB + enable) and data */
	if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
		ath11k_hif_write32(ab, reg_base +
				   HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
				   srng->msi_addr);

		val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
				 ((u64)srng->msi_addr >>
				  HAL_ADDR_MSB_REG_SHIFT)) |
		      HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
		ath11k_hif_write32(ab, reg_base +
				       HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
				   val);

		ath11k_hif_write32(ab, reg_base +
				       HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
				   srng->msi_data);
	}

	/* Ring base: low 32 bits, then high bits + ring size in words */
	ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);

	val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
			 ((u64)srng->ring_base_paddr >>
			  HAL_ADDR_MSB_REG_SHIFT)) |
	      FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
			 (srng->entry_size * srng->num_entries));
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);

	val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);

	/* NOTE(review): base registers are re-written for the WBM idle
	 * link ring here with identical values — presumably a HW quirk
	 * workaround; confirm before touching.
	 */
	if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
		val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
				 ((u64)srng->ring_base_paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
				   (srng->entry_size * srng->num_entries));
		ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
	}

	/* interrupt setup */
	/* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
	 * unit of 8 usecs instead of 1 usec (as required by v1).
	 */
	val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
			 srng->intr_timer_thres_us);

	val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
			  (srng->intr_batch_cntr_thres_entries *
			   srng->entry_size));

	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
			   val);

	/* Low-threshold interrupt only when the ring opted in via flags */
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
				  srng->u.src_ring.low_threshold);
	}
	ath11k_hif_write32(ab,
			   reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
			   val);

	/* Physical address of this ring's TP slot in the shared RDP block;
	 * skipped for the WBM idle link ring.
	 */
	if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
		tp_addr = hal->rdp.paddr +
			  ((unsigned long)srng->u.src_ring.tp_addr -
			   (unsigned long)hal->rdp.vaddr);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
				   tp_addr & HAL_ADDR_LSB_REG_MASK);
		ath11k_hif_write32(ab,
				   reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
				   tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
	}

	/* Initialize head and tail pointers to indicate ring is empty */
	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
	ath11k_hif_write32(ab, reg_base, 0);
	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
	*srng->u.src_ring.tp_addr = 0;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
	val = 0;
	if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
		val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
		val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
	if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
		val |= HAL_TCL1_RING_MISC_MSI_SWAP;

	/* Loop count is not used for SRC rings */
	val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;

	val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;

	ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
}
455 
456 static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
457 				    struct hal_srng *srng)
458 {
459 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
460 		ath11k_hal_srng_src_hw_init(ab, srng);
461 	else
462 		ath11k_hal_srng_dst_hw_init(ab, srng);
463 }
464 
465 static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
466 				       enum hal_ring_type type,
467 				       int ring_num, int mac_id)
468 {
469 	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
470 	int ring_id;
471 
472 	if (ring_num >= srng_config->max_rings) {
473 		ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
474 		return -EINVAL;
475 	}
476 
477 	ring_id = srng_config->start_ring_id + ring_num;
478 	if (srng_config->lmac_ring)
479 		ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
480 
481 	if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
482 		return -EINVAL;
483 
484 	return ring_id;
485 }
486 
487 int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
488 {
489 	struct hal_srng_config *srng_config;
490 
491 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
492 		return -EINVAL;
493 
494 	srng_config = &ab->hal.srng_config[ring_type];
495 
496 	return (srng_config->entry_size << 2);
497 }
498 
499 int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
500 {
501 	struct hal_srng_config *srng_config;
502 
503 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
504 		return -EINVAL;
505 
506 	srng_config = &ab->hal.srng_config[ring_type];
507 
508 	return (srng_config->max_size / srng_config->entry_size);
509 }
510 
511 void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
512 				struct hal_srng_params *params)
513 {
514 	params->ring_base_paddr = srng->ring_base_paddr;
515 	params->ring_base_vaddr = srng->ring_base_vaddr;
516 	params->num_entries = srng->num_entries;
517 	params->intr_timer_thres_us = srng->intr_timer_thres_us;
518 	params->intr_batch_cntr_thres_entries =
519 		srng->intr_batch_cntr_thres_entries;
520 	params->low_threshold = srng->u.src_ring.low_threshold;
521 	params->msi_addr = srng->msi_addr;
522 	params->msi_data = srng->msi_data;
523 	params->flags = srng->flags;
524 }
525 
526 dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
527 				       struct hal_srng *srng)
528 {
529 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
530 		return 0;
531 
532 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
533 		return ab->hal.wrp.paddr +
534 		       ((unsigned long)srng->u.src_ring.hp_addr -
535 			(unsigned long)ab->hal.wrp.vaddr);
536 	else
537 		return ab->hal.rdp.paddr +
538 		       ((unsigned long)srng->u.dst_ring.hp_addr -
539 			 (unsigned long)ab->hal.rdp.vaddr);
540 }
541 
542 dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
543 				       struct hal_srng *srng)
544 {
545 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
546 		return 0;
547 
548 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
549 		return ab->hal.rdp.paddr +
550 		       ((unsigned long)srng->u.src_ring.tp_addr -
551 			(unsigned long)ab->hal.rdp.vaddr);
552 	else
553 		return ab->hal.wrp.paddr +
554 		       ((unsigned long)srng->u.dst_ring.tp_addr -
555 			(unsigned long)ab->hal.wrp.vaddr);
556 }
557 
558 u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
559 {
560 	switch (type) {
561 	case HAL_CE_DESC_SRC:
562 		return sizeof(struct hal_ce_srng_src_desc);
563 	case HAL_CE_DESC_DST:
564 		return sizeof(struct hal_ce_srng_dest_desc);
565 	case HAL_CE_DESC_DST_STATUS:
566 		return sizeof(struct hal_ce_srng_dst_status_desc);
567 	}
568 
569 	return 0;
570 }
571 
572 void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
573 				u8 byte_swap_data)
574 {
575 	struct hal_ce_srng_src_desc *desc = buf;
576 
577 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
578 	desc->buffer_addr_info =
579 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
580 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
581 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
582 			   byte_swap_data) |
583 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
584 		FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
585 	desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
586 }
587 
588 void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
589 {
590 	struct hal_ce_srng_dest_desc *desc = buf;
591 
592 	desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
593 	desc->buffer_addr_info =
594 		FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
595 			   ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
596 }
597 
598 u32 ath11k_hal_ce_dst_status_get_length(void *buf)
599 {
600 	struct hal_ce_srng_dst_status_desc *desc = buf;
601 	u32 len;
602 
603 	len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
604 	desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
605 
606 	return len;
607 }
608 
609 void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
610 				   dma_addr_t paddr)
611 {
612 	desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
613 					       (paddr & HAL_ADDR_LSB_REG_MASK));
614 	desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
615 					       ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
616 				    FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
617 				    FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
618 }
619 
620 u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
621 {
622 	lockdep_assert_held(&srng->lock);
623 
624 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
625 		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
626 
627 	return NULL;
628 }
629 
630 static u32 *ath11k_hal_srng_dst_peek_with_dma(struct ath11k_base *ab,
631 					      struct hal_srng *srng, dma_addr_t *paddr)
632 {
633 	lockdep_assert_held(&srng->lock);
634 
635 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp) {
636 		*paddr = srng->ring_base_paddr +
637 			  sizeof(*srng->ring_base_vaddr) * srng->u.dst_ring.tp;
638 		return srng->ring_base_vaddr + srng->u.dst_ring.tp;
639 	}
640 
641 	return NULL;
642 }
643 
644 static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
645 					  struct hal_srng *srng)
646 {
647 	dma_addr_t desc_paddr;
648 	u32 *desc;
649 
650 	/* prefetch only if desc is available */
651 	desc = ath11k_hal_srng_dst_peek_with_dma(ab, srng, &desc_paddr);
652 	if (likely(desc)) {
653 		dma_sync_single_for_cpu(ab->dev, desc_paddr,
654 					(srng->entry_size * sizeof(u32)),
655 					DMA_FROM_DEVICE);
656 		prefetch(desc);
657 	}
658 }
659 
660 u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
661 					struct hal_srng *srng)
662 {
663 	u32 *desc;
664 
665 	lockdep_assert_held(&srng->lock);
666 
667 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
668 		return NULL;
669 
670 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
671 
672 	srng->u.dst_ring.tp += srng->entry_size;
673 
674 	/* wrap around to start of ring*/
675 	if (srng->u.dst_ring.tp == srng->ring_size)
676 		srng->u.dst_ring.tp = 0;
677 
678 	/* Try to prefetch the next descriptor in the ring */
679 	if (srng->flags & HAL_SRNG_FLAGS_CACHED)
680 		ath11k_hal_srng_prefetch_desc(ab, srng);
681 
682 	return desc;
683 }
684 
685 int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
686 				 bool sync_hw_ptr)
687 {
688 	u32 tp, hp;
689 
690 	lockdep_assert_held(&srng->lock);
691 
692 	tp = srng->u.dst_ring.tp;
693 
694 	if (sync_hw_ptr) {
695 		hp = *srng->u.dst_ring.hp_addr;
696 		srng->u.dst_ring.cached_hp = hp;
697 	} else {
698 		hp = srng->u.dst_ring.cached_hp;
699 	}
700 
701 	if (hp >= tp)
702 		return (hp - tp) / srng->entry_size;
703 	else
704 		return (srng->ring_size - tp + hp) / srng->entry_size;
705 }
706 
707 /* Returns number of available entries in src ring */
708 int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
709 				 bool sync_hw_ptr)
710 {
711 	u32 tp, hp;
712 
713 	lockdep_assert_held(&srng->lock);
714 
715 	hp = srng->u.src_ring.hp;
716 
717 	if (sync_hw_ptr) {
718 		tp = *srng->u.src_ring.tp_addr;
719 		srng->u.src_ring.cached_tp = tp;
720 	} else {
721 		tp = srng->u.src_ring.cached_tp;
722 	}
723 
724 	if (tp > hp)
725 		return ((tp - hp) / srng->entry_size) - 1;
726 	else
727 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
728 }
729 
730 u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
731 					struct hal_srng *srng)
732 {
733 	u32 *desc;
734 	u32 next_hp;
735 
736 	lockdep_assert_held(&srng->lock);
737 
738 	/* TODO: Using % is expensive, but we have to do this since size of some
739 	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
740 	 * if separate function is defined for rings having power of 2 ring size
741 	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
742 	 * overhead of % by using mask (with &).
743 	 */
744 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
745 
746 	if (next_hp == srng->u.src_ring.cached_tp)
747 		return NULL;
748 
749 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
750 	srng->u.src_ring.hp = next_hp;
751 
752 	/* TODO: Reap functionality is not used by all rings. If particular
753 	 * ring does not use reap functionality, we need not update reap_hp
754 	 * with next_hp pointer. Need to make sure a separate function is used
755 	 * before doing any optimization by removing below code updating
756 	 * reap_hp.
757 	 */
758 	srng->u.src_ring.reap_hp = next_hp;
759 
760 	return desc;
761 }
762 
763 u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
764 				   struct hal_srng *srng)
765 {
766 	u32 *desc;
767 	u32 next_reap_hp;
768 
769 	lockdep_assert_held(&srng->lock);
770 
771 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
772 		       srng->ring_size;
773 
774 	if (next_reap_hp == srng->u.src_ring.cached_tp)
775 		return NULL;
776 
777 	desc = srng->ring_base_vaddr + next_reap_hp;
778 	srng->u.src_ring.reap_hp = next_reap_hp;
779 
780 	return desc;
781 }
782 
783 u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
784 					 struct hal_srng *srng)
785 {
786 	u32 *desc;
787 
788 	lockdep_assert_held(&srng->lock);
789 
790 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
791 		return NULL;
792 
793 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
794 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
795 			      srng->ring_size;
796 
797 	return desc;
798 }
799 
800 u32 *ath11k_hal_srng_src_next_peek(struct ath11k_base *ab, struct hal_srng *srng)
801 {
802 	u32 next_hp;
803 
804 	lockdep_assert_held(&srng->lock);
805 
806 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
807 
808 	if (next_hp != srng->u.src_ring.cached_tp)
809 		return srng->ring_base_vaddr + next_hp;
810 
811 	return NULL;
812 }
813 
814 u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
815 {
816 	lockdep_assert_held(&srng->lock);
817 
818 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
819 	    srng->u.src_ring.cached_tp)
820 		return NULL;
821 
822 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
823 }
824 
/* Begin a locked access window on a ring by snapshotting the HW-owned
 * pointer (tail for SRC rings, head for DST rings) so subsequent
 * peek/get calls see a consistent view. Caller must hold srng->lock;
 * pair with ath11k_hal_srng_access_end().
 */
void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
{
	u32 hp;

	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		/* volatile read: tp_addr is updated by HW/FW */
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	} else {
		hp = READ_ONCE(*srng->u.dst_ring.hp_addr);

		if (hp != srng->u.dst_ring.cached_hp) {
			srng->u.dst_ring.cached_hp = hp;
			/* Make sure descriptor is read after the head
			 * pointer.
			 */
			dma_rmb();
		}

		/* Try to prefetch the next descriptor in the ring */
		if (srng->flags & HAL_SRNG_FLAGS_CACHED)
			ath11k_hal_srng_prefetch_desc(ab, srng);
	}
}
850 
/* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
 * should have been called before this.
 *
 * LMAC rings publish the pointer through shared memory (read by FW);
 * non-LMAC rings write it via MMIO. In both cases a barrier orders the
 * descriptor access against the pointer update. Caller holds srng->lock.
 */
void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			/* Make sure descriptor is written before updating the
			 * head pointer.
			 */
			dma_wmb();
			WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			/* Make sure descriptor is read before updating the
			 * tail pointer.
			 */
			dma_mb();
			WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			/* Assume implementation use an MMIO write accessor
			 * which has the required wmb() so that the descriptor
			 * is written before the updating the head pointer.
			 */
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			/* Make sure descriptor is read before updating the
			 * tail pointer.
			 */
			mb();
			ath11k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	/* Record when this ring was last serviced (used for diagnostics) */
	srng->timestamp = jiffies;
}
905 
/* Set up the WBM idle link descriptor list from @nsbufs scatter buffers:
 * chain the buffers together via a link entry at the end of each one,
 * then program the WBM registers with the scatter layout, head/tail
 * pointers (@end_offset is the byte offset of the last entry in the last
 * buffer) and finally enable the ring.
 */
void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	struct ath11k_buffer_addr *link_addr;
	int i;
	/* Scatter buffer size programmed to HW in units of 64 bytes */
	u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;

	/* Each buffer's trailing slot links to the next buffer */
	link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;

	for (i = 1; i < nsbufs; i++) {
		link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
		link_addr->info1 = FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL);

		link_addr = (void *)sbuf[i].vaddr +
			     HAL_WBM_IDLE_SCATTER_BUF_SIZE;
	}

	/* Scatter buffer geometry and scatter-list mode */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
			   FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
			   FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
				      reg_scatter_buf_sz * nsbufs));
	/* Base address of the first scatter buffer */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_LSB,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_RING_BASE_MSB,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				(u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
				FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
				BASE_ADDR_MATCH_TAG_VAL));

	/* Setup head and tail pointers for the idle list */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[nsbufs - 1].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[nsbufs - 1].paddr >>
				 HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
				      (end_offset >> 2)));
	/* NOTE(review): HEAD_INFO_IX0 is written a second time here with
	 * sbuf[0].paddr, overwriting the sbuf[nsbufs - 1] value above —
	 * looks intentional (matches the programming sequence) but worth
	 * confirming against the HW programming guide.
	 */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));

	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
			   FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
				      sbuf[0].paddr));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
			   FIELD_PREP(
				HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
				((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
			   FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
				      0));
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
			   2 * tot_link_desc);

	/* Enable the SRNG */
	ath11k_hif_write32(ab,
			   HAL_SEQ_WCSS_UMAC_WBM_REG +
			   HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
}
997 
/* Set up the SRNG selected by (type, ring_num, mac_id): initialize the SW
 * ring state in hal->srng_list from @params, derive the per-ring HW register
 * bases, and (for non-LMAC rings) program the ring registers in HW.
 *
 * Returns the ring id on success or a negative error code if no ring id
 * could be resolved for the given type/ring_num/mac_id.
 */
int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath11k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 lmac_idx;
	int i;
	u32 reg_base;

	ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	/* Populate the SW ring state from the caller-supplied parameters
	 * and the per-type template config.
	 */
	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
				params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi_data = params->msi_data;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);
	/* Per-ring lockdep class so independent srng locks don't alias */
	lockdep_set_class(&srng->lock, hal->srng_key + ring_id);

	/* Compute this ring's HW register base in each register group from
	 * the type's group base plus the per-ring stride.
	 */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	/* Clear the ring memory; entry_size is in 32-bit words, hence << 2 */
	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	/* TODO: Add comments on these swap configurations */
	if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
		srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
			       HAL_SRNG_FLAGS_RING_PTR_SWAP;

	reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		/* Source ring: SW owns the head pointer; the tail pointer is
		 * read back from the remote-data (rdp) shared memory slot.
		 */
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->lmac_ring) {
			/* LMAC rings: HP updates go through the shared write
			 * pointer (wrp) memory consumed by FW, not MMIO.
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
						   lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			/* Without shadow registers HP is written directly to
			 * the MMIO register; with shadow registers hp_addr is
			 * set up elsewhere (see
			 * ath11k_hal_srng_update_hp_tp_addr()), so only log.
			 */
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.src_ring.hp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base);
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem);
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->lmac_ring) {
			/* For LMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
						   lmac_idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		} else {
			/* TP register sits at a fixed offset from HP; derive
			 * that offset from the REO1 register pair.
			 */
			if (!ab->hw_params.supports_shadow_regs)
				srng->u.dst_ring.tp_addr =
				(u32 *)((unsigned long)ab->mem + reg_base +
					(HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
			else
				ath11k_dbg(ab, ATH11K_DBG_HAL,
					   "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
					   type, ring_num,
					   reg_base + (HAL_REO1_RING_TP(ab) -
						       HAL_REO1_RING_HP(ab)),
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem);
		}
	}

	/* LMAC rings are initialized by FW; no HW programming from host */
	if (srng_config->lmac_ring)
		return ring_id;

	ath11k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath11k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}
1121 
1122 static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
1123 					      int shadow_cfg_idx,
1124 					  enum hal_ring_type ring_type,
1125 					  int ring_num)
1126 {
1127 	struct hal_srng *srng;
1128 	struct ath11k_hal *hal = &ab->hal;
1129 	int ring_id;
1130 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1131 
1132 	ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
1133 	if (ring_id < 0)
1134 		return;
1135 
1136 	srng = &hal->srng_list[ring_id];
1137 
1138 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1139 		srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1140 						   (unsigned long)ab->mem);
1141 	else
1142 		srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1143 						   (unsigned long)ab->mem);
1144 }
1145 
1146 int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
1147 					 enum hal_ring_type ring_type,
1148 					 int ring_num)
1149 {
1150 	struct ath11k_hal *hal = &ab->hal;
1151 	struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1152 	int shadow_cfg_idx = hal->num_shadow_reg_configured;
1153 	u32 target_reg;
1154 
1155 	if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
1156 		return -EINVAL;
1157 
1158 	hal->num_shadow_reg_configured++;
1159 
1160 	target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
1161 	target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
1162 		ring_num;
1163 
1164 	/* For destination ring, shadow the TP */
1165 	if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1166 		target_reg += HAL_OFFSET_FROM_HP_TO_TP;
1167 
1168 	hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
1169 
1170 	/* update hp/tp addr to hal structure*/
1171 	ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
1172 					  ring_num);
1173 
1174 	ath11k_dbg(ab, ATH11K_DBG_HAL,
1175 		   "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
1176 		  target_reg,
1177 		  HAL_SHADOW_REG(ab, shadow_cfg_idx),
1178 		  shadow_cfg_idx,
1179 		  ring_type, ring_num);
1180 
1181 	return 0;
1182 }
1183 
1184 void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
1185 {
1186 	struct ath11k_hal *hal = &ab->hal;
1187 	int ring_type, ring_num;
1188 
1189 	/* update all the non-CE srngs. */
1190 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
1191 		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1192 
1193 		if (ring_type == HAL_CE_SRC ||
1194 		    ring_type == HAL_CE_DST ||
1195 			ring_type == HAL_CE_DST_STATUS)
1196 			continue;
1197 
1198 		if (srng_config->lmac_ring)
1199 			continue;
1200 
1201 		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
1202 			ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
1203 	}
1204 }
1205 
1206 void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
1207 				       u32 **cfg, u32 *len)
1208 {
1209 	struct ath11k_hal *hal = &ab->hal;
1210 
1211 	*len = hal->num_shadow_reg_configured;
1212 	*cfg = hal->shadow_reg_addr;
1213 }
1214 
1215 void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
1216 					 struct hal_srng *srng)
1217 {
1218 	lockdep_assert_held(&srng->lock);
1219 
1220 	/* check whether the ring is empty. Update the shadow
1221 	 * HP only when then ring isn't empty.
1222 	 */
1223 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
1224 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
1225 		ath11k_hal_srng_access_end(ab, srng);
1226 }
1227 
1228 static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
1229 {
1230 	struct ath11k_hal *hal = &ab->hal;
1231 	struct hal_srng_config *s;
1232 
1233 	hal->srng_config = kmemdup(hw_srng_config_template,
1234 				   sizeof(hw_srng_config_template),
1235 				   GFP_KERNEL);
1236 	if (!hal->srng_config)
1237 		return -ENOMEM;
1238 
1239 	s = &hal->srng_config[HAL_REO_DST];
1240 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
1241 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
1242 	s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
1243 	s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
1244 
1245 	s = &hal->srng_config[HAL_REO_EXCEPTION];
1246 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
1247 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
1248 
1249 	s = &hal->srng_config[HAL_REO_REINJECT];
1250 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
1251 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
1252 
1253 	s = &hal->srng_config[HAL_REO_CMD];
1254 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
1255 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
1256 
1257 	s = &hal->srng_config[HAL_REO_STATUS];
1258 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
1259 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
1260 
1261 	s = &hal->srng_config[HAL_TCL_DATA];
1262 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
1263 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
1264 	s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
1265 	s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
1266 
1267 	s = &hal->srng_config[HAL_TCL_CMD];
1268 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
1269 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
1270 
1271 	s = &hal->srng_config[HAL_TCL_STATUS];
1272 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
1273 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
1274 
1275 	s = &hal->srng_config[HAL_CE_SRC];
1276 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1277 		ATH11K_CE_OFFSET(ab);
1278 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
1279 		ATH11K_CE_OFFSET(ab);
1280 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1281 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1282 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1283 		HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1284 
1285 	s = &hal->srng_config[HAL_CE_DST];
1286 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1287 		ATH11K_CE_OFFSET(ab);
1288 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
1289 		ATH11K_CE_OFFSET(ab);
1290 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1291 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1292 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1293 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1294 
1295 	s = &hal->srng_config[HAL_CE_DST_STATUS];
1296 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
1297 		HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
1298 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
1299 		ATH11K_CE_OFFSET(ab);
1300 	s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1301 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1302 	s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1303 		HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1304 
1305 	s = &hal->srng_config[HAL_WBM_IDLE_LINK];
1306 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
1307 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
1308 
1309 	s = &hal->srng_config[HAL_SW2WBM_RELEASE];
1310 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
1311 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
1312 
1313 	s = &hal->srng_config[HAL_WBM2SW_RELEASE];
1314 	s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1315 	s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
1316 	s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
1317 		HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1318 	s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
1319 
1320 	return 0;
1321 }
1322 
1323 static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
1324 {
1325 	struct ath11k_hal *hal = &ab->hal;
1326 	u32 ring_id;
1327 
1328 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1329 		lockdep_register_key(hal->srng_key + ring_id);
1330 }
1331 
1332 static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
1333 {
1334 	struct ath11k_hal *hal = &ab->hal;
1335 	u32 ring_id;
1336 
1337 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1338 		lockdep_unregister_key(hal->srng_key + ring_id);
1339 }
1340 
1341 int ath11k_hal_srng_init(struct ath11k_base *ab)
1342 {
1343 	struct ath11k_hal *hal = &ab->hal;
1344 	int ret;
1345 
1346 	memset(hal, 0, sizeof(*hal));
1347 
1348 	ret = ath11k_hal_srng_create_config(ab);
1349 	if (ret)
1350 		goto err_hal;
1351 
1352 	ret = ath11k_hal_alloc_cont_rdp(ab);
1353 	if (ret)
1354 		goto err_hal;
1355 
1356 	ret = ath11k_hal_alloc_cont_wrp(ab);
1357 	if (ret)
1358 		goto err_free_cont_rdp;
1359 
1360 	ath11k_hal_register_srng_key(ab);
1361 
1362 	return 0;
1363 
1364 err_free_cont_rdp:
1365 	ath11k_hal_free_cont_rdp(ab);
1366 
1367 err_hal:
1368 	return ret;
1369 }
1370 EXPORT_SYMBOL(ath11k_hal_srng_init);
1371 
1372 void ath11k_hal_srng_deinit(struct ath11k_base *ab)
1373 {
1374 	struct ath11k_hal *hal = &ab->hal;
1375 	int i;
1376 
1377 	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++)
1378 		ab->hal.srng_list[i].initialized = 0;
1379 
1380 	ath11k_hal_unregister_srng_key(ab);
1381 	ath11k_hal_free_cont_rdp(ab);
1382 	ath11k_hal_free_cont_wrp(ab);
1383 	kfree(hal->srng_config);
1384 	hal->srng_config = NULL;
1385 }
1386 EXPORT_SYMBOL(ath11k_hal_srng_deinit);
1387 
1388 void ath11k_hal_srng_clear(struct ath11k_base *ab)
1389 {
1390 	/* No need to memset rdp and wrp memory since each individual
1391 	 * segment would get cleared in ath11k_hal_srng_src_hw_init()
1392 	 * and ath11k_hal_srng_dst_hw_init().
1393 	 */
1394 	memset(ab->hal.srng_list, 0,
1395 	       sizeof(ab->hal.srng_list));
1396 	memset(ab->hal.shadow_reg_addr, 0,
1397 	       sizeof(ab->hal.shadow_reg_addr));
1398 	ab->hal.avail_blk_resource = 0;
1399 	ab->hal.current_blk_index = 0;
1400 	ab->hal.num_shadow_reg_configured = 0;
1401 }
1402 EXPORT_SYMBOL(ath11k_hal_srng_clear);
1403 
1404 void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
1405 {
1406 	struct hal_srng *srng;
1407 	struct ath11k_ext_irq_grp *irq_grp;
1408 	struct ath11k_ce_pipe *ce_pipe;
1409 	int i;
1410 
1411 	ath11k_err(ab, "Last interrupt received for each CE:\n");
1412 	for (i = 0; i < ab->hw_params.ce_count; i++) {
1413 		ce_pipe = &ab->ce.ce_pipe[i];
1414 
1415 		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1416 			continue;
1417 
1418 		ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
1419 			   i, ce_pipe->pipe_num,
1420 			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1421 	}
1422 
1423 	ath11k_err(ab, "\nLast interrupt received for each group:\n");
1424 	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
1425 		irq_grp = &ab->ext_irq_grp[i];
1426 		ath11k_err(ab, "group_id %d %ums before\n",
1427 			   irq_grp->grp_id,
1428 			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
1429 	}
1430 
1431 	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
1432 		srng = &ab->hal.srng_list[i];
1433 
1434 		if (!srng->initialized)
1435 			continue;
1436 
1437 		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
1438 			ath11k_err(ab,
1439 				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
1440 				   srng->ring_id, srng->u.src_ring.hp,
1441 				   srng->u.src_ring.reap_hp,
1442 				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
1443 				   srng->u.src_ring.last_tp,
1444 				   jiffies_to_msecs(jiffies - srng->timestamp));
1445 		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
1446 			ath11k_err(ab,
1447 				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
1448 				   srng->ring_id, srng->u.dst_ring.tp,
1449 				   *srng->u.dst_ring.hp_addr,
1450 				   srng->u.dst_ring.cached_hp,
1451 				   srng->u.dst_ring.last_hp,
1452 				   jiffies_to_msecs(jiffies - srng->timestamp));
1453 	}
1454 }
1455