1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6 #if defined(__FreeBSD__)
7 #include <asm/io.h>
8 #endif
9 #include <linux/dma-mapping.h>
10 #include "hal_tx.h"
11 #include "debug.h"
12 #include "hal_desc.h"
13 #include "hif.h"
14
15 static const struct hal_srng_config hw_srng_config_template[] = {
16 /* TODO: max_rings can be populated by querying HW capabilities */
17 { /* REO_DST */
18 .start_ring_id = HAL_SRNG_RING_ID_REO2SW1,
19 .max_rings = 4,
20 .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
21 .lmac_ring = false,
22 .ring_dir = HAL_SRNG_DIR_DST,
23 .max_size = HAL_REO_REO2SW1_RING_BASE_MSB_RING_SIZE,
24 },
25 { /* REO_EXCEPTION */
26 /* Designating REO2TCL ring as exception ring. This ring is
27 * similar to other REO2SW rings though it is named as REO2TCL.
28 * Any of the REO2SW rings can be used as the exception ring.
29 */
30 .start_ring_id = HAL_SRNG_RING_ID_REO2TCL,
31 .max_rings = 1,
32 .entry_size = sizeof(struct hal_reo_dest_ring) >> 2,
33 .lmac_ring = false,
34 .ring_dir = HAL_SRNG_DIR_DST,
35 .max_size = HAL_REO_REO2TCL_RING_BASE_MSB_RING_SIZE,
36 },
37 { /* REO_REINJECT */
38 .start_ring_id = HAL_SRNG_RING_ID_SW2REO,
39 .max_rings = 1,
40 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
41 .lmac_ring = false,
42 .ring_dir = HAL_SRNG_DIR_SRC,
43 .max_size = HAL_REO_SW2REO_RING_BASE_MSB_RING_SIZE,
44 },
45 { /* REO_CMD */
46 .start_ring_id = HAL_SRNG_RING_ID_REO_CMD,
47 .max_rings = 1,
48 .entry_size = (sizeof(struct hal_tlv_hdr) +
49 sizeof(struct hal_reo_get_queue_stats)) >> 2,
50 .lmac_ring = false,
51 .ring_dir = HAL_SRNG_DIR_SRC,
52 .max_size = HAL_REO_CMD_RING_BASE_MSB_RING_SIZE,
53 },
54 { /* REO_STATUS */
55 .start_ring_id = HAL_SRNG_RING_ID_REO_STATUS,
56 .max_rings = 1,
57 .entry_size = (sizeof(struct hal_tlv_hdr) +
58 sizeof(struct hal_reo_get_queue_stats_status)) >> 2,
59 .lmac_ring = false,
60 .ring_dir = HAL_SRNG_DIR_DST,
61 .max_size = HAL_REO_STATUS_RING_BASE_MSB_RING_SIZE,
62 },
63 { /* TCL_DATA */
64 .start_ring_id = HAL_SRNG_RING_ID_SW2TCL1,
65 .max_rings = 3,
66 .entry_size = (sizeof(struct hal_tlv_hdr) +
67 sizeof(struct hal_tcl_data_cmd)) >> 2,
68 .lmac_ring = false,
69 .ring_dir = HAL_SRNG_DIR_SRC,
70 .max_size = HAL_SW2TCL1_RING_BASE_MSB_RING_SIZE,
71 },
72 { /* TCL_CMD */
73 .start_ring_id = HAL_SRNG_RING_ID_SW2TCL_CMD,
74 .max_rings = 1,
75 .entry_size = (sizeof(struct hal_tlv_hdr) +
76 sizeof(struct hal_tcl_gse_cmd)) >> 2,
77 .lmac_ring = false,
78 .ring_dir = HAL_SRNG_DIR_SRC,
79 .max_size = HAL_SW2TCL1_CMD_RING_BASE_MSB_RING_SIZE,
80 },
81 { /* TCL_STATUS */
82 .start_ring_id = HAL_SRNG_RING_ID_TCL_STATUS,
83 .max_rings = 1,
84 .entry_size = (sizeof(struct hal_tlv_hdr) +
85 sizeof(struct hal_tcl_status_ring)) >> 2,
86 .lmac_ring = false,
87 .ring_dir = HAL_SRNG_DIR_DST,
88 .max_size = HAL_TCL_STATUS_RING_BASE_MSB_RING_SIZE,
89 },
90 { /* CE_SRC */
91 .start_ring_id = HAL_SRNG_RING_ID_CE0_SRC,
92 .max_rings = 12,
93 .entry_size = sizeof(struct hal_ce_srng_src_desc) >> 2,
94 .lmac_ring = false,
95 .ring_dir = HAL_SRNG_DIR_SRC,
96 .max_size = HAL_CE_SRC_RING_BASE_MSB_RING_SIZE,
97 },
98 { /* CE_DST */
99 .start_ring_id = HAL_SRNG_RING_ID_CE0_DST,
100 .max_rings = 12,
101 .entry_size = sizeof(struct hal_ce_srng_dest_desc) >> 2,
102 .lmac_ring = false,
103 .ring_dir = HAL_SRNG_DIR_SRC,
104 .max_size = HAL_CE_DST_RING_BASE_MSB_RING_SIZE,
105 },
106 { /* CE_DST_STATUS */
107 .start_ring_id = HAL_SRNG_RING_ID_CE0_DST_STATUS,
108 .max_rings = 12,
109 .entry_size = sizeof(struct hal_ce_srng_dst_status_desc) >> 2,
110 .lmac_ring = false,
111 .ring_dir = HAL_SRNG_DIR_DST,
112 .max_size = HAL_CE_DST_STATUS_RING_BASE_MSB_RING_SIZE,
113 },
114 { /* WBM_IDLE_LINK */
115 .start_ring_id = HAL_SRNG_RING_ID_WBM_IDLE_LINK,
116 .max_rings = 1,
117 .entry_size = sizeof(struct hal_wbm_link_desc) >> 2,
118 .lmac_ring = false,
119 .ring_dir = HAL_SRNG_DIR_SRC,
120 .max_size = HAL_WBM_IDLE_LINK_RING_BASE_MSB_RING_SIZE,
121 },
122 { /* SW2WBM_RELEASE */
123 .start_ring_id = HAL_SRNG_RING_ID_WBM_SW_RELEASE,
124 .max_rings = 1,
125 .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
126 .lmac_ring = false,
127 .ring_dir = HAL_SRNG_DIR_SRC,
128 .max_size = HAL_SW2WBM_RELEASE_RING_BASE_MSB_RING_SIZE,
129 },
130 { /* WBM2SW_RELEASE */
131 .start_ring_id = HAL_SRNG_RING_ID_WBM2SW0_RELEASE,
132 .max_rings = 5,
133 .entry_size = sizeof(struct hal_wbm_release_ring) >> 2,
134 .lmac_ring = false,
135 .ring_dir = HAL_SRNG_DIR_DST,
136 .max_size = HAL_WBM2SW_RELEASE_RING_BASE_MSB_RING_SIZE,
137 },
138 { /* RXDMA_BUF */
139 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF,
140 .max_rings = 2,
141 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
142 .lmac_ring = true,
143 .ring_dir = HAL_SRNG_DIR_SRC,
144 .max_size = HAL_RXDMA_RING_MAX_SIZE,
145 },
146 { /* RXDMA_DST */
147 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW0,
148 .max_rings = 1,
149 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
150 .lmac_ring = true,
151 .ring_dir = HAL_SRNG_DIR_DST,
152 .max_size = HAL_RXDMA_RING_MAX_SIZE,
153 },
154 { /* RXDMA_MONITOR_BUF */
155 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA2_BUF,
156 .max_rings = 1,
157 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
158 .lmac_ring = true,
159 .ring_dir = HAL_SRNG_DIR_SRC,
160 .max_size = HAL_RXDMA_RING_MAX_SIZE,
161 },
162 { /* RXDMA_MONITOR_STATUS */
163 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_STATBUF,
164 .max_rings = 1,
165 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
166 .lmac_ring = true,
167 .ring_dir = HAL_SRNG_DIR_SRC,
168 .max_size = HAL_RXDMA_RING_MAX_SIZE,
169 },
170 { /* RXDMA_MONITOR_DST */
171 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_RXDMA2SW1,
172 .max_rings = 1,
173 .entry_size = sizeof(struct hal_reo_entrance_ring) >> 2,
174 .lmac_ring = true,
175 .ring_dir = HAL_SRNG_DIR_DST,
176 .max_size = HAL_RXDMA_RING_MAX_SIZE,
177 },
178 { /* RXDMA_MONITOR_DESC */
179 .start_ring_id = HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_DESC,
180 .max_rings = 1,
181 .entry_size = sizeof(struct hal_wbm_buffer_ring) >> 2,
182 .lmac_ring = true,
183 .ring_dir = HAL_SRNG_DIR_SRC,
184 .max_size = HAL_RXDMA_RING_MAX_SIZE,
185 },
186 { /* RXDMA DIR BUF */
187 .start_ring_id = HAL_SRNG_RING_ID_RXDMA_DIR_BUF,
188 .max_rings = 1,
189 .entry_size = 8 >> 2, /* TODO: Define the struct */
190 .lmac_ring = true,
191 .ring_dir = HAL_SRNG_DIR_SRC,
192 .max_size = HAL_RXDMA_RING_MAX_SIZE,
193 },
194 };
195
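/* Allocate the DMA-coherent "ring descriptor pointer" (RDP) block, one u32
 * slot per ring ID. A ring's slot backs the pointer that the hardware
 * updates (tail pointer for source rings, head pointer for destination
 * rings).
 */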
196 static int ath11k_hal_alloc_cont_rdp(struct ath11k_base *ab)
197 {
198 struct ath11k_hal *hal = &ab->hal;
199 size_t size;
200
201 size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
202 hal->rdp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->rdp.paddr,
203 GFP_KERNEL);
204 if (!hal->rdp.vaddr)
205 return -ENOMEM;
206
207 return 0;
208 }
209
210 static void ath11k_hal_free_cont_rdp(struct ath11k_base *ab)
211 {
212 struct ath11k_hal *hal = &ab->hal;
213 size_t size;
214
215 if (!hal->rdp.vaddr)
216 return;
217
218 size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
219 dma_free_coherent(ab->dev, size,
220 hal->rdp.vaddr, hal->rdp.paddr);
221 hal->rdp.vaddr = NULL;
222 }
223
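/* Allocate the DMA-coherent "write pointer" (WRP) block used for LMAC rings,
 * one u32 slot per LMAC ring. The slot backs the pointer the host updates
 * (head pointer for source rings, tail pointer for destination rings) and is
 * read by the firmware.
 */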
224 static int ath11k_hal_alloc_cont_wrp(struct ath11k_base *ab)
225 {
226 struct ath11k_hal *hal = &ab->hal;
227 size_t size;
228
229 size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
230 hal->wrp.vaddr = dma_alloc_coherent(ab->dev, size, &hal->wrp.paddr,
231 GFP_KERNEL);
232 if (!hal->wrp.vaddr)
233 return -ENOMEM;
234
235 return 0;
236 }
237
238 static void ath11k_hal_free_cont_wrp(struct ath11k_base *ab)
239 {
240 struct ath11k_hal *hal = &ab->hal;
241 size_t size;
242
243 if (!hal->wrp.vaddr)
244 return;
245
246 size = sizeof(u32) * HAL_SRNG_NUM_LMAC_RINGS;
247 dma_free_coherent(ab->dev, size,
248 hal->wrp.vaddr, hal->wrp.paddr);
249 hal->wrp.vaddr = NULL;
250 }
251
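/* Program the maximum buffer length for a CE destination ring into its
 * R0 destination control register.
 */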
252 static void ath11k_hal_ce_dst_setup(struct ath11k_base *ab,
253 struct hal_srng *srng, int ring_num)
254 {
255 struct hal_srng_config *srng_config = &ab->hal.srng_config[HAL_CE_DST];
256 u32 addr;
257 u32 val;
258
259 addr = HAL_CE_DST_RING_CTRL +
260 srng_config->reg_start[HAL_SRNG_REG_GRP_R0] +
261 ring_num * srng_config->reg_size[HAL_SRNG_REG_GRP_R0];
262
263 val = ath11k_hif_read32(ab, addr);
264 val &= ~HAL_CE_DST_R0_DEST_CTRL_MAX_LEN;
265 val |= FIELD_PREP(HAL_CE_DST_R0_DEST_CTRL_MAX_LEN,
266 srng->u.dst_ring.max_buffer_length);
267 ath11k_hif_write32(ab, addr, val);
268 }
269
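/* Program a destination SRNG into hardware: MSI address/data (if enabled),
 * ring base address and size, producer interrupt thresholds, the shadow
 * head-pointer address in the RDP block, and finally the misc/enable bits.
 */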
270 static void ath11k_hal_srng_dst_hw_init(struct ath11k_base *ab,
271 struct hal_srng *srng)
272 {
273 struct ath11k_hal *hal = &ab->hal;
274 u32 val;
275 u64 hp_addr;
276 u32 reg_base;
277
278 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
279
280 if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
281 ath11k_hif_write32(ab, reg_base +
282 HAL_REO1_RING_MSI1_BASE_LSB_OFFSET(ab),
283 srng->msi_addr);
284
285 val = FIELD_PREP(HAL_REO1_RING_MSI1_BASE_MSB_ADDR,
286 ((u64)srng->msi_addr >>
287 HAL_ADDR_MSB_REG_SHIFT)) |
288 HAL_REO1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
289 ath11k_hif_write32(ab, reg_base +
290 HAL_REO1_RING_MSI1_BASE_MSB_OFFSET(ab), val);
291
292 ath11k_hif_write32(ab,
293 reg_base + HAL_REO1_RING_MSI1_DATA_OFFSET(ab),
294 srng->msi_data);
295 }
296
297 ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
298
299 val = FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
300 ((u64)srng->ring_base_paddr >>
301 HAL_ADDR_MSB_REG_SHIFT)) |
302 FIELD_PREP(HAL_REO1_RING_BASE_MSB_RING_SIZE,
303 (srng->entry_size * srng->num_entries));
304 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_BASE_MSB_OFFSET(ab), val);
305
306 val = FIELD_PREP(HAL_REO1_RING_ID_RING_ID, srng->ring_id) |
307 FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
308 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_ID_OFFSET(ab), val);
309
310 /* interrupt setup */
311 val = FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_INTR_TMR_THOLD,
312 (srng->intr_timer_thres_us >> 3));
313
314 val |= FIELD_PREP(HAL_REO1_RING_PRDR_INT_SETUP_BATCH_COUNTER_THOLD,
315 (srng->intr_batch_cntr_thres_entries *
316 srng->entry_size));
317
318 ath11k_hif_write32(ab,
319 reg_base + HAL_REO1_RING_PRODUCER_INT_SETUP_OFFSET(ab),
320 val);
321
322 hp_addr = hal->rdp.paddr +
323 ((unsigned long)srng->u.dst_ring.hp_addr -
324 (unsigned long)hal->rdp.vaddr);
325 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_LSB_OFFSET(ab),
326 hp_addr & HAL_ADDR_LSB_REG_MASK);
327 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_HP_ADDR_MSB_OFFSET(ab),
328 hp_addr >> HAL_ADDR_MSB_REG_SHIFT);
329
330 /* Initialize head and tail pointers to indicate ring is empty */
331 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
332 ath11k_hif_write32(ab, reg_base, 0);
333 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_TP_OFFSET(ab), 0);
334 *srng->u.dst_ring.hp_addr = 0;
335
336 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
337 val = 0;
338 if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
339 val |= HAL_REO1_RING_MISC_DATA_TLV_SWAP;
340 if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
341 val |= HAL_REO1_RING_MISC_HOST_FW_SWAP;
342 if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
343 val |= HAL_REO1_RING_MISC_MSI_SWAP;
344 val |= HAL_REO1_RING_MISC_SRNG_ENABLE;
345
346 ath11k_hif_write32(ab, reg_base + HAL_REO1_RING_MISC_OFFSET(ab), val);
347 }
348
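/* Program a source SRNG into hardware: MSI address/data (if enabled), ring
 * base address and size, consumer interrupt thresholds, the optional low
 * threshold interrupt, the shadow tail-pointer address in the RDP block
 * (except for the WBM idle link ring), and finally the misc/enable bits.
 */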
349 static void ath11k_hal_srng_src_hw_init(struct ath11k_base *ab,
350 struct hal_srng *srng)
351 {
352 struct ath11k_hal *hal = &ab->hal;
353 u32 val;
354 u64 tp_addr;
355 u32 reg_base;
356
357 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
358
359 if (srng->flags & HAL_SRNG_FLAGS_MSI_INTR) {
360 ath11k_hif_write32(ab, reg_base +
361 HAL_TCL1_RING_MSI1_BASE_LSB_OFFSET(ab),
362 srng->msi_addr);
363
364 val = FIELD_PREP(HAL_TCL1_RING_MSI1_BASE_MSB_ADDR,
365 ((u64)srng->msi_addr >>
366 HAL_ADDR_MSB_REG_SHIFT)) |
367 HAL_TCL1_RING_MSI1_BASE_MSB_MSI1_ENABLE;
368 ath11k_hif_write32(ab, reg_base +
369 HAL_TCL1_RING_MSI1_BASE_MSB_OFFSET(ab),
370 val);
371
372 ath11k_hif_write32(ab, reg_base +
373 HAL_TCL1_RING_MSI1_DATA_OFFSET(ab),
374 srng->msi_data);
375 }
376
377 ath11k_hif_write32(ab, reg_base, srng->ring_base_paddr);
378
379 val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
380 ((u64)srng->ring_base_paddr >>
381 HAL_ADDR_MSB_REG_SHIFT)) |
382 FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
383 (srng->entry_size * srng->num_entries));
384 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
385
386 val = FIELD_PREP(HAL_REO1_RING_ID_ENTRY_SIZE, srng->entry_size);
387 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_ID_OFFSET(ab), val);
388
389 if (srng->ring_id == HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
390 ath11k_hif_write32(ab, reg_base, (u32)srng->ring_base_paddr);
391 val = FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_BASE_ADDR_MSB,
392 ((u64)srng->ring_base_paddr >>
393 HAL_ADDR_MSB_REG_SHIFT)) |
394 FIELD_PREP(HAL_TCL1_RING_BASE_MSB_RING_SIZE,
395 (srng->entry_size * srng->num_entries));
396 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_BASE_MSB_OFFSET(ab), val);
397 }
398
399 /* interrupt setup */
400 /* NOTE: IPQ8074 v2 requires the interrupt timer threshold in the
401 * unit of 8 usecs instead of 1 usec (as required by v1).
402 */
403 val = FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_INTR_TMR_THOLD,
404 srng->intr_timer_thres_us);
405
406 val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX0_BATCH_COUNTER_THOLD,
407 (srng->intr_batch_cntr_thres_entries *
408 srng->entry_size));
409
410 ath11k_hif_write32(ab,
411 reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX0_OFFSET(ab),
412 val);
413
414 val = 0;
415 if (srng->flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
416 val |= FIELD_PREP(HAL_TCL1_RING_CONSR_INT_SETUP_IX1_LOW_THOLD,
417 srng->u.src_ring.low_threshold);
418 }
419 ath11k_hif_write32(ab,
420 reg_base + HAL_TCL1_RING_CONSR_INT_SETUP_IX1_OFFSET(ab),
421 val);
422
423 if (srng->ring_id != HAL_SRNG_RING_ID_WBM_IDLE_LINK) {
424 tp_addr = hal->rdp.paddr +
425 ((unsigned long)srng->u.src_ring.tp_addr -
426 (unsigned long)hal->rdp.vaddr);
427 ath11k_hif_write32(ab,
428 reg_base + HAL_TCL1_RING_TP_ADDR_LSB_OFFSET(ab),
429 tp_addr & HAL_ADDR_LSB_REG_MASK);
430 ath11k_hif_write32(ab,
431 reg_base + HAL_TCL1_RING_TP_ADDR_MSB_OFFSET(ab),
432 tp_addr >> HAL_ADDR_MSB_REG_SHIFT);
433 }
434
435 /* Initialize head and tail pointers to indicate ring is empty */
436 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
437 ath11k_hif_write32(ab, reg_base, 0);
438 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_TP_OFFSET, 0);
439 *srng->u.src_ring.tp_addr = 0;
440
441 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R0];
442 val = 0;
443 if (srng->flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP)
444 val |= HAL_TCL1_RING_MISC_DATA_TLV_SWAP;
445 if (srng->flags & HAL_SRNG_FLAGS_RING_PTR_SWAP)
446 val |= HAL_TCL1_RING_MISC_HOST_FW_SWAP;
447 if (srng->flags & HAL_SRNG_FLAGS_MSI_SWAP)
448 val |= HAL_TCL1_RING_MISC_MSI_SWAP;
449
450 /* Loop count is not used for SRC rings */
451 val |= HAL_TCL1_RING_MISC_MSI_LOOPCNT_DISABLE;
452
453 val |= HAL_TCL1_RING_MISC_SRNG_ENABLE;
454
455 ath11k_hif_write32(ab, reg_base + HAL_TCL1_RING_MISC_OFFSET(ab), val);
456 }
457
458 static void ath11k_hal_srng_hw_init(struct ath11k_base *ab,
459 struct hal_srng *srng)
460 {
461 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
462 ath11k_hal_srng_src_hw_init(ab, srng);
463 else
464 ath11k_hal_srng_dst_hw_init(ab, srng);
465 }
466
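/* Map a ring type, ring number and MAC id to the global HAL ring id, or
 * return -EINVAL if the ring number is out of range.
 */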
467 static int ath11k_hal_srng_get_ring_id(struct ath11k_base *ab,
468 enum hal_ring_type type,
469 int ring_num, int mac_id)
470 {
471 struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
472 int ring_id;
473
474 if (ring_num >= srng_config->max_rings) {
475 ath11k_warn(ab, "invalid ring number :%d\n", ring_num);
476 return -EINVAL;
477 }
478
479 ring_id = srng_config->start_ring_id + ring_num;
480 if (srng_config->lmac_ring)
481 ring_id += mac_id * HAL_SRNG_RINGS_PER_LMAC;
482
483 if (WARN_ON(ring_id >= HAL_SRNG_RING_ID_MAX))
484 return -EINVAL;
485
486 return ring_id;
487 }
488
489 int ath11k_hal_srng_get_entrysize(struct ath11k_base *ab, u32 ring_type)
490 {
491 struct hal_srng_config *srng_config;
492
493 if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
494 return -EINVAL;
495
496 srng_config = &ab->hal.srng_config[ring_type];
497
498 return (srng_config->entry_size << 2);
499 }
500
501 int ath11k_hal_srng_get_max_entries(struct ath11k_base *ab, u32 ring_type)
502 {
503 struct hal_srng_config *srng_config;
504
505 if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
506 return -EINVAL;
507
508 srng_config = &ab->hal.srng_config[ring_type];
509
510 return (srng_config->max_size / srng_config->entry_size);
511 }
512
513 void ath11k_hal_srng_get_params(struct ath11k_base *ab, struct hal_srng *srng,
514 struct hal_srng_params *params)
515 {
516 params->ring_base_paddr = srng->ring_base_paddr;
517 params->ring_base_vaddr = srng->ring_base_vaddr;
518 params->num_entries = srng->num_entries;
519 params->intr_timer_thres_us = srng->intr_timer_thres_us;
520 params->intr_batch_cntr_thres_entries =
521 srng->intr_batch_cntr_thres_entries;
522 params->low_threshold = srng->u.src_ring.low_threshold;
523 params->msi_addr = srng->msi_addr;
524 params->msi_data = srng->msi_data;
525 params->flags = srng->flags;
526 }
527
528 dma_addr_t ath11k_hal_srng_get_hp_addr(struct ath11k_base *ab,
529 struct hal_srng *srng)
530 {
531 if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
532 return 0;
533
534 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
535 return ab->hal.wrp.paddr +
536 ((unsigned long)srng->u.src_ring.hp_addr -
537 (unsigned long)ab->hal.wrp.vaddr);
538 else
539 return ab->hal.rdp.paddr +
540 ((unsigned long)srng->u.dst_ring.hp_addr -
541 (unsigned long)ab->hal.rdp.vaddr);
542 }
543
544 dma_addr_t ath11k_hal_srng_get_tp_addr(struct ath11k_base *ab,
545 struct hal_srng *srng)
546 {
547 if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
548 return 0;
549
550 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
551 return ab->hal.rdp.paddr +
552 ((unsigned long)srng->u.src_ring.tp_addr -
553 (unsigned long)ab->hal.rdp.vaddr);
554 else
555 return ab->hal.wrp.paddr +
556 ((unsigned long)srng->u.dst_ring.tp_addr -
557 (unsigned long)ab->hal.wrp.vaddr);
558 }
559
560 u32 ath11k_hal_ce_get_desc_size(enum hal_ce_desc type)
561 {
562 switch (type) {
563 case HAL_CE_DESC_SRC:
564 return sizeof(struct hal_ce_srng_src_desc);
565 case HAL_CE_DESC_DST:
566 return sizeof(struct hal_ce_srng_dest_desc);
567 case HAL_CE_DESC_DST_STATUS:
568 return sizeof(struct hal_ce_srng_dst_status_desc);
569 }
570
571 return 0;
572 }
573
574 void ath11k_hal_ce_src_set_desc(void *buf, dma_addr_t paddr, u32 len, u32 id,
575 u8 byte_swap_data)
576 {
577 struct hal_ce_srng_src_desc *desc = (struct hal_ce_srng_src_desc *)buf;
578
579 desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
580 desc->buffer_addr_info =
581 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_ADDR_HI,
582 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
583 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_BYTE_SWAP,
584 byte_swap_data) |
585 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_GATHER, 0) |
586 FIELD_PREP(HAL_CE_SRC_DESC_ADDR_INFO_LEN, len);
587 desc->meta_info = FIELD_PREP(HAL_CE_SRC_DESC_META_INFO_DATA, id);
588 }
589
590 void ath11k_hal_ce_dst_set_desc(void *buf, dma_addr_t paddr)
591 {
592 struct hal_ce_srng_dest_desc *desc =
593 (struct hal_ce_srng_dest_desc *)buf;
594
595 desc->buffer_addr_low = paddr & HAL_ADDR_LSB_REG_MASK;
596 desc->buffer_addr_info =
597 FIELD_PREP(HAL_CE_DEST_DESC_ADDR_INFO_ADDR_HI,
598 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT));
599 }
600
601 u32 ath11k_hal_ce_dst_status_get_length(void *buf)
602 {
603 struct hal_ce_srng_dst_status_desc *desc =
604 (struct hal_ce_srng_dst_status_desc *)buf;
605 u32 len;
606
607 len = FIELD_GET(HAL_CE_DST_STATUS_DESC_FLAGS_LEN, desc->flags);
608 desc->flags &= ~HAL_CE_DST_STATUS_DESC_FLAGS_LEN;
609
610 return len;
611 }
612
613 void ath11k_hal_set_link_desc_addr(struct hal_wbm_link_desc *desc, u32 cookie,
614 dma_addr_t paddr)
615 {
616 desc->buf_addr_info.info0 = FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
617 (paddr & HAL_ADDR_LSB_REG_MASK));
618 desc->buf_addr_info.info1 = FIELD_PREP(BUFFER_ADDR_INFO1_ADDR,
619 ((u64)paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
620 FIELD_PREP(BUFFER_ADDR_INFO1_RET_BUF_MGR, 1) |
621 FIELD_PREP(BUFFER_ADDR_INFO1_SW_COOKIE, cookie);
622 }
623
624 u32 *ath11k_hal_srng_dst_peek(struct ath11k_base *ab, struct hal_srng *srng)
625 {
626 lockdep_assert_held(&srng->lock);
627
628 if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
629 return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
630
631 return NULL;
632 }
633
634 static void ath11k_hal_srng_prefetch_desc(struct ath11k_base *ab,
635 struct hal_srng *srng)
636 {
637 u32 *desc;
638
639 /* prefetch only if desc is available */
640 desc = ath11k_hal_srng_dst_peek(ab, srng);
641 if (likely(desc)) {
642 dma_sync_single_for_cpu(ab->dev, virt_to_phys(desc),
643 (srng->entry_size * sizeof(u32)),
644 DMA_FROM_DEVICE);
645 prefetch(desc);
646 }
647 }
648
649 u32 *ath11k_hal_srng_dst_get_next_entry(struct ath11k_base *ab,
650 struct hal_srng *srng)
651 {
652 u32 *desc;
653
654 lockdep_assert_held(&srng->lock);
655
656 if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
657 return NULL;
658
659 desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
660
661 srng->u.dst_ring.tp += srng->entry_size;
662
663 /* wrap around to start of ring */
664 if (srng->u.dst_ring.tp == srng->ring_size)
665 srng->u.dst_ring.tp = 0;
666
667 /* Try to prefetch the next descriptor in the ring */
668 if (srng->flags & HAL_SRNG_FLAGS_CACHED)
669 ath11k_hal_srng_prefetch_desc(ab, srng);
670
671 return desc;
672 }
673
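/* Returns the number of entries pending processing by the host in the
 * destination ring, i.e. the distance from the tail pointer to the (synced
 * or cached) head pointer.
 */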
674 int ath11k_hal_srng_dst_num_free(struct ath11k_base *ab, struct hal_srng *srng,
675 bool sync_hw_ptr)
676 {
677 u32 tp, hp;
678
679 lockdep_assert_held(&srng->lock);
680
681 tp = srng->u.dst_ring.tp;
682
683 if (sync_hw_ptr) {
684 hp = *srng->u.dst_ring.hp_addr;
685 srng->u.dst_ring.cached_hp = hp;
686 } else {
687 hp = srng->u.dst_ring.cached_hp;
688 }
689
690 if (hp >= tp)
691 return (hp - tp) / srng->entry_size;
692 else
693 return (srng->ring_size - tp + hp) / srng->entry_size;
694 }
695
696 /* Returns number of available entries in src ring */
697 int ath11k_hal_srng_src_num_free(struct ath11k_base *ab, struct hal_srng *srng,
698 bool sync_hw_ptr)
699 {
700 u32 tp, hp;
701
702 lockdep_assert_held(&srng->lock);
703
704 hp = srng->u.src_ring.hp;
705
706 if (sync_hw_ptr) {
707 tp = *srng->u.src_ring.tp_addr;
708 srng->u.src_ring.cached_tp = tp;
709 } else {
710 tp = srng->u.src_ring.cached_tp;
711 }
712
713 if (tp > hp)
714 return ((tp - hp) / srng->entry_size) - 1;
715 else
716 return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
717 }
718
719 u32 *ath11k_hal_srng_src_get_next_entry(struct ath11k_base *ab,
720 struct hal_srng *srng)
721 {
722 u32 *desc;
723 u32 next_hp;
724
725 lockdep_assert_held(&srng->lock);
726
727 /* TODO: Using % is expensive, but we have to do this since the size of
728 * some SRNG rings is not a power of 2 (due to descriptor sizes). Need to
729 * see if a separate function can be defined for rings with a power-of-2
730 * ring size (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid
731 * the overhead of % by using a mask (with &).
732 */
733 next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
734
735 if (next_hp == srng->u.src_ring.cached_tp)
736 return NULL;
737
738 desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
739 srng->u.src_ring.hp = next_hp;
740
741 /* TODO: Reap functionality is not used by all rings. If particular
742 * ring does not use reap functionality, we need not update reap_hp
743 * with next_hp pointer. Need to make sure a separate function is used
744 * before doing any optimization by removing below code updating
745 * reap_hp.
746 */
747 srng->u.src_ring.reap_hp = next_hp;
748
749 return desc;
750 }
751
752 u32 *ath11k_hal_srng_src_reap_next(struct ath11k_base *ab,
753 struct hal_srng *srng)
754 {
755 u32 *desc;
756 u32 next_reap_hp;
757
758 lockdep_assert_held(&srng->lock);
759
760 next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
761 srng->ring_size;
762
763 if (next_reap_hp == srng->u.src_ring.cached_tp)
764 return NULL;
765
766 desc = srng->ring_base_vaddr + next_reap_hp;
767 srng->u.src_ring.reap_hp = next_reap_hp;
768
769 return desc;
770 }
771
772 u32 *ath11k_hal_srng_src_get_next_reaped(struct ath11k_base *ab,
773 struct hal_srng *srng)
774 {
775 u32 *desc;
776
777 lockdep_assert_held(&srng->lock);
778
779 if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
780 return NULL;
781
782 desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
783 srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
784 srng->ring_size;
785
786 return desc;
787 }
788
789 u32 *ath11k_hal_srng_src_peek(struct ath11k_base *ab, struct hal_srng *srng)
790 {
791 lockdep_assert_held(&srng->lock);
792
793 if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
794 srng->u.src_ring.cached_tp)
795 return NULL;
796
797 return srng->ring_base_vaddr + srng->u.src_ring.hp;
798 }
799
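/* Snapshot the hardware-updated pointer (tail for source rings, head for
 * destination rings) at the start of a ring access sequence. Must be called
 * with srng->lock held and paired with ath11k_hal_srng_access_end().
 */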
800 void ath11k_hal_srng_access_begin(struct ath11k_base *ab, struct hal_srng *srng)
801 {
802 lockdep_assert_held(&srng->lock);
803
804 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
805 srng->u.src_ring.cached_tp =
806 *(volatile u32 *)srng->u.src_ring.tp_addr;
807 } else {
808 srng->u.dst_ring.cached_hp = *srng->u.dst_ring.hp_addr;
809
810 /* Try to prefetch the next descriptor in the ring */
811 if (srng->flags & HAL_SRNG_FLAGS_CACHED)
812 ath11k_hal_srng_prefetch_desc(ab, srng);
813 }
814 }
815
816 /* Update cached ring head/tail pointers to HW. ath11k_hal_srng_access_begin()
817 * should have been called before this.
818 */
819 void ath11k_hal_srng_access_end(struct ath11k_base *ab, struct hal_srng *srng)
820 {
821 lockdep_assert_held(&srng->lock);
822
823 /* TODO: See if we need a write memory barrier here */
824 if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
825 /* For LMAC rings, ring pointer updates are done through FW and
826 * hence written to a shared memory location that is read by FW
827 */
828 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
829 srng->u.src_ring.last_tp =
830 *(volatile u32 *)srng->u.src_ring.tp_addr;
831 *srng->u.src_ring.hp_addr = srng->u.src_ring.hp;
832 } else {
833 srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
834 *srng->u.dst_ring.tp_addr = srng->u.dst_ring.tp;
835 }
836 } else {
837 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
838 srng->u.src_ring.last_tp =
839 *(volatile u32 *)srng->u.src_ring.tp_addr;
840 ath11k_hif_write32(ab,
841 (unsigned long)srng->u.src_ring.hp_addr -
842 (unsigned long)ab->mem,
843 srng->u.src_ring.hp);
844 } else {
845 srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
846 ath11k_hif_write32(ab,
847 (unsigned long)srng->u.dst_ring.tp_addr -
848 (unsigned long)ab->mem,
849 srng->u.dst_ring.tp);
850 }
851 }
852
853 srng->timestamp = jiffies;
854 }
855
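/* Chain the WBM idle link scatter buffers together, program the scatter
 * list base/size registers and the idle list head/tail pointers, then
 * enable the WBM idle link ring.
 */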
856 void ath11k_hal_setup_link_idle_list(struct ath11k_base *ab,
857 struct hal_wbm_idle_scatter_list *sbuf,
858 u32 nsbufs, u32 tot_link_desc,
859 u32 end_offset)
860 {
861 struct ath11k_buffer_addr *link_addr;
862 int i;
863 u32 reg_scatter_buf_sz = HAL_WBM_IDLE_SCATTER_BUF_SIZE / 64;
864
865 #if defined(__linux__)
866 link_addr = (void *)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE;
867 #elif defined(__FreeBSD__)
868 link_addr = (void *)((uintptr_t)sbuf[0].vaddr + HAL_WBM_IDLE_SCATTER_BUF_SIZE);
869 #endif
870
871 for (i = 1; i < nsbufs; i++) {
872 link_addr->info0 = sbuf[i].paddr & HAL_ADDR_LSB_REG_MASK;
873 link_addr->info1 = FIELD_PREP(
874 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
875 (u64)sbuf[i].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
876 FIELD_PREP(
877 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
878 BASE_ADDR_MATCH_TAG_VAL);
879
880 #if defined(__linux__)
881 link_addr = (void *)sbuf[i].vaddr +
882 HAL_WBM_IDLE_SCATTER_BUF_SIZE;
883 #elif defined(__FreeBSD__)
884 link_addr = (void *)((uintptr_t)sbuf[i].vaddr +
885 HAL_WBM_IDLE_SCATTER_BUF_SIZE);
886 #endif
887 }
888
889 ath11k_hif_write32(ab,
890 HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_CONTROL_ADDR,
891 FIELD_PREP(HAL_WBM_SCATTER_BUFFER_SIZE, reg_scatter_buf_sz) |
892 FIELD_PREP(HAL_WBM_LINK_DESC_IDLE_LIST_MODE, 0x1));
893 ath11k_hif_write32(ab,
894 HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_R0_IDLE_LIST_SIZE_ADDR,
895 FIELD_PREP(HAL_WBM_SCATTER_RING_SIZE_OF_IDLE_LINK_DESC_LIST,
896 reg_scatter_buf_sz * nsbufs));
897 ath11k_hif_write32(ab,
898 HAL_SEQ_WCSS_UMAC_WBM_REG +
899 HAL_WBM_SCATTERED_RING_BASE_LSB,
900 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
901 sbuf[0].paddr & HAL_ADDR_LSB_REG_MASK));
902 ath11k_hif_write32(ab,
903 HAL_SEQ_WCSS_UMAC_WBM_REG +
904 HAL_WBM_SCATTERED_RING_BASE_MSB,
905 FIELD_PREP(
906 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
907 (u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT) |
908 FIELD_PREP(
909 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_MATCH_TAG,
910 BASE_ADDR_MATCH_TAG_VAL));
911
912 /* Setup head and tail pointers for the idle list */
913 ath11k_hif_write32(ab,
914 HAL_SEQ_WCSS_UMAC_WBM_REG +
915 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
916 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
917 sbuf[nsbufs - 1].paddr));
918 ath11k_hif_write32(ab,
919 HAL_SEQ_WCSS_UMAC_WBM_REG +
920 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX1,
921 FIELD_PREP(
922 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
923 ((u64)sbuf[nsbufs - 1].paddr >>
924 HAL_ADDR_MSB_REG_SHIFT)) |
925 FIELD_PREP(HAL_WBM_SCATTERED_DESC_HEAD_P_OFFSET_IX1,
926 (end_offset >> 2)));
927 ath11k_hif_write32(ab,
928 HAL_SEQ_WCSS_UMAC_WBM_REG +
929 HAL_WBM_SCATTERED_DESC_PTR_HEAD_INFO_IX0,
930 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
931 sbuf[0].paddr));
932
933 ath11k_hif_write32(ab,
934 HAL_SEQ_WCSS_UMAC_WBM_REG +
935 HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX0,
936 FIELD_PREP(BUFFER_ADDR_INFO0_ADDR,
937 sbuf[0].paddr));
938 ath11k_hif_write32(ab,
939 HAL_SEQ_WCSS_UMAC_WBM_REG +
940 HAL_WBM_SCATTERED_DESC_PTR_TAIL_INFO_IX1,
941 FIELD_PREP(
942 HAL_WBM_SCATTERED_DESC_MSB_BASE_ADDR_39_32,
943 ((u64)sbuf[0].paddr >> HAL_ADDR_MSB_REG_SHIFT)) |
944 FIELD_PREP(HAL_WBM_SCATTERED_DESC_TAIL_P_OFFSET_IX1,
945 0));
946 ath11k_hif_write32(ab,
947 HAL_SEQ_WCSS_UMAC_WBM_REG +
948 HAL_WBM_SCATTERED_DESC_PTR_HP_ADDR,
949 2 * tot_link_desc);
950
951 /* Enable the SRNG */
952 ath11k_hif_write32(ab,
953 HAL_SEQ_WCSS_UMAC_WBM_REG +
954 HAL_WBM_IDLE_LINK_RING_MISC_ADDR(ab), 0x40);
955 }
956
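/* Set up a single SRNG: resolve its ring id, initialize the software ring
 * state (pointers, thresholds, flags, shadow hp/tp locations), zero the ring
 * memory and, for non-LMAC rings, program the ring into hardware. Returns
 * the ring id on success or a negative error code.
 */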
957 int ath11k_hal_srng_setup(struct ath11k_base *ab, enum hal_ring_type type,
958 int ring_num, int mac_id,
959 struct hal_srng_params *params)
960 {
961 struct ath11k_hal *hal = &ab->hal;
962 struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
963 struct hal_srng *srng;
964 int ring_id;
965 u32 lmac_idx;
966 int i;
967 u32 reg_base;
968
969 ring_id = ath11k_hal_srng_get_ring_id(ab, type, ring_num, mac_id);
970 if (ring_id < 0)
971 return ring_id;
972
973 srng = &hal->srng_list[ring_id];
974
975 srng->ring_id = ring_id;
976 srng->ring_dir = srng_config->ring_dir;
977 srng->ring_base_paddr = params->ring_base_paddr;
978 srng->ring_base_vaddr = params->ring_base_vaddr;
979 srng->entry_size = srng_config->entry_size;
980 srng->num_entries = params->num_entries;
981 srng->ring_size = srng->entry_size * srng->num_entries;
982 srng->intr_batch_cntr_thres_entries =
983 params->intr_batch_cntr_thres_entries;
984 srng->intr_timer_thres_us = params->intr_timer_thres_us;
985 srng->flags = params->flags;
986 srng->msi_addr = params->msi_addr;
987 srng->msi_data = params->msi_data;
988 srng->initialized = 1;
989 spin_lock_init(&srng->lock);
990 lockdep_set_class(&srng->lock, hal->srng_key + ring_id);
991
992 for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
993 srng->hwreg_base[i] = srng_config->reg_start[i] +
994 (ring_num * srng_config->reg_size[i]);
995 }
996
997 memset(srng->ring_base_vaddr, 0,
998 (srng->entry_size * srng->num_entries) << 2);
999
1000 /* TODO: Add comments on these swap configurations */
1001 if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN))
1002 srng->flags |= HAL_SRNG_FLAGS_MSI_SWAP | HAL_SRNG_FLAGS_DATA_TLV_SWAP |
1003 HAL_SRNG_FLAGS_RING_PTR_SWAP;
1004
1005 reg_base = srng->hwreg_base[HAL_SRNG_REG_GRP_R2];
1006
1007 if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
1008 srng->u.src_ring.hp = 0;
1009 srng->u.src_ring.cached_tp = 0;
1010 srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
1011 srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
1012 srng->u.src_ring.low_threshold = params->low_threshold *
1013 srng->entry_size;
1014 if (srng_config->lmac_ring) {
1015 lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1016 srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
1017 lmac_idx);
1018 srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1019 } else {
1020 if (!ab->hw_params.supports_shadow_regs)
1021 srng->u.src_ring.hp_addr =
1022 (u32 *)((unsigned long)ab->mem + reg_base);
1023 else
1024 ath11k_dbg(ab, ATH11K_DBG_HAL,
1025 "type %d ring_num %d reg_base 0x%x shadow 0x%lx\n",
1026 type, ring_num,
1027 reg_base,
1028 (unsigned long)srng->u.src_ring.hp_addr -
1029 (unsigned long)ab->mem);
1030 }
1031 } else {
1032 /* During initialization loop count in all the descriptors
1033 * will be set to zero, and HW will set it to 1 on completing
1034 * descriptor update in first loop, and increments it by 1 on
1035 * subsequent loops (loop count wraps around after reaching
1036 * 0xffff). The 'loop_cnt' in SW ring state is the expected
1037 * loop count in descriptors updated by HW (to be processed
1038 * by SW).
1039 */
1040 srng->u.dst_ring.loop_cnt = 1;
1041 srng->u.dst_ring.tp = 0;
1042 srng->u.dst_ring.cached_hp = 0;
1043 srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
1044 if (srng_config->lmac_ring) {
1045 /* For LMAC rings, tail pointer updates will be done
1046 * through FW by writing to a shared memory location
1047 */
1048 lmac_idx = ring_id - HAL_SRNG_RING_ID_LMAC1_ID_START;
1049 srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
1050 lmac_idx);
1051 srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
1052 } else {
1053 if (!ab->hw_params.supports_shadow_regs)
1054 srng->u.dst_ring.tp_addr =
1055 (u32 *)((unsigned long)ab->mem + reg_base +
1056 (HAL_REO1_RING_TP(ab) - HAL_REO1_RING_HP(ab)));
1057 else
1058 ath11k_dbg(ab, ATH11K_DBG_HAL,
1059 "type %d ring_num %d target_reg 0x%x shadow 0x%lx\n",
1060 type, ring_num,
1061 reg_base + (HAL_REO1_RING_TP(ab) -
1062 HAL_REO1_RING_HP(ab)),
1063 (unsigned long)srng->u.dst_ring.tp_addr -
1064 (unsigned long)ab->mem);
1065 }
1066 }
1067
1068 if (srng_config->lmac_ring)
1069 return ring_id;
1070
1071 ath11k_hal_srng_hw_init(ab, srng);
1072
1073 if (type == HAL_CE_DST) {
1074 srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
1075 ath11k_hal_ce_dst_setup(ab, srng, ring_num);
1076 }
1077
1078 return ring_id;
1079 }
1080
1081 static void ath11k_hal_srng_update_hp_tp_addr(struct ath11k_base *ab,
1082 int shadow_cfg_idx,
1083 enum hal_ring_type ring_type,
1084 int ring_num)
1085 {
1086 struct hal_srng *srng;
1087 struct ath11k_hal *hal = &ab->hal;
1088 int ring_id;
1089 struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1090
1091 ring_id = ath11k_hal_srng_get_ring_id(ab, ring_type, ring_num, 0);
1092 if (ring_id < 0)
1093 return;
1094
1095 srng = &hal->srng_list[ring_id];
1096
1097 if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1098 srng->u.dst_ring.tp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1099 (unsigned long)ab->mem);
1100 else
1101 srng->u.src_ring.hp_addr = (u32 *)(HAL_SHADOW_REG(ab, shadow_cfg_idx) +
1102 (unsigned long)ab->mem);
1103 }
1104
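/* Assign the next free shadow register slot to the given ring, point it at
 * the ring's head pointer register (or tail pointer register for destination
 * rings) and update the ring's hp/tp address to use the shadow register.
 */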
1105 int ath11k_hal_srng_update_shadow_config(struct ath11k_base *ab,
1106 enum hal_ring_type ring_type,
1107 int ring_num)
1108 {
1109 struct ath11k_hal *hal = &ab->hal;
1110 struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1111 int shadow_cfg_idx = hal->num_shadow_reg_configured;
1112 u32 target_reg;
1113
1114 if (shadow_cfg_idx >= HAL_SHADOW_NUM_REGS)
1115 return -EINVAL;
1116
1117 hal->num_shadow_reg_configured++;
1118
1119 target_reg = srng_config->reg_start[HAL_HP_OFFSET_IN_REG_START];
1120 target_reg += srng_config->reg_size[HAL_HP_OFFSET_IN_REG_START] *
1121 ring_num;
1122
1123 /* For destination ring, shadow the TP */
1124 if (srng_config->ring_dir == HAL_SRNG_DIR_DST)
1125 target_reg += HAL_OFFSET_FROM_HP_TO_TP;
1126
1127 hal->shadow_reg_addr[shadow_cfg_idx] = target_reg;
1128
1129 /* update hp/tp addr in the hal structure */
1130 ath11k_hal_srng_update_hp_tp_addr(ab, shadow_cfg_idx, ring_type,
1131 ring_num);
1132
1133 ath11k_dbg(ab, ATH11K_DBG_HAL,
1134 "update shadow config target_reg %x shadow reg 0x%x shadow_idx 0x%x ring_type %d ring num %d",
1135 target_reg,
1136 HAL_SHADOW_REG(ab, shadow_cfg_idx),
1137 shadow_cfg_idx,
1138 ring_type, ring_num);
1139
1140 return 0;
1141 }
1142
1143 void ath11k_hal_srng_shadow_config(struct ath11k_base *ab)
1144 {
1145 struct ath11k_hal *hal = &ab->hal;
1146 int ring_type, ring_num;
1147
1148 /* update all the non-CE srngs. */
1149 for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
1150 struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
1151
1152 if (ring_type == HAL_CE_SRC ||
1153 ring_type == HAL_CE_DST ||
1154 ring_type == HAL_CE_DST_STATUS)
1155 continue;
1156
1157 if (srng_config->lmac_ring)
1158 continue;
1159
1160 for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
1161 ath11k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
1162 }
1163 }
1164
1165 void ath11k_hal_srng_get_shadow_config(struct ath11k_base *ab,
1166 u32 **cfg, u32 *len)
1167 {
1168 struct ath11k_hal *hal = &ab->hal;
1169
1170 *len = hal->num_shadow_reg_configured;
1171 *cfg = hal->shadow_reg_addr;
1172 }
1173
1174 void ath11k_hal_srng_shadow_update_hp_tp(struct ath11k_base *ab,
1175 struct hal_srng *srng)
1176 {
1177 lockdep_assert_held(&srng->lock);
1178
1179 /* check whether the ring is empty. Update the shadow
1180 * HP only when the ring isn't empty.
1181 */
1182 if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
1183 *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
1184 ath11k_hal_srng_access_end(ab, srng);
1185 }
1186
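/* Duplicate the SRNG configuration template and fill in the hardware
 * specific register offsets (reg_start/reg_size) for the ring types that
 * are accessed through registers.
 */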
1187 static int ath11k_hal_srng_create_config(struct ath11k_base *ab)
1188 {
1189 struct ath11k_hal *hal = &ab->hal;
1190 struct hal_srng_config *s;
1191
1192 hal->srng_config = kmemdup(hw_srng_config_template,
1193 sizeof(hw_srng_config_template),
1194 GFP_KERNEL);
1195 if (!hal->srng_config)
1196 return -ENOMEM;
1197
1198 s = &hal->srng_config[HAL_REO_DST];
1199 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_BASE_LSB(ab);
1200 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_RING_HP(ab);
1201 s->reg_size[0] = HAL_REO2_RING_BASE_LSB(ab) - HAL_REO1_RING_BASE_LSB(ab);
1202 s->reg_size[1] = HAL_REO2_RING_HP(ab) - HAL_REO1_RING_HP(ab);
1203
1204 s = &hal->srng_config[HAL_REO_EXCEPTION];
1205 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_BASE_LSB(ab);
1206 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_TCL_RING_HP(ab);
1207
1208 s = &hal->srng_config[HAL_REO_REINJECT];
1209 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_BASE_LSB(ab);
1210 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_SW2REO_RING_HP(ab);
1211
1212 s = &hal->srng_config[HAL_REO_CMD];
1213 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_RING_BASE_LSB(ab);
1214 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_CMD_HP(ab);
1215
1216 s = &hal->srng_config[HAL_REO_STATUS];
1217 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_RING_BASE_LSB(ab);
1218 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO_STATUS_HP(ab);
1219
1220 s = &hal->srng_config[HAL_TCL_DATA];
1221 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_BASE_LSB(ab);
1222 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL1_RING_HP;
1223 s->reg_size[0] = HAL_TCL2_RING_BASE_LSB(ab) - HAL_TCL1_RING_BASE_LSB(ab);
1224 s->reg_size[1] = HAL_TCL2_RING_HP - HAL_TCL1_RING_HP;
1225
1226 s = &hal->srng_config[HAL_TCL_CMD];
1227 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_BASE_LSB(ab);
1228 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_RING_HP;
1229
1230 s = &hal->srng_config[HAL_TCL_STATUS];
1231 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_BASE_LSB(ab);
1232 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_TCL_REG + HAL_TCL_STATUS_RING_HP;
1233
1234 s = &hal->srng_config[HAL_CE_SRC];
1235 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1236 ATH11K_CE_OFFSET(ab);
1237 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab) + HAL_CE_DST_RING_HP +
1238 ATH11K_CE_OFFSET(ab);
1239 s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1240 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1241 s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_SRC_REG(ab) -
1242 HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab);
1243
1244 s = &hal->srng_config[HAL_CE_DST];
1245 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_BASE_LSB +
1246 ATH11K_CE_OFFSET(ab);
1247 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_RING_HP +
1248 ATH11K_CE_OFFSET(ab);
1249 s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1250 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1251 s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1252 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1253
1254 s = &hal->srng_config[HAL_CE_DST_STATUS];
1255 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) +
1256 HAL_CE_DST_STATUS_RING_BASE_LSB + ATH11K_CE_OFFSET(ab);
1257 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab) + HAL_CE_DST_STATUS_RING_HP +
1258 ATH11K_CE_OFFSET(ab);
1259 s->reg_size[0] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1260 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1261 s->reg_size[1] = HAL_SEQ_WCSS_UMAC_CE1_DST_REG(ab) -
1262 HAL_SEQ_WCSS_UMAC_CE0_DST_REG(ab);
1263
1264 s = &hal->srng_config[HAL_WBM_IDLE_LINK];
1265 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_BASE_LSB(ab);
1266 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_IDLE_LINK_RING_HP;
1267
1268 s = &hal->srng_config[HAL_SW2WBM_RELEASE];
1269 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_BASE_LSB(ab);
1270 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM_RELEASE_RING_HP;
1271
1272 s = &hal->srng_config[HAL_WBM2SW_RELEASE];
1273 s->reg_start[0] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1274 s->reg_start[1] = HAL_SEQ_WCSS_UMAC_WBM_REG + HAL_WBM0_RELEASE_RING_HP;
1275 s->reg_size[0] = HAL_WBM1_RELEASE_RING_BASE_LSB(ab) -
1276 HAL_WBM0_RELEASE_RING_BASE_LSB(ab);
1277 s->reg_size[1] = HAL_WBM1_RELEASE_RING_HP - HAL_WBM0_RELEASE_RING_HP;
1278
1279 return 0;
1280 }
1281
1282 static void ath11k_hal_register_srng_key(struct ath11k_base *ab)
1283 {
1284 #if defined(__linux__)
1285 struct ath11k_hal *hal = &ab->hal;
1286 u32 ring_id;
1287
1288 for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1289 lockdep_register_key(hal->srng_key + ring_id);
1290 #endif
1291 }
1292
1293 static void ath11k_hal_unregister_srng_key(struct ath11k_base *ab)
1294 {
1295 #if defined(__linux__)
1296 struct ath11k_hal *hal = &ab->hal;
1297 u32 ring_id;
1298
1299 for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
1300 lockdep_unregister_key(hal->srng_key + ring_id);
1301 #endif
1302 }
1303
1304 int ath11k_hal_srng_init(struct ath11k_base *ab)
1305 {
1306 struct ath11k_hal *hal = &ab->hal;
1307 int ret;
1308
1309 memset(hal, 0, sizeof(*hal));
1310
1311 ret = ath11k_hal_srng_create_config(ab);
1312 if (ret)
1313 goto err_hal;
1314
1315 ret = ath11k_hal_alloc_cont_rdp(ab);
1316 if (ret)
1317 goto err_hal;
1318
1319 ret = ath11k_hal_alloc_cont_wrp(ab);
1320 if (ret)
1321 goto err_free_cont_rdp;
1322
1323 ath11k_hal_register_srng_key(ab);
1324
1325 return 0;
1326
1327 err_free_cont_rdp:
1328 ath11k_hal_free_cont_rdp(ab);
1329
1330 err_hal:
1331 return ret;
1332 }
1333 EXPORT_SYMBOL(ath11k_hal_srng_init);
1334
1335 void ath11k_hal_srng_deinit(struct ath11k_base *ab)
1336 {
1337 struct ath11k_hal *hal = &ab->hal;
1338
1339 ath11k_hal_unregister_srng_key(ab);
1340 ath11k_hal_free_cont_rdp(ab);
1341 ath11k_hal_free_cont_wrp(ab);
1342 kfree(hal->srng_config);
1343 }
1344 EXPORT_SYMBOL(ath11k_hal_srng_deinit);
1345
1346 void ath11k_hal_dump_srng_stats(struct ath11k_base *ab)
1347 {
1348 struct hal_srng *srng;
1349 struct ath11k_ext_irq_grp *irq_grp;
1350 struct ath11k_ce_pipe *ce_pipe;
1351 int i;
1352
1353 ath11k_err(ab, "Last interrupt received for each CE:\n");
1354 for (i = 0; i < ab->hw_params.ce_count; i++) {
1355 ce_pipe = &ab->ce.ce_pipe[i];
1356
1357 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1358 continue;
1359
1360 #if defined(__linux__)
1361 ath11k_err(ab, "CE_id %d pipe_num %d %ums before\n",
1362 i, ce_pipe->pipe_num,
1363 jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1364 #elif defined(__FreeBSD__)
1365 ath11k_err(ab, "CE_id %d pipe_num %d %jums before\n",
1366 i, ce_pipe->pipe_num,
1367 (uintmax_t)jiffies_to_msecs(jiffies - ce_pipe->timestamp));
1368 #endif
1369 }
1370
1371 ath11k_err(ab, "\nLast interrupt received for each group:\n");
1372 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
1373 irq_grp = &ab->ext_irq_grp[i];
1374 #if defined(__linux__)
1375 ath11k_err(ab, "group_id %d %ums before\n",
1376 irq_grp->grp_id,
1377 jiffies_to_msecs(jiffies - irq_grp->timestamp));
1378 #elif defined(__FreeBSD__)
1379 ath11k_err(ab, "group_id %d %jums before\n",
1380 irq_grp->grp_id,
1381 (uintmax_t)jiffies_to_msecs(jiffies - irq_grp->timestamp));
1382 #endif
1383 }
1384
1385 for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
1386 srng = &ab->hal.srng_list[i];
1387
1388 if (!srng->initialized)
1389 continue;
1390
1391 if (srng->ring_dir == HAL_SRNG_DIR_SRC)
1392 ath11k_err(ab,
1393 #if defined(__linux__)
1394 "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
1395 #elif defined(__FreeBSD__)
1396 "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %jums\n",
1397 #endif
1398 srng->ring_id, srng->u.src_ring.hp,
1399 srng->u.src_ring.reap_hp,
1400 *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
1401 srng->u.src_ring.last_tp,
1402 #if defined(__linux__)
1403 jiffies_to_msecs(jiffies - srng->timestamp));
1404 #elif defined(__FreeBSD__)
1405 (uintmax_t)jiffies_to_msecs(jiffies - srng->timestamp));
1406 #endif
1407 else if (srng->ring_dir == HAL_SRNG_DIR_DST)
1408 ath11k_err(ab,
1409 #if defined(__linux__)
1410 "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
1411 #elif defined(__FreeBSD__)
1412 "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %jums\n",
1413 #endif
1414 srng->ring_id, srng->u.dst_ring.tp,
1415 *srng->u.dst_ring.hp_addr,
1416 srng->u.dst_ring.cached_hp,
1417 srng->u.dst_ring.last_hp,
1418 #if defined(__linux__)
1419 jiffies_to_msecs(jiffies - srng->timestamp));
1420 #elif defined(__FreeBSD__)
1421 (uintmax_t)jiffies_to_msecs(jiffies - srng->timestamp));
1422 #endif
1423 }
1424 }
1425