xref: /linux/drivers/net/wireless/ath/ath12k/hal.c (revision ca220141fa8ebae09765a242076b2b77338106b0)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 #include <linux/dma-mapping.h>
7 #include "debug.h"
8 #include "hif.h"
9 
/* Configure a CE destination ring through the chip-specific HAL op. */
static void ath12k_hal_ce_dst_setup(struct ath12k_base *ab,
				    struct hal_srng *srng, int ring_num)
{
	ab->hal.ops->ce_dst_setup(ab, srng, ring_num);
}
15 
/* Program source-ring registers through the chip-specific HAL op. */
static void ath12k_hal_srng_src_hw_init(struct ath12k_base *ab,
					struct hal_srng *srng)
{
	ab->hal.ops->srng_src_hw_init(ab, srng);
}
21 
/* Program destination-ring registers through the chip-specific HAL op. */
static void ath12k_hal_srng_dst_hw_init(struct ath12k_base *ab,
					struct hal_srng *srng)
{
	ab->hal.ops->srng_dst_hw_init(ab, srng);
}
27 
/* Dispatch UMAC SRNG pointer-address setup to the chip-specific HAL op. */
static void ath12k_hal_set_umac_srng_ptr_addr(struct ath12k_base *ab,
					      struct hal_srng *srng)
{
	ab->hal.ops->set_umac_srng_ptr_addr(ab, srng);
}
33 
/* Map (ring type, ring number, MAC id) to a global ring ID via the
 * chip-specific op. Callers treat a negative return as an error code.
 */
static int ath12k_hal_srng_get_ring_id(struct ath12k_hal *hal,
				       enum hal_ring_type type,
				       int ring_num, int mac_id)
{
	return hal->ops->srng_get_ring_id(hal, type, ring_num, mac_id);
}
40 
41 int ath12k_hal_srng_update_shadow_config(struct ath12k_base *ab,
42 					 enum hal_ring_type ring_type,
43 					 int ring_num)
44 {
45 	return ab->hal.ops->srng_update_shadow_config(ab, ring_type,
46 							  ring_num);
47 }
48 
/* Return the CE descriptor size for @type via the chip-specific op. */
u32 ath12k_hal_ce_get_desc_size(struct ath12k_hal *hal, enum hal_ce_desc type)
{
	return hal->ops->ce_get_desc_size(type);
}
53 
/* Dispatch programming of DSCP-TID map @id to the chip-specific op. */
void ath12k_hal_tx_set_dscp_tid_map(struct ath12k_base *ab, int id)
{
	ab->hal.ops->tx_set_dscp_tid_map(ab, id);
}
58 
/* Write TX bank register @bank_id with @bank_config via the chip op. */
void ath12k_hal_tx_configure_bank_register(struct ath12k_base *ab,
					   u32 bank_config, u8 bank_id)
{
	ab->hal.ops->tx_configure_bank_register(ab, bank_config, bank_id);
}
64 
/* Enable REO queue LUT address reads via the chip-specific op. */
void ath12k_hal_reoq_lut_addr_read_enable(struct ath12k_base *ab)
{
	ab->hal.ops->reoq_lut_addr_read_enable(ab);
}
69 
/* Program the maximum peer ID for the REO queue LUT via the chip op. */
void ath12k_hal_reoq_lut_set_max_peerid(struct ath12k_base *ab)
{
	ab->hal.ops->reoq_lut_set_max_peerid(ab);
}
74 
/* Write the ML REO queue LUT base DMA address via the chip-specific op. */
void ath12k_hal_write_ml_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr)
{
	ab->hal.ops->write_ml_reoq_lut_addr(ab, paddr);
}
79 
/* Write the REO queue LUT base DMA address via the chip-specific op. */
void ath12k_hal_write_reoq_lut_addr(struct ath12k_base *ab, dma_addr_t paddr)
{
	ab->hal.ops->write_reoq_lut_addr(ab, paddr);
}
84 
/* Hand WBM idle-link scatter-list setup to the chip-specific op. */
void ath12k_hal_setup_link_idle_list(struct ath12k_base *ab,
				     struct hal_wbm_idle_scatter_list *sbuf,
				     u32 nsbufs, u32 tot_link_desc,
				     u32 end_offset)
{
	ab->hal.ops->setup_link_idle_list(ab, sbuf, nsbufs, tot_link_desc,
					      end_offset);
}
93 
/* Perform chip-specific REO HW setup with the given ring hash map. */
void ath12k_hal_reo_hw_setup(struct ath12k_base *ab, u32 ring_hash_map)
{
	ab->hal.ops->reo_hw_setup(ab, ring_hash_map);
}
98 
/* Initialize the REO command ring via the chip-specific op. */
void ath12k_hal_reo_init_cmd_ring(struct ath12k_base *ab, struct hal_srng *srng)
{
	ab->hal.ops->reo_init_cmd_ring(ab, srng);
}
103 
/* Clear the REO shared queue-address cache via the chip-specific op. */
void ath12k_hal_reo_shared_qaddr_cache_clear(struct ath12k_base *ab)
{
	ab->hal.ops->reo_shared_qaddr_cache_clear(ab);
}
EXPORT_SYMBOL(ath12k_hal_reo_shared_qaddr_cache_clear);
109 
/* Encode paddr/cookie/manager into @binfo via the chip-specific op. */
void ath12k_hal_rx_buf_addr_info_set(struct ath12k_hal *hal,
				     struct ath12k_buffer_addr *binfo,
				     dma_addr_t paddr, u32 cookie, u8 manager)
{
	hal->ops->rx_buf_addr_info_set(binfo, paddr, cookie, manager);
}
116 
/* Decode paddr/cookie/return-buffer-manager from @binfo via the chip op. */
void ath12k_hal_rx_buf_addr_info_get(struct ath12k_hal *hal,
				     struct ath12k_buffer_addr *binfo,
				     dma_addr_t *paddr, u32 *msdu_cookies,
				     u8 *rbm)
{
	hal->ops->rx_buf_addr_info_get(binfo, paddr, msdu_cookies, rbm);
}
124 
/* Extract the MSDU list from a link descriptor via the chip-specific op. */
void ath12k_hal_rx_msdu_list_get(struct ath12k_hal *hal, struct ath12k *ar,
				 void *link_desc,
				 void *msdu_list,
				 u16 *num_msdus)
{
	hal->ops->rx_msdu_list_get(ar, link_desc, msdu_list, num_msdus);
}
132 
/* Pull buffer address info out of a REO entrance descriptor via chip op. */
void ath12k_hal_rx_reo_ent_buf_paddr_get(struct ath12k_hal *hal, void *rx_desc,
					 dma_addr_t *paddr,
					 u32 *sw_cookie,
					 struct ath12k_buffer_addr **pp_buf_addr,
					 u8 *rbm, u32 *msdu_cnt)
{
	hal->ops->rx_reo_ent_buf_paddr_get(rx_desc, paddr, sw_cookie,
					   pp_buf_addr, rbm, msdu_cnt);
}
142 
/* Apply chip-specific cookie-conversion configuration. */
void ath12k_hal_cc_config(struct ath12k_base *ab)
{
	ab->hal.ops->cc_config(ab);
}
147 
/* Return the idle-link return-buffer manager for @device_id (chip op). */
enum hal_rx_buf_return_buf_manager
ath12k_hal_get_idle_link_rbm(struct ath12k_hal *hal, u8 device_id)
{
	return hal->ops->get_idle_link_rbm(hal, device_id);
}
153 
154 static int ath12k_hal_alloc_cont_rdp(struct ath12k_hal *hal)
155 {
156 	size_t size;
157 
158 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
159 	hal->rdp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->rdp.paddr,
160 					    GFP_KERNEL);
161 	if (!hal->rdp.vaddr)
162 		return -ENOMEM;
163 
164 	return 0;
165 }
166 
167 static void ath12k_hal_free_cont_rdp(struct ath12k_hal *hal)
168 {
169 	size_t size;
170 
171 	if (!hal->rdp.vaddr)
172 		return;
173 
174 	size = sizeof(u32) * HAL_SRNG_RING_ID_MAX;
175 	dma_free_coherent(hal->dev, size,
176 			  hal->rdp.vaddr, hal->rdp.paddr);
177 	hal->rdp.vaddr = NULL;
178 }
179 
180 static int ath12k_hal_alloc_cont_wrp(struct ath12k_hal *hal)
181 {
182 	size_t size;
183 
184 	size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
185 	hal->wrp.vaddr = dma_alloc_coherent(hal->dev, size, &hal->wrp.paddr,
186 					    GFP_KERNEL);
187 	if (!hal->wrp.vaddr)
188 		return -ENOMEM;
189 
190 	return 0;
191 }
192 
193 static void ath12k_hal_free_cont_wrp(struct ath12k_hal *hal)
194 {
195 	size_t size;
196 
197 	if (!hal->wrp.vaddr)
198 		return;
199 
200 	size = sizeof(u32) * (HAL_SRNG_NUM_PMAC_RINGS + HAL_SRNG_NUM_DMAC_RINGS);
201 	dma_free_coherent(hal->dev, size,
202 			  hal->wrp.vaddr, hal->wrp.paddr);
203 	hal->wrp.vaddr = NULL;
204 }
205 
206 static void ath12k_hal_srng_hw_init(struct ath12k_base *ab,
207 				    struct hal_srng *srng)
208 {
209 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
210 		ath12k_hal_srng_src_hw_init(ab, srng);
211 	else
212 		ath12k_hal_srng_dst_hw_init(ab, srng);
213 }
214 
215 int ath12k_hal_srng_get_entrysize(struct ath12k_base *ab, u32 ring_type)
216 {
217 	struct hal_srng_config *srng_config;
218 
219 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
220 		return -EINVAL;
221 
222 	srng_config = &ab->hal.srng_config[ring_type];
223 
224 	return (srng_config->entry_size << 2);
225 }
226 EXPORT_SYMBOL(ath12k_hal_srng_get_entrysize);
227 
228 int ath12k_hal_srng_get_max_entries(struct ath12k_base *ab, u32 ring_type)
229 {
230 	struct hal_srng_config *srng_config;
231 
232 	if (WARN_ON(ring_type >= HAL_MAX_RING_TYPES))
233 		return -EINVAL;
234 
235 	srng_config = &ab->hal.srng_config[ring_type];
236 
237 	return (srng_config->max_size / srng_config->entry_size);
238 }
239 
/* Copy the SW ring state for @srng into @params.
 *
 * NOTE(review): low_threshold is read through the src_ring union member
 * regardless of ring direction; ath12k_hal_srng_setup() only writes it
 * for source rings — confirm callers never rely on it for dst rings.
 */
void ath12k_hal_srng_get_params(struct ath12k_base *ab, struct hal_srng *srng,
				struct hal_srng_params *params)
{
	params->ring_base_paddr = srng->ring_base_paddr;
	params->ring_base_vaddr = srng->ring_base_vaddr;
	params->num_entries = srng->num_entries;
	params->intr_timer_thres_us = srng->intr_timer_thres_us;
	params->intr_batch_cntr_thres_entries =
		srng->intr_batch_cntr_thres_entries;
	params->low_threshold = srng->u.src_ring.low_threshold;
	params->msi_addr = srng->msi_addr;
	params->msi2_addr = srng->msi2_addr;
	params->msi_data = srng->msi_data;
	params->msi2_data = srng->msi2_data;
	params->flags = srng->flags;
}
EXPORT_SYMBOL(ath12k_hal_srng_get_params);
257 
258 dma_addr_t ath12k_hal_srng_get_hp_addr(struct ath12k_base *ab,
259 				       struct hal_srng *srng)
260 {
261 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
262 		return 0;
263 
264 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
265 		return ab->hal.wrp.paddr +
266 		       ((unsigned long)srng->u.src_ring.hp_addr -
267 			(unsigned long)ab->hal.wrp.vaddr);
268 	else
269 		return ab->hal.rdp.paddr +
270 		       ((unsigned long)srng->u.dst_ring.hp_addr -
271 			 (unsigned long)ab->hal.rdp.vaddr);
272 }
273 
274 dma_addr_t ath12k_hal_srng_get_tp_addr(struct ath12k_base *ab,
275 				       struct hal_srng *srng)
276 {
277 	if (!(srng->flags & HAL_SRNG_FLAGS_LMAC_RING))
278 		return 0;
279 
280 	if (srng->ring_dir == HAL_SRNG_DIR_SRC)
281 		return ab->hal.rdp.paddr +
282 		       ((unsigned long)srng->u.src_ring.tp_addr -
283 			(unsigned long)ab->hal.rdp.vaddr);
284 	else
285 		return ab->hal.wrp.paddr +
286 		       ((unsigned long)srng->u.dst_ring.tp_addr -
287 			(unsigned long)ab->hal.wrp.vaddr);
288 }
289 
/* Fill a CE source descriptor via the chip-specific encoder. */
void ath12k_hal_ce_src_set_desc(struct ath12k_hal *hal,
				struct hal_ce_srng_src_desc *desc,
				dma_addr_t paddr, u32 len, u32 id,
				u8 byte_swap_data)
{
	hal->ops->ce_src_set_desc(desc, paddr, len, id, byte_swap_data);
}
297 
/* Fill a CE destination descriptor via the chip-specific encoder. */
void ath12k_hal_ce_dst_set_desc(struct ath12k_hal *hal,
				struct hal_ce_srng_dest_desc *desc,
				dma_addr_t paddr)
{
	hal->ops->ce_dst_set_desc(desc, paddr);
}
304 
/* Return the received length from a CE destination status descriptor. */
u32 ath12k_hal_ce_dst_status_get_length(struct ath12k_hal *hal,
					struct hal_ce_srng_dst_status_desc *desc)
{
	return hal->ops->ce_dst_status_get_length(desc);
}
310 
/* Encode cookie/paddr/rbm into a WBM link descriptor via the chip op. */
void ath12k_hal_set_link_desc_addr(struct ath12k_hal *hal,
				   struct hal_wbm_link_desc *desc, u32 cookie,
				   dma_addr_t paddr, int rbm)
{
	hal->ops->set_link_desc_addr(desc, cookie, paddr, rbm);
}
317 
318 void *ath12k_hal_srng_dst_peek(struct ath12k_base *ab, struct hal_srng *srng)
319 {
320 	lockdep_assert_held(&srng->lock);
321 
322 	if (srng->u.dst_ring.tp != srng->u.dst_ring.cached_hp)
323 		return (srng->ring_base_vaddr + srng->u.dst_ring.tp);
324 
325 	return NULL;
326 }
327 EXPORT_SYMBOL(ath12k_hal_srng_dst_peek);
328 
329 void *ath12k_hal_srng_dst_get_next_entry(struct ath12k_base *ab,
330 					 struct hal_srng *srng)
331 {
332 	void *desc;
333 
334 	lockdep_assert_held(&srng->lock);
335 
336 	if (srng->u.dst_ring.tp == srng->u.dst_ring.cached_hp)
337 		return NULL;
338 
339 	desc = srng->ring_base_vaddr + srng->u.dst_ring.tp;
340 
341 	srng->u.dst_ring.tp = (srng->u.dst_ring.tp + srng->entry_size) %
342 			      srng->ring_size;
343 
344 	return desc;
345 }
346 EXPORT_SYMBOL(ath12k_hal_srng_dst_get_next_entry);
347 
348 int ath12k_hal_srng_dst_num_free(struct ath12k_base *ab, struct hal_srng *srng,
349 				 bool sync_hw_ptr)
350 {
351 	u32 tp, hp;
352 
353 	lockdep_assert_held(&srng->lock);
354 
355 	tp = srng->u.dst_ring.tp;
356 
357 	if (sync_hw_ptr) {
358 		hp = *srng->u.dst_ring.hp_addr;
359 		srng->u.dst_ring.cached_hp = hp;
360 	} else {
361 		hp = srng->u.dst_ring.cached_hp;
362 	}
363 
364 	if (hp >= tp)
365 		return (hp - tp) / srng->entry_size;
366 	else
367 		return (srng->ring_size - tp + hp) / srng->entry_size;
368 }
369 EXPORT_SYMBOL(ath12k_hal_srng_dst_num_free);
370 
371 /* Returns number of available entries in src ring */
372 int ath12k_hal_srng_src_num_free(struct ath12k_base *ab, struct hal_srng *srng,
373 				 bool sync_hw_ptr)
374 {
375 	u32 tp, hp;
376 
377 	lockdep_assert_held(&srng->lock);
378 
379 	hp = srng->u.src_ring.hp;
380 
381 	if (sync_hw_ptr) {
382 		tp = *srng->u.src_ring.tp_addr;
383 		srng->u.src_ring.cached_tp = tp;
384 	} else {
385 		tp = srng->u.src_ring.cached_tp;
386 	}
387 
388 	if (tp > hp)
389 		return ((tp - hp) / srng->entry_size) - 1;
390 	else
391 		return ((srng->ring_size - hp + tp) / srng->entry_size) - 1;
392 }
393 
394 void *ath12k_hal_srng_src_next_peek(struct ath12k_base *ab,
395 				    struct hal_srng *srng)
396 {
397 	void *desc;
398 	u32 next_hp;
399 
400 	lockdep_assert_held(&srng->lock);
401 
402 	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;
403 
404 	if (next_hp == srng->u.src_ring.cached_tp)
405 		return NULL;
406 
407 	desc = srng->ring_base_vaddr + next_hp;
408 
409 	return desc;
410 }
411 EXPORT_SYMBOL(ath12k_hal_srng_src_next_peek);
412 
/* Claim the next source ring entry: return the descriptor at the current
 * head pointer and advance both hp and reap_hp. Returns NULL when the
 * ring is full (advancing hp would collide with the cached tail).
 */
void *ath12k_hal_srng_src_get_next_entry(struct ath12k_base *ab,
					 struct hal_srng *srng)
{
	void *desc;
	u32 next_hp;

	lockdep_assert_held(&srng->lock);

	/* TODO: Using % is expensive, but we have to do this since size of some
	 * SRNG rings is not power of 2 (due to descriptor sizes). Need to see
	 * if separate function is defined for rings having power of 2 ring size
	 * (TCL2SW, REO2SW, SW2RXDMA and CE rings) so that we can avoid the
	 * overhead of % by using mask (with &).
	 */
	next_hp = (srng->u.src_ring.hp + srng->entry_size) % srng->ring_size;

	if (next_hp == srng->u.src_ring.cached_tp)
		return NULL;

	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
	srng->u.src_ring.hp = next_hp;

	/* TODO: Reap functionality is not used by all rings. If particular
	 * ring does not use reap functionality, we need not update reap_hp
	 * with next_hp pointer. Need to make sure a separate function is used
	 * before doing any optimization by removing below code updating
	 * reap_hp.
	 */
	srng->u.src_ring.reap_hp = next_hp;

	return desc;
}
EXPORT_SYMBOL(ath12k_hal_srng_src_get_next_entry);
446 
447 void *ath12k_hal_srng_src_peek(struct ath12k_base *ab, struct hal_srng *srng)
448 {
449 	lockdep_assert_held(&srng->lock);
450 
451 	if (((srng->u.src_ring.hp + srng->entry_size) % srng->ring_size) ==
452 	    srng->u.src_ring.cached_tp)
453 		return NULL;
454 
455 	return srng->ring_base_vaddr + srng->u.src_ring.hp;
456 }
457 EXPORT_SYMBOL(ath12k_hal_srng_src_peek);
458 
459 void *ath12k_hal_srng_src_reap_next(struct ath12k_base *ab,
460 				    struct hal_srng *srng)
461 {
462 	void *desc;
463 	u32 next_reap_hp;
464 
465 	lockdep_assert_held(&srng->lock);
466 
467 	next_reap_hp = (srng->u.src_ring.reap_hp + srng->entry_size) %
468 		       srng->ring_size;
469 
470 	if (next_reap_hp == srng->u.src_ring.cached_tp)
471 		return NULL;
472 
473 	desc = srng->ring_base_vaddr + next_reap_hp;
474 	srng->u.src_ring.reap_hp = next_reap_hp;
475 
476 	return desc;
477 }
478 
479 void *ath12k_hal_srng_src_get_next_reaped(struct ath12k_base *ab,
480 					  struct hal_srng *srng)
481 {
482 	void *desc;
483 
484 	lockdep_assert_held(&srng->lock);
485 
486 	if (srng->u.src_ring.hp == srng->u.src_ring.reap_hp)
487 		return NULL;
488 
489 	desc = srng->ring_base_vaddr + srng->u.src_ring.hp;
490 	srng->u.src_ring.hp = (srng->u.src_ring.hp + srng->entry_size) %
491 			      srng->ring_size;
492 
493 	return desc;
494 }
495 
/* Snapshot the HW-owned ring pointer before a batch of ring accesses.
 *
 * Source rings cache the HW tail pointer; destination rings cache the HW
 * head pointer, with a dma_rmb() ordering descriptor reads after the
 * head pointer read.
 */
void ath12k_hal_srng_access_begin(struct ath12k_base *ab, struct hal_srng *srng)
{
	u32 hp;

	lockdep_assert_held(&srng->lock);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.cached_tp =
			*(volatile u32 *)srng->u.src_ring.tp_addr;
	} else {
		hp = READ_ONCE(*srng->u.dst_ring.hp_addr);

		if (hp != srng->u.dst_ring.cached_hp) {
			srng->u.dst_ring.cached_hp = hp;
			/* Make sure descriptor is read after the head
			 * pointer.
			 */
			dma_rmb();
		}
	}
}
EXPORT_SYMBOL(ath12k_hal_srng_access_begin);
518 
/* Update cached ring head/tail pointers to HW. ath12k_hal_srng_access_begin()
 * should have been called before this.
 *
 * LMAC rings publish the pointer through FW-shared memory; other rings
 * write it via MMIO. In both cases a barrier orders the descriptor
 * access against the pointer update.
 */
void ath12k_hal_srng_access_end(struct ath12k_base *ab, struct hal_srng *srng)
{
	lockdep_assert_held(&srng->lock);

	if (srng->flags & HAL_SRNG_FLAGS_LMAC_RING) {
		/* For LMAC rings, ring pointer updates are done through FW and
		 * hence written to a shared memory location that is read by FW
		 */
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			/* Make sure descriptor is written before updating the
			 * head pointer.
			 */
			dma_wmb();
			WRITE_ONCE(*srng->u.src_ring.hp_addr, srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			/* Make sure descriptor is read before updating the
			 * tail pointer.
			 */
			dma_mb();
			WRITE_ONCE(*srng->u.dst_ring.tp_addr, srng->u.dst_ring.tp);
		}
	} else {
		if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
			srng->u.src_ring.last_tp =
				*(volatile u32 *)srng->u.src_ring.tp_addr;
			/* Assume implementation use an MMIO write accessor
			 * which has the required wmb() so that the descriptor
			 * is written before the updating the head pointer.
			 */
			ath12k_hif_write32(ab,
					   (unsigned long)srng->u.src_ring.hp_addr -
					   (unsigned long)ab->mem,
					   srng->u.src_ring.hp);
		} else {
			srng->u.dst_ring.last_hp = *srng->u.dst_ring.hp_addr;
			/* Make sure descriptor is read before updating the
			 * tail pointer.
			 */
			mb();
			ath12k_hif_write32(ab,
					   (unsigned long)srng->u.dst_ring.tp_addr -
					   (unsigned long)ab->mem,
					   srng->u.dst_ring.tp);
		}
	}

	/* Record when this ring was last serviced, for debug dumps. */
	srng->timestamp = jiffies;
}
EXPORT_SYMBOL(ath12k_hal_srng_access_end);
574 
/* Initialize SW state for one SRNG and, for UMAC-managed rings, program
 * the ring registers. Returns the global ring ID on success, or the
 * negative error code from the ring-ID lookup.
 */
int ath12k_hal_srng_setup(struct ath12k_base *ab, enum hal_ring_type type,
			  int ring_num, int mac_id,
			  struct hal_srng_params *params)
{
	struct ath12k_hal *hal = &ab->hal;
	struct hal_srng_config *srng_config = &ab->hal.srng_config[type];
	struct hal_srng *srng;
	int ring_id;
	u32 idx;
	int i;

	ring_id = ath12k_hal_srng_get_ring_id(hal, type, ring_num, mac_id);
	if (ring_id < 0)
		return ring_id;

	srng = &hal->srng_list[ring_id];

	srng->ring_id = ring_id;
	srng->ring_dir = srng_config->ring_dir;
	srng->ring_base_paddr = params->ring_base_paddr;
	srng->ring_base_vaddr = params->ring_base_vaddr;
	srng->entry_size = srng_config->entry_size;
	srng->num_entries = params->num_entries;
	srng->ring_size = srng->entry_size * srng->num_entries;
	srng->intr_batch_cntr_thres_entries =
				params->intr_batch_cntr_thres_entries;
	srng->intr_timer_thres_us = params->intr_timer_thres_us;
	srng->flags = params->flags;
	srng->msi_addr = params->msi_addr;
	srng->msi2_addr = params->msi2_addr;
	srng->msi_data = params->msi_data;
	srng->msi2_data = params->msi2_data;
	srng->initialized = 1;
	spin_lock_init(&srng->lock);
	lockdep_set_class(&srng->lock, &srng->lock_key);

	/* Per-ring register bases, spaced by reg_size for each ring number. */
	for (i = 0; i < HAL_SRNG_NUM_REG_GRP; i++) {
		srng->hwreg_base[i] = srng_config->reg_start[i] +
				      (ring_num * srng_config->reg_size[i]);
	}

	/* Sizes are in 32-bit words (hence << 2); clear the ring memory. */
	memset(srng->ring_base_vaddr, 0,
	       (srng->entry_size * srng->num_entries) << 2);

	if (srng->ring_dir == HAL_SRNG_DIR_SRC) {
		srng->u.src_ring.hp = 0;
		srng->u.src_ring.cached_tp = 0;
		srng->u.src_ring.reap_hp = srng->ring_size - srng->entry_size;
		srng->u.src_ring.tp_addr = (void *)(hal->rdp.vaddr + ring_id);
		srng->u.src_ring.low_threshold = params->low_threshold *
						 srng->entry_size;
		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
			ath12k_hal_set_umac_srng_ptr_addr(ab, srng);
		} else {
			/* PMAC/DMAC source rings: the head pointer lives in
			 * the shared write-pointer area and is consumed by FW.
			 */
			idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
			srng->u.src_ring.hp_addr = (void *)(hal->wrp.vaddr +
						   idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		}
	} else {
		/* During initialization loop count in all the descriptors
		 * will be set to zero, and HW will set it to 1 on completing
		 * descriptor update in first loop, and increments it by 1 on
		 * subsequent loops (loop count wraps around after reaching
		 * 0xffff). The 'loop_cnt' in SW ring state is the expected
		 * loop count in descriptors updated by HW (to be processed
		 * by SW).
		 */
		srng->u.dst_ring.loop_cnt = 1;
		srng->u.dst_ring.tp = 0;
		srng->u.dst_ring.cached_hp = 0;
		srng->u.dst_ring.hp_addr = (void *)(hal->rdp.vaddr + ring_id);
		if (srng_config->mac_type == ATH12K_HAL_SRNG_UMAC) {
			ath12k_hal_set_umac_srng_ptr_addr(ab, srng);
		} else {
			/* For PMAC & DMAC rings, tail pointer updates will be done
			 * through FW by writing to a shared memory location
			 */
			idx = ring_id - HAL_SRNG_RING_ID_DMAC_CMN_ID_START;
			srng->u.dst_ring.tp_addr = (void *)(hal->wrp.vaddr +
						   idx);
			srng->flags |= HAL_SRNG_FLAGS_LMAC_RING;
		}
	}

	/* Only UMAC rings are programmed by SW; others are handled by FW. */
	if (srng_config->mac_type != ATH12K_HAL_SRNG_UMAC)
		return ring_id;

	ath12k_hal_srng_hw_init(ab, srng);

	if (type == HAL_CE_DST) {
		srng->u.dst_ring.max_buffer_length = params->max_buffer_len;
		ath12k_hal_ce_dst_setup(ab, srng, ring_num);
	}

	return ring_id;
}
672 
673 void ath12k_hal_srng_shadow_config(struct ath12k_base *ab)
674 {
675 	struct ath12k_hal *hal = &ab->hal;
676 	int ring_type, ring_num;
677 
678 	/* update all the non-CE srngs. */
679 	for (ring_type = 0; ring_type < HAL_MAX_RING_TYPES; ring_type++) {
680 		struct hal_srng_config *srng_config = &hal->srng_config[ring_type];
681 
682 		if (ring_type == HAL_CE_SRC ||
683 		    ring_type == HAL_CE_DST ||
684 			ring_type == HAL_CE_DST_STATUS)
685 			continue;
686 
687 		if (srng_config->mac_type == ATH12K_HAL_SRNG_DMAC ||
688 		    srng_config->mac_type == ATH12K_HAL_SRNG_PMAC)
689 			continue;
690 
691 		for (ring_num = 0; ring_num < srng_config->max_rings; ring_num++)
692 			ath12k_hal_srng_update_shadow_config(ab, ring_type, ring_num);
693 	}
694 }
695 
696 void ath12k_hal_srng_get_shadow_config(struct ath12k_base *ab,
697 				       u32 **cfg, u32 *len)
698 {
699 	struct ath12k_hal *hal = &ab->hal;
700 
701 	*len = hal->num_shadow_reg_configured;
702 	*cfg = hal->shadow_reg_addr;
703 }
704 
705 void ath12k_hal_srng_shadow_update_hp_tp(struct ath12k_base *ab,
706 					 struct hal_srng *srng)
707 {
708 	lockdep_assert_held(&srng->lock);
709 
710 	/* check whether the ring is empty. Update the shadow
711 	 * HP only when then ring isn't' empty.
712 	 */
713 	if (srng->ring_dir == HAL_SRNG_DIR_SRC &&
714 	    *srng->u.src_ring.tp_addr != srng->u.src_ring.hp)
715 		ath12k_hal_srng_access_end(ab, srng);
716 }
717 
718 static void ath12k_hal_register_srng_lock_keys(struct ath12k_hal *hal)
719 {
720 	u32 ring_id;
721 
722 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
723 		lockdep_register_key(&hal->srng_list[ring_id].lock_key);
724 }
725 
726 static void ath12k_hal_unregister_srng_lock_keys(struct ath12k_hal *hal)
727 {
728 	u32 ring_id;
729 
730 	for (ring_id = 0; ring_id < HAL_SRNG_RING_ID_MAX; ring_id++)
731 		lockdep_unregister_key(&hal->srng_list[ring_id].lock_key);
732 }
733 
734 int ath12k_hal_srng_init(struct ath12k_base *ab)
735 {
736 	struct ath12k_hal *hal = &ab->hal;
737 	int ret;
738 
739 	ret = hal->ops->create_srng_config(hal);
740 	if (ret)
741 		goto err_hal;
742 
743 	hal->dev = ab->dev;
744 
745 	ret = ath12k_hal_alloc_cont_rdp(hal);
746 	if (ret)
747 		goto err_hal;
748 
749 	ret = ath12k_hal_alloc_cont_wrp(hal);
750 	if (ret)
751 		goto err_free_cont_rdp;
752 
753 	ath12k_hal_register_srng_lock_keys(hal);
754 
755 	return 0;
756 
757 err_free_cont_rdp:
758 	ath12k_hal_free_cont_rdp(hal);
759 
760 err_hal:
761 	return ret;
762 }
763 
/* Tear down HAL SRNG state: lockdep keys, the shared pointer areas and
 * the ring configuration table (presumably allocated by
 * create_srng_config() — verify ownership there).
 */
void ath12k_hal_srng_deinit(struct ath12k_base *ab)
{
	struct ath12k_hal *hal = &ab->hal;

	ath12k_hal_unregister_srng_lock_keys(hal);
	ath12k_hal_free_cont_rdp(hal);
	ath12k_hal_free_cont_wrp(hal);
	kfree(hal->srng_config);
	hal->srng_config = NULL;
}
774 
/* Dump debug state: last-interrupt age for each CE pipe and IRQ group,
 * plus head/tail pointer state for every initialized SRNG.
 */
void ath12k_hal_dump_srng_stats(struct ath12k_base *ab)
{
	struct hal_srng *srng;
	struct ath12k_ext_irq_grp *irq_grp;
	struct ath12k_ce_pipe *ce_pipe;
	int i;

	ath12k_err(ab, "Last interrupt received for each CE:\n");
	for (i = 0; i < ab->hw_params->ce_count; i++) {
		ce_pipe = &ab->ce.ce_pipe[i];

		/* Pipes with interrupts disabled have no timestamp to report. */
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		ath12k_err(ab, "CE_id %d pipe_num %d %ums before\n",
			   i, ce_pipe->pipe_num,
			   jiffies_to_msecs(jiffies - ce_pipe->timestamp));
	}

	ath12k_err(ab, "\nLast interrupt received for each group:\n");
	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		ath12k_err(ab, "group_id %d %ums before\n",
			   irq_grp->grp_id,
			   jiffies_to_msecs(jiffies - irq_grp->timestamp));
	}

	for (i = 0; i < HAL_SRNG_RING_ID_MAX; i++) {
		srng = &ab->hal.srng_list[i];

		if (!srng->initialized)
			continue;

		if (srng->ring_dir == HAL_SRNG_DIR_SRC)
			ath12k_err(ab,
				   "src srng id %u hp %u, reap_hp %u, cur tp %u, cached tp %u last tp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.src_ring.hp,
				   srng->u.src_ring.reap_hp,
				   *srng->u.src_ring.tp_addr, srng->u.src_ring.cached_tp,
				   srng->u.src_ring.last_tp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
		else if (srng->ring_dir == HAL_SRNG_DIR_DST)
			ath12k_err(ab,
				   "dst srng id %u tp %u, cur hp %u, cached hp %u last hp %u napi processed before %ums\n",
				   srng->ring_id, srng->u.dst_ring.tp,
				   *srng->u.dst_ring.hp_addr,
				   srng->u.dst_ring.cached_hp,
				   srng->u.dst_ring.last_hp,
				   jiffies_to_msecs(jiffies - srng->timestamp));
	}
}
826 
827 void *ath12k_hal_encode_tlv64_hdr(void *tlv, u64 tag, u64 len)
828 {
829 	struct hal_tlv_64_hdr *tlv64 = tlv;
830 
831 	tlv64->tl = le64_encode_bits(tag, HAL_TLV_HDR_TAG) |
832 		    le64_encode_bits(len, HAL_TLV_HDR_LEN);
833 
834 	return tlv64->value;
835 }
836 EXPORT_SYMBOL(ath12k_hal_encode_tlv64_hdr);
837 
838 void *ath12k_hal_encode_tlv32_hdr(void *tlv, u64 tag, u64 len)
839 {
840 	struct hal_tlv_hdr *tlv32 = tlv;
841 
842 	tlv32->tl = le32_encode_bits(tag, HAL_TLV_HDR_TAG) |
843 		    le32_encode_bits(len, HAL_TLV_HDR_LEN);
844 
845 	return tlv32->value;
846 }
847 EXPORT_SYMBOL(ath12k_hal_encode_tlv32_hdr);
848 
849 u16 ath12k_hal_decode_tlv64_hdr(void *tlv, void **desc)
850 {
851 	struct hal_tlv_64_hdr *tlv64 = tlv;
852 	u16 tag;
853 
854 	tag = le64_get_bits(tlv64->tl, HAL_SRNG_TLV_HDR_TAG);
855 	*desc = tlv64->value;
856 
857 	return tag;
858 }
859 EXPORT_SYMBOL(ath12k_hal_decode_tlv64_hdr);
860 
861 u16 ath12k_hal_decode_tlv32_hdr(void *tlv, void **desc)
862 {
863 	struct hal_tlv_hdr *tlv32 = tlv;
864 	u16 tag;
865 
866 	tag = le32_get_bits(tlv32->tl, HAL_SRNG_TLV_HDR_TAG);
867 	*desc = tlv32->value;
868 
869 	return tag;
870 }
871 EXPORT_SYMBOL(ath12k_hal_decode_tlv32_hdr);
872