xref: /linux/drivers/net/wireless/ath/ath12k/dp.c (revision 429508c84d95811dd1300181dfe84743caff9a38)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <crypto/hash.h>
8 #include "core.h"
9 #include "dp_tx.h"
10 #include "hal_tx.h"
11 #include "hif.h"
12 #include "debug.h"
13 #include "dp_rx.h"
14 #include "peer.h"
15 #include "dp_mon.h"
16 
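/* Selects whether the TX or RX range of SPT pages is written to the PPT in
 * CMEM; see ath12k_dp_cmem_init().
 */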
17 enum ath12k_dp_desc_type {
18 	ATH12K_DP_TX_DESC,
19 	ATH12K_DP_RX_DESC,
20 };
21 
22 static void ath12k_dp_htt_htc_tx_complete(struct ath12k_base *ab,
23 					  struct sk_buff *skb)
24 {
25 	dev_kfree_skb_any(skb);
26 }
27 
28 void ath12k_dp_peer_cleanup(struct ath12k *ar, int vdev_id, const u8 *addr)
29 {
30 	struct ath12k_base *ab = ar->ab;
31 	struct ath12k_peer *peer;
32 
33 	/* TODO: Any other peer specific DP cleanup */
34 
35 	spin_lock_bh(&ab->base_lock);
36 	peer = ath12k_peer_find(ab, vdev_id, addr);
37 	if (!peer) {
38 		ath12k_warn(ab, "failed to lookup peer %pM on vdev %d\n",
39 			    addr, vdev_id);
40 		spin_unlock_bh(&ab->base_lock);
41 		return;
42 	}
43 
44 	ath12k_dp_rx_peer_tid_cleanup(ar, peer);
45 	crypto_free_shash(peer->tfm_mmic);
46 	peer->dp_setup_done = false;
47 	spin_unlock_bh(&ab->base_lock);
48 }
49 
50 int ath12k_dp_peer_setup(struct ath12k *ar, int vdev_id, const u8 *addr)
51 {
52 	struct ath12k_base *ab = ar->ab;
53 	struct ath12k_peer *peer;
54 	u32 reo_dest;
55 	int ret = 0, tid;
56 
57 	/* NOTE: reo_dest ring id starts from 1 unlike mac_id which starts from 0 */
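	/* e.g. for mac_id 0 this gives reo_dest = 1 and a routing parameter
	 * of DP_RX_HASH_ENABLE | (1 << 1).
	 */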
58 	reo_dest = ar->dp.mac_id + 1;
59 	ret = ath12k_wmi_set_peer_param(ar, addr, vdev_id,
60 					WMI_PEER_SET_DEFAULT_ROUTING,
61 					DP_RX_HASH_ENABLE | (reo_dest << 1));
62 
63 	if (ret) {
64 		ath12k_warn(ab, "failed to set default routing for peer %pM vdev_id %d: %d\n",
65 			    addr, vdev_id, ret);
66 		return ret;
67 	}
68 
69 	for (tid = 0; tid <= IEEE80211_NUM_TIDS; tid++) {
70 		ret = ath12k_dp_rx_peer_tid_setup(ar, addr, vdev_id, tid, 1, 0,
71 						  HAL_PN_TYPE_NONE);
72 		if (ret) {
73 			ath12k_warn(ab, "failed to setup rx tid queue for tid %d: %d\n",
74 				    tid, ret);
75 			goto peer_clean;
76 		}
77 	}
78 
79 	ret = ath12k_dp_rx_peer_frag_setup(ar, addr, vdev_id);
80 	if (ret) {
81 		ath12k_warn(ab, "failed to setup rx defrag context\n");
82 		goto peer_clean;
83 	}
84 
85 	/* TODO: Setup other peer specific resource used in data path */
86 
87 	return 0;
88 
89 peer_clean:
90 	spin_lock_bh(&ab->base_lock);
91 
92 	peer = ath12k_peer_find(ab, vdev_id, addr);
93 	if (!peer) {
94 		ath12k_warn(ab, "failed to find the peer to del rx tid\n");
95 		spin_unlock_bh(&ab->base_lock);
96 		return -ENOENT;
97 	}
98 
99 	for (; tid >= 0; tid--)
100 		ath12k_dp_rx_peer_tid_delete(ar, peer, tid);
101 
102 	spin_unlock_bh(&ab->base_lock);
103 
104 	return ret;
105 }
106 
107 void ath12k_dp_srng_cleanup(struct ath12k_base *ab, struct dp_srng *ring)
108 {
109 	if (!ring->vaddr_unaligned)
110 		return;
111 
112 	dma_free_coherent(ab->dev, ring->size, ring->vaddr_unaligned,
113 			  ring->paddr_unaligned);
114 
115 	ring->vaddr_unaligned = NULL;
116 }
117 
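/* Return the ext interrupt group whose mask has this ring's bit set, or
 * -ENOENT if no group services the ring.
 */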
118 static int ath12k_dp_srng_find_ring_in_mask(int ring_num, const u8 *grp_mask)
119 {
120 	int ext_group_num;
121 	u8 mask = 1 << ring_num;
122 
123 	for (ext_group_num = 0; ext_group_num < ATH12K_EXT_IRQ_GRP_NUM_MAX;
124 	     ext_group_num++) {
125 		if (mask & grp_mask[ext_group_num])
126 			return ext_group_num;
127 	}
128 
129 	return -ENOENT;
130 }
131 
132 static int ath12k_dp_srng_calculate_msi_group(struct ath12k_base *ab,
133 					      enum hal_ring_type type, int ring_num)
134 {
135 	const u8 *grp_mask;
136 
137 	switch (type) {
138 	case HAL_WBM2SW_RELEASE:
139 		if (ring_num == HAL_WBM2SW_REL_ERR_RING_NUM) {
140 			grp_mask = &ab->hw_params->ring_mask->rx_wbm_rel[0];
141 			ring_num = 0;
142 		} else {
143 			grp_mask = &ab->hw_params->ring_mask->tx[0];
144 		}
145 		break;
146 	case HAL_REO_EXCEPTION:
147 		grp_mask = &ab->hw_params->ring_mask->rx_err[0];
148 		break;
149 	case HAL_REO_DST:
150 		grp_mask = &ab->hw_params->ring_mask->rx[0];
151 		break;
152 	case HAL_REO_STATUS:
153 		grp_mask = &ab->hw_params->ring_mask->reo_status[0];
154 		break;
155 	case HAL_RXDMA_MONITOR_STATUS:
156 	case HAL_RXDMA_MONITOR_DST:
157 		grp_mask = &ab->hw_params->ring_mask->rx_mon_dest[0];
158 		break;
159 	case HAL_TX_MONITOR_DST:
160 		grp_mask = &ab->hw_params->ring_mask->tx_mon_dest[0];
161 		break;
162 	case HAL_RXDMA_BUF:
163 		grp_mask = &ab->hw_params->ring_mask->host2rxdma[0];
164 		break;
165 	case HAL_RXDMA_MONITOR_BUF:
166 	case HAL_TCL_DATA:
167 	case HAL_TCL_CMD:
168 	case HAL_REO_CMD:
169 	case HAL_SW2WBM_RELEASE:
170 	case HAL_WBM_IDLE_LINK:
171 	case HAL_TCL_STATUS:
172 	case HAL_REO_REINJECT:
173 	case HAL_CE_SRC:
174 	case HAL_CE_DST:
175 	case HAL_CE_DST_STATUS:
176 	default:
177 		return -ENOENT;
178 	}
179 
180 	return ath12k_dp_srng_find_ring_in_mask(ring_num, grp_mask);
181 }
182 
183 static void ath12k_dp_srng_msi_setup(struct ath12k_base *ab,
184 				     struct hal_srng_params *ring_params,
185 				     enum hal_ring_type type, int ring_num)
186 {
187 	int msi_group_number, msi_data_count;
188 	u32 msi_data_start, msi_irq_start, addr_lo, addr_hi;
189 	int ret;
190 
191 	ret = ath12k_hif_get_user_msi_vector(ab, "DP",
192 					     &msi_data_count, &msi_data_start,
193 					     &msi_irq_start);
194 	if (ret)
195 		return;
196 
197 	msi_group_number = ath12k_dp_srng_calculate_msi_group(ab, type,
198 							      ring_num);
199 	if (msi_group_number < 0) {
200 		ath12k_dbg(ab, ATH12K_DBG_PCI,
201 			   "ring not part of an ext_group; ring_type: %d, ring_num %d",
202 			   type, ring_num);
203 		ring_params->msi_addr = 0;
204 		ring_params->msi_data = 0;
205 		return;
206 	}
207 
208 	if (msi_group_number > msi_data_count) {
209 		ath12k_dbg(ab, ATH12K_DBG_PCI,
210 			   "multiple msi_groups share one msi, msi_group_num %d",
211 			   msi_group_number);
212 	}
213 
214 	ath12k_hif_get_msi_address(ab, &addr_lo, &addr_hi);
215 
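	/* Combine the low/high words into the 64-bit MSI address and pick the
	 * MSI data value for this group, wrapping modulo the number of DP MSI
	 * vectors allocated.
	 */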
216 	ring_params->msi_addr = addr_lo;
217 	ring_params->msi_addr |= (dma_addr_t)(((uint64_t)addr_hi) << 32);
218 	ring_params->msi_data = (msi_group_number % msi_data_count)
219 		+ msi_data_start;
220 	ring_params->flags |= HAL_SRNG_FLAGS_MSI_INTR;
221 }
222 
223 int ath12k_dp_srng_setup(struct ath12k_base *ab, struct dp_srng *ring,
224 			 enum hal_ring_type type, int ring_num,
225 			 int mac_id, int num_entries)
226 {
227 	struct hal_srng_params params = { 0 };
228 	int entry_sz = ath12k_hal_srng_get_entrysize(ab, type);
229 	int max_entries = ath12k_hal_srng_get_max_entries(ab, type);
230 	int ret;
231 
232 	if (max_entries < 0 || entry_sz < 0)
233 		return -EINVAL;
234 
235 	if (num_entries > max_entries)
236 		num_entries = max_entries;
237 
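	/* Over-allocate by HAL_RING_BASE_ALIGN - 1 bytes so the ring base can
	 * be aligned below without a separate aligned allocation.
	 */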
238 	ring->size = (num_entries * entry_sz) + HAL_RING_BASE_ALIGN - 1;
239 	ring->vaddr_unaligned = dma_alloc_coherent(ab->dev, ring->size,
240 						   &ring->paddr_unaligned,
241 						   GFP_KERNEL);
242 	if (!ring->vaddr_unaligned)
243 		return -ENOMEM;
244 
245 	ring->vaddr = PTR_ALIGN(ring->vaddr_unaligned, HAL_RING_BASE_ALIGN);
246 	ring->paddr = ring->paddr_unaligned + ((unsigned long)ring->vaddr -
247 		      (unsigned long)ring->vaddr_unaligned);
248 
249 	params.ring_base_vaddr = ring->vaddr;
250 	params.ring_base_paddr = ring->paddr;
251 	params.num_entries = num_entries;
252 	ath12k_dp_srng_msi_setup(ab, &params, type, ring_num + mac_id);
253 
254 	switch (type) {
255 	case HAL_REO_DST:
256 		params.intr_batch_cntr_thres_entries =
257 					HAL_SRNG_INT_BATCH_THRESHOLD_RX;
258 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
259 		break;
260 	case HAL_RXDMA_BUF:
261 	case HAL_RXDMA_MONITOR_BUF:
262 	case HAL_RXDMA_MONITOR_STATUS:
263 		params.low_threshold = num_entries >> 3;
264 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
265 		params.intr_batch_cntr_thres_entries = 0;
266 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
267 		break;
268 	case HAL_TX_MONITOR_DST:
269 		params.low_threshold = DP_TX_MONITOR_BUF_SIZE_MAX >> 3;
270 		params.flags |= HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN;
271 		params.intr_batch_cntr_thres_entries = 0;
272 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_RX;
273 		break;
274 	case HAL_WBM2SW_RELEASE:
275 		if (ab->hw_params->hw_ops->dp_srng_is_tx_comp_ring(ring_num)) {
276 			params.intr_batch_cntr_thres_entries =
277 					HAL_SRNG_INT_BATCH_THRESHOLD_TX;
278 			params.intr_timer_thres_us =
279 					HAL_SRNG_INT_TIMER_THRESHOLD_TX;
280 			break;
281 		}
282 		/* fall through when ring_num != HAL_WBM2SW_REL_ERR_RING_NUM */
283 		fallthrough;
284 	case HAL_REO_EXCEPTION:
285 	case HAL_REO_REINJECT:
286 	case HAL_REO_CMD:
287 	case HAL_REO_STATUS:
288 	case HAL_TCL_DATA:
289 	case HAL_TCL_CMD:
290 	case HAL_TCL_STATUS:
291 	case HAL_WBM_IDLE_LINK:
292 	case HAL_SW2WBM_RELEASE:
293 	case HAL_RXDMA_DST:
294 	case HAL_RXDMA_MONITOR_DST:
295 	case HAL_RXDMA_MONITOR_DESC:
296 		params.intr_batch_cntr_thres_entries =
297 					HAL_SRNG_INT_BATCH_THRESHOLD_OTHER;
298 		params.intr_timer_thres_us = HAL_SRNG_INT_TIMER_THRESHOLD_OTHER;
299 		break;
300 	case HAL_RXDMA_DIR_BUF:
301 		break;
302 	default:
303 		ath12k_warn(ab, "Not a valid ring type in dp: %d\n", type);
304 		return -EINVAL;
305 	}
306 
307 	ret = ath12k_hal_srng_setup(ab, type, ring_num, mac_id, &params);
308 	if (ret < 0) {
309 		ath12k_warn(ab, "failed to setup srng: %d ring_id %d\n",
310 			    ret, ring_num);
311 		return ret;
312 	}
313 
314 	ring->ring_id = ret;
315 
316 	return 0;
317 }
318 
319 static
320 u32 ath12k_dp_tx_get_vdev_bank_config(struct ath12k_base *ab, struct ath12k_vif *arvif)
321 {
322 	u32 bank_config = 0;
323 
324 	/* Only valid for raw frames with HW crypto enabled.
325 	 * With SW crypto, mac80211 sets key per packet
326 	 */
327 	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
328 	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ab->dev_flags))
329 		bank_config |=
330 			u32_encode_bits(ath12k_dp_tx_get_encrypt_type(arvif->key_cipher),
331 					HAL_TX_BANK_CONFIG_ENCRYPT_TYPE);
332 
333 	bank_config |= u32_encode_bits(arvif->tx_encap_type,
334 					HAL_TX_BANK_CONFIG_ENCAP_TYPE);
335 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_SRC_BUFFER_SWAP) |
336 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_LINK_META_SWAP) |
337 			u32_encode_bits(0, HAL_TX_BANK_CONFIG_EPD);
338 
339 	/* only valid if idx_lookup_override is not set in tcl_data_cmd */
340 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_INDEX_LOOKUP_EN);
341 
342 	bank_config |= u32_encode_bits(arvif->hal_addr_search_flags & HAL_TX_ADDRX_EN,
343 					HAL_TX_BANK_CONFIG_ADDRX_EN) |
344 			u32_encode_bits(!!(arvif->hal_addr_search_flags &
345 					HAL_TX_ADDRY_EN),
346 					HAL_TX_BANK_CONFIG_ADDRY_EN);
347 
348 	bank_config |= u32_encode_bits(ieee80211_vif_is_mesh(arvif->vif) ? 3 : 0,
349 					HAL_TX_BANK_CONFIG_MESH_EN) |
350 			u32_encode_bits(arvif->vdev_id_check_en,
351 					HAL_TX_BANK_CONFIG_VDEV_ID_CHECK_EN);
352 
353 	bank_config |= u32_encode_bits(0, HAL_TX_BANK_CONFIG_DSCP_TIP_MAP_ID);
354 
355 	return bank_config;
356 }
357 
358 static int ath12k_dp_tx_get_bank_profile(struct ath12k_base *ab, struct ath12k_vif *arvif,
359 					 struct ath12k_dp *dp)
360 {
361 	int bank_id = DP_INVALID_BANK_ID;
362 	int i;
363 	u32 bank_config;
364 	bool configure_register = false;
365 
366 	/* convert vdev params into hal_tx_bank_config */
367 	bank_config = ath12k_dp_tx_get_vdev_bank_config(ab, arvif);
368 
369 	spin_lock_bh(&dp->tx_bank_lock);
370 	/* TODO: implement using idr kernel framework */
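	/* Reuse a bank whose configuration matches exactly; otherwise claim
	 * the first slot that is unconfigured or has no remaining users.
	 */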
371 	for (i = 0; i < dp->num_bank_profiles; i++) {
372 		if (dp->bank_profiles[i].is_configured &&
373 		    (dp->bank_profiles[i].bank_config ^ bank_config) == 0) {
374 			bank_id = i;
375 			goto inc_ref_and_return;
376 		}
377 		if (!dp->bank_profiles[i].is_configured ||
378 		    !dp->bank_profiles[i].num_users) {
379 			bank_id = i;
380 			goto configure_and_return;
381 		}
382 	}
383 
384 	if (bank_id == DP_INVALID_BANK_ID) {
385 		spin_unlock_bh(&dp->tx_bank_lock);
386 		ath12k_err(ab, "unable to find TX bank!");
387 		return bank_id;
388 	}
389 
390 configure_and_return:
391 	dp->bank_profiles[bank_id].is_configured = true;
392 	dp->bank_profiles[bank_id].bank_config = bank_config;
393 	configure_register = true;
394 inc_ref_and_return:
395 	dp->bank_profiles[bank_id].num_users++;
396 	spin_unlock_bh(&dp->tx_bank_lock);
397 
398 	if (configure_register)
399 		ath12k_hal_tx_configure_bank_register(ab, bank_config, bank_id);
400 
401 	ath12k_dbg(ab, ATH12K_DBG_DP_HTT, "dp_htt tcl bank_id %d input 0x%x match 0x%x num_users %u",
402 		   bank_id, bank_config, dp->bank_profiles[bank_id].bank_config,
403 		   dp->bank_profiles[bank_id].num_users);
404 
405 	return bank_id;
406 }
407 
408 void ath12k_dp_tx_put_bank_profile(struct ath12k_dp *dp, u8 bank_id)
409 {
410 	spin_lock_bh(&dp->tx_bank_lock);
411 	dp->bank_profiles[bank_id].num_users--;
412 	spin_unlock_bh(&dp->tx_bank_lock);
413 }
414 
415 static void ath12k_dp_deinit_bank_profiles(struct ath12k_base *ab)
416 {
417 	struct ath12k_dp *dp = &ab->dp;
418 
419 	kfree(dp->bank_profiles);
420 	dp->bank_profiles = NULL;
421 }
422 
423 static int ath12k_dp_init_bank_profiles(struct ath12k_base *ab)
424 {
425 	struct ath12k_dp *dp = &ab->dp;
426 	u32 num_tcl_banks = ab->hw_params->num_tcl_banks;
427 	int i;
428 
429 	dp->num_bank_profiles = num_tcl_banks;
430 	dp->bank_profiles = kmalloc_array(num_tcl_banks,
431 					  sizeof(struct ath12k_dp_tx_bank_profile),
432 					  GFP_KERNEL);
433 	if (!dp->bank_profiles)
434 		return -ENOMEM;
435 
436 	spin_lock_init(&dp->tx_bank_lock);
437 
438 	for (i = 0; i < num_tcl_banks; i++) {
439 		dp->bank_profiles[i].is_configured = false;
440 		dp->bank_profiles[i].num_users = 0;
441 	}
442 
443 	return 0;
444 }
445 
446 static void ath12k_dp_srng_common_cleanup(struct ath12k_base *ab)
447 {
448 	struct ath12k_dp *dp = &ab->dp;
449 	int i;
450 
451 	ath12k_dp_srng_cleanup(ab, &dp->reo_status_ring);
452 	ath12k_dp_srng_cleanup(ab, &dp->reo_cmd_ring);
453 	ath12k_dp_srng_cleanup(ab, &dp->reo_except_ring);
454 	ath12k_dp_srng_cleanup(ab, &dp->rx_rel_ring);
455 	ath12k_dp_srng_cleanup(ab, &dp->reo_reinject_ring);
456 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
457 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_comp_ring);
458 		ath12k_dp_srng_cleanup(ab, &dp->tx_ring[i].tcl_data_ring);
459 	}
460 	ath12k_dp_srng_cleanup(ab, &dp->tcl_status_ring);
461 	ath12k_dp_srng_cleanup(ab, &dp->tcl_cmd_ring);
462 	ath12k_dp_srng_cleanup(ab, &dp->wbm_desc_rel_ring);
463 }
464 
465 static int ath12k_dp_srng_common_setup(struct ath12k_base *ab)
466 {
467 	struct ath12k_dp *dp = &ab->dp;
468 	const struct ath12k_hal_tcl_to_wbm_rbm_map *map;
469 	struct hal_srng *srng;
470 	int i, ret, tx_comp_ring_num;
471 	u32 ring_hash_map;
472 
473 	ret = ath12k_dp_srng_setup(ab, &dp->wbm_desc_rel_ring,
474 				   HAL_SW2WBM_RELEASE, 0, 0,
475 				   DP_WBM_RELEASE_RING_SIZE);
476 	if (ret) {
477 		ath12k_warn(ab, "failed to set up sw2wbm_release ring :%d\n",
478 			    ret);
479 		goto err;
480 	}
481 
482 	ret = ath12k_dp_srng_setup(ab, &dp->tcl_cmd_ring, HAL_TCL_CMD, 0, 0,
483 				   DP_TCL_CMD_RING_SIZE);
484 	if (ret) {
485 		ath12k_warn(ab, "failed to set up tcl_cmd ring :%d\n", ret);
486 		goto err;
487 	}
488 
489 	ret = ath12k_dp_srng_setup(ab, &dp->tcl_status_ring, HAL_TCL_STATUS,
490 				   0, 0, DP_TCL_STATUS_RING_SIZE);
491 	if (ret) {
492 		ath12k_warn(ab, "failed to set up tcl_status ring :%d\n", ret);
493 		goto err;
494 	}
495 
496 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
497 		map = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map;
498 		tx_comp_ring_num = map[i].wbm_ring_num;
499 
500 		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_data_ring,
501 					   HAL_TCL_DATA, i, 0,
502 					   DP_TCL_DATA_RING_SIZE);
503 		if (ret) {
504 			ath12k_warn(ab, "failed to set up tcl_data ring (%d) :%d\n",
505 				    i, ret);
506 			goto err;
507 		}
508 
509 		ret = ath12k_dp_srng_setup(ab, &dp->tx_ring[i].tcl_comp_ring,
510 					   HAL_WBM2SW_RELEASE, tx_comp_ring_num, 0,
511 					   DP_TX_COMP_RING_SIZE);
512 		if (ret) {
513 			ath12k_warn(ab, "failed to set up tcl_comp ring (%d) :%d\n",
514 				    tx_comp_ring_num, ret);
515 			goto err;
516 		}
517 	}
518 
519 	ret = ath12k_dp_srng_setup(ab, &dp->reo_reinject_ring, HAL_REO_REINJECT,
520 				   0, 0, DP_REO_REINJECT_RING_SIZE);
521 	if (ret) {
522 		ath12k_warn(ab, "failed to set up reo_reinject ring :%d\n",
523 			    ret);
524 		goto err;
525 	}
526 
527 	ret = ath12k_dp_srng_setup(ab, &dp->rx_rel_ring, HAL_WBM2SW_RELEASE,
528 				   HAL_WBM2SW_REL_ERR_RING_NUM, 0,
529 				   DP_RX_RELEASE_RING_SIZE);
530 	if (ret) {
531 		ath12k_warn(ab, "failed to set up rx_rel ring :%d\n", ret);
532 		goto err;
533 	}
534 
535 	ret = ath12k_dp_srng_setup(ab, &dp->reo_except_ring, HAL_REO_EXCEPTION,
536 				   0, 0, DP_REO_EXCEPTION_RING_SIZE);
537 	if (ret) {
538 		ath12k_warn(ab, "failed to set up reo_exception ring :%d\n",
539 			    ret);
540 		goto err;
541 	}
542 
543 	ret = ath12k_dp_srng_setup(ab, &dp->reo_cmd_ring, HAL_REO_CMD,
544 				   0, 0, DP_REO_CMD_RING_SIZE);
545 	if (ret) {
546 		ath12k_warn(ab, "failed to set up reo_cmd ring :%d\n", ret);
547 		goto err;
548 	}
549 
550 	srng = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
551 	ath12k_hal_reo_init_cmd_ring(ab, srng);
552 
553 	ret = ath12k_dp_srng_setup(ab, &dp->reo_status_ring, HAL_REO_STATUS,
554 				   0, 0, DP_REO_STATUS_RING_SIZE);
555 	if (ret) {
556 		ath12k_warn(ab, "failed to set up reo_status ring :%d\n", ret);
557 		goto err;
558 	}
559 
560 	/* When hash based routing of rx packets is enabled, 32 entries that map
561 	 * the hash values to a ring are configured. Each hash entry uses four
562 	 * bits to map to a particular ring. The ring mapping is
563 	 * 0:TCL, 1:SW1, 2:SW2, 3:SW3, 4:SW4, 5:Release, 6:FW, 7:SW5,
564 	 * 8:SW6, 9:SW7, 10:SW8, 11:Not used.
565 	 */
566 	ring_hash_map = HAL_HASH_ROUTING_RING_SW1 |
567 			HAL_HASH_ROUTING_RING_SW2 << 4 |
568 			HAL_HASH_ROUTING_RING_SW3 << 8 |
569 			HAL_HASH_ROUTING_RING_SW4 << 12 |
570 			HAL_HASH_ROUTING_RING_SW1 << 16 |
571 			HAL_HASH_ROUTING_RING_SW2 << 20 |
572 			HAL_HASH_ROUTING_RING_SW3 << 24 |
573 			HAL_HASH_ROUTING_RING_SW4 << 28;
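	/* Eight 4-bit slots in one 32-bit word: SW1..SW4 repeated twice, so
	 * hash values are spread evenly across the four REO destination rings.
	 */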
574 
575 	ath12k_hal_reo_hw_setup(ab, ring_hash_map);
576 
577 	return 0;
578 
579 err:
580 	ath12k_dp_srng_common_cleanup(ab);
581 
582 	return ret;
583 }
584 
585 static void ath12k_dp_scatter_idle_link_desc_cleanup(struct ath12k_base *ab)
586 {
587 	struct ath12k_dp *dp = &ab->dp;
588 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
589 	int i;
590 
591 	for (i = 0; i < DP_IDLE_SCATTER_BUFS_MAX; i++) {
592 		if (!slist[i].vaddr)
593 			continue;
594 
595 		dma_free_coherent(ab->dev, HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
596 				  slist[i].vaddr, slist[i].paddr);
597 		slist[i].vaddr = NULL;
598 	}
599 }
600 
601 static int ath12k_dp_scatter_idle_link_desc_setup(struct ath12k_base *ab,
602 						  int size,
603 						  u32 n_link_desc_bank,
604 						  u32 n_link_desc,
605 						  u32 last_bank_sz)
606 {
607 	struct ath12k_dp *dp = &ab->dp;
608 	struct dp_link_desc_bank *link_desc_banks = dp->link_desc_banks;
609 	struct hal_wbm_idle_scatter_list *slist = dp->scatter_list;
610 	u32 n_entries_per_buf;
611 	int num_scatter_buf, scatter_idx;
612 	struct hal_wbm_link_desc *scatter_buf;
613 	int align_bytes, n_entries;
614 	dma_addr_t paddr;
615 	int rem_entries;
616 	int i;
617 	int ret = 0;
618 	u32 end_offset, cookie;
619 
620 	n_entries_per_buf = HAL_WBM_IDLE_SCATTER_BUF_SIZE /
621 		ath12k_hal_srng_get_entrysize(ab, HAL_WBM_IDLE_LINK);
622 	num_scatter_buf = DIV_ROUND_UP(size, HAL_WBM_IDLE_SCATTER_BUF_SIZE);
623 
624 	if (num_scatter_buf > DP_IDLE_SCATTER_BUFS_MAX)
625 		return -EINVAL;
626 
627 	for (i = 0; i < num_scatter_buf; i++) {
628 		slist[i].vaddr = dma_alloc_coherent(ab->dev,
629 						    HAL_WBM_IDLE_SCATTER_BUF_SIZE_MAX,
630 						    &slist[i].paddr, GFP_KERNEL);
631 		if (!slist[i].vaddr) {
632 			ret = -ENOMEM;
633 			goto err;
634 		}
635 	}
636 
637 	scatter_idx = 0;
638 	scatter_buf = slist[scatter_idx].vaddr;
639 	rem_entries = n_entries_per_buf;
640 
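	/* Publish every link descriptor bank entry into the scatter buffers
	 * as a (cookie, paddr) pair, moving on to the next scatter buffer
	 * once the current one is full.
	 */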
641 	for (i = 0; i < n_link_desc_bank; i++) {
642 		align_bytes = link_desc_banks[i].vaddr -
643 			      link_desc_banks[i].vaddr_unaligned;
644 		n_entries = (DP_LINK_DESC_ALLOC_SIZE_THRESH - align_bytes) /
645 			     HAL_LINK_DESC_SIZE;
646 		paddr = link_desc_banks[i].paddr;
647 		while (n_entries) {
648 			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
649 			ath12k_hal_set_link_desc_addr(scatter_buf, cookie, paddr);
650 			n_entries--;
651 			paddr += HAL_LINK_DESC_SIZE;
652 			if (rem_entries) {
653 				rem_entries--;
654 				scatter_buf++;
655 				continue;
656 			}
657 
658 			rem_entries = n_entries_per_buf;
659 			scatter_idx++;
660 			scatter_buf = slist[scatter_idx].vaddr;
661 		}
662 	}
663 
664 	end_offset = (scatter_buf - slist[scatter_idx].vaddr) *
665 		     sizeof(struct hal_wbm_link_desc);
666 	ath12k_hal_setup_link_idle_list(ab, slist, num_scatter_buf,
667 					n_link_desc, end_offset);
668 
669 	return 0;
670 
671 err:
672 	ath12k_dp_scatter_idle_link_desc_cleanup(ab);
673 
674 	return ret;
675 }
676 
677 static void
678 ath12k_dp_link_desc_bank_free(struct ath12k_base *ab,
679 			      struct dp_link_desc_bank *link_desc_banks)
680 {
681 	int i;
682 
683 	for (i = 0; i < DP_LINK_DESC_BANKS_MAX; i++) {
684 		if (link_desc_banks[i].vaddr_unaligned) {
685 			dma_free_coherent(ab->dev,
686 					  link_desc_banks[i].size,
687 					  link_desc_banks[i].vaddr_unaligned,
688 					  link_desc_banks[i].paddr_unaligned);
689 			link_desc_banks[i].vaddr_unaligned = NULL;
690 		}
691 	}
692 }
693 
694 static int ath12k_dp_link_desc_bank_alloc(struct ath12k_base *ab,
695 					  struct dp_link_desc_bank *desc_bank,
696 					  int n_link_desc_bank,
697 					  int last_bank_sz)
698 {
699 	struct ath12k_dp *dp = &ab->dp;
700 	int i;
701 	int ret = 0;
702 	int desc_sz = DP_LINK_DESC_ALLOC_SIZE_THRESH;
703 
704 	for (i = 0; i < n_link_desc_bank; i++) {
705 		if (i == (n_link_desc_bank - 1) && last_bank_sz)
706 			desc_sz = last_bank_sz;
707 
708 		desc_bank[i].vaddr_unaligned =
709 					dma_alloc_coherent(ab->dev, desc_sz,
710 							   &desc_bank[i].paddr_unaligned,
711 							   GFP_KERNEL);
712 		if (!desc_bank[i].vaddr_unaligned) {
713 			ret = -ENOMEM;
714 			goto err;
715 		}
716 
717 		desc_bank[i].vaddr = PTR_ALIGN(desc_bank[i].vaddr_unaligned,
718 					       HAL_LINK_DESC_ALIGN);
719 		desc_bank[i].paddr = desc_bank[i].paddr_unaligned +
720 				     ((unsigned long)desc_bank[i].vaddr -
721 				      (unsigned long)desc_bank[i].vaddr_unaligned);
722 		desc_bank[i].size = desc_sz;
723 	}
724 
725 	return 0;
726 
727 err:
728 	ath12k_dp_link_desc_bank_free(ab, dp->link_desc_banks);
729 
730 	return ret;
731 }
732 
733 void ath12k_dp_link_desc_cleanup(struct ath12k_base *ab,
734 				 struct dp_link_desc_bank *desc_bank,
735 				 u32 ring_type, struct dp_srng *ring)
736 {
737 	ath12k_dp_link_desc_bank_free(ab, desc_bank);
738 
739 	if (ring_type != HAL_RXDMA_MONITOR_DESC) {
740 		ath12k_dp_srng_cleanup(ab, ring);
741 		ath12k_dp_scatter_idle_link_desc_cleanup(ab);
742 	}
743 }
744 
745 static int ath12k_wbm_idle_ring_setup(struct ath12k_base *ab, u32 *n_link_desc)
746 {
747 	struct ath12k_dp *dp = &ab->dp;
748 	u32 n_mpdu_link_desc, n_mpdu_queue_desc;
749 	u32 n_tx_msdu_link_desc, n_rx_msdu_link_desc;
750 	int ret = 0;
751 
752 	n_mpdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX) /
753 			   HAL_NUM_MPDUS_PER_LINK_DESC;
754 
755 	n_mpdu_queue_desc = n_mpdu_link_desc /
756 			    HAL_NUM_MPDU_LINKS_PER_QUEUE_DESC;
757 
758 	n_tx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_FLOWS_PER_TID *
759 			       DP_AVG_MSDUS_PER_FLOW) /
760 			      HAL_NUM_TX_MSDUS_PER_LINK_DESC;
761 
762 	n_rx_msdu_link_desc = (DP_NUM_TIDS_MAX * DP_AVG_MPDUS_PER_TID_MAX *
763 			       DP_AVG_MSDUS_PER_MPDU) /
764 			      HAL_NUM_RX_MSDUS_PER_LINK_DESC;
765 
766 	*n_link_desc = n_mpdu_link_desc + n_mpdu_queue_desc +
767 		      n_tx_msdu_link_desc + n_rx_msdu_link_desc;
768 
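	/* Round the descriptor count up to the next power of two when it is
	 * not one already.
	 */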
769 	if (*n_link_desc & (*n_link_desc - 1))
770 		*n_link_desc = 1 << fls(*n_link_desc);
771 
772 	ret = ath12k_dp_srng_setup(ab, &dp->wbm_idle_ring,
773 				   HAL_WBM_IDLE_LINK, 0, 0, *n_link_desc);
774 	if (ret) {
775 		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
776 		return ret;
777 	}
778 	return ret;
779 }
780 
781 int ath12k_dp_link_desc_setup(struct ath12k_base *ab,
782 			      struct dp_link_desc_bank *link_desc_banks,
783 			      u32 ring_type, struct hal_srng *srng,
784 			      u32 n_link_desc)
785 {
786 	u32 tot_mem_sz;
787 	u32 n_link_desc_bank, last_bank_sz;
788 	u32 entry_sz, align_bytes, n_entries;
789 	struct hal_wbm_link_desc *desc;
790 	u32 paddr;
791 	int i, ret;
792 	u32 cookie;
793 
794 	tot_mem_sz = n_link_desc * HAL_LINK_DESC_SIZE;
795 	tot_mem_sz += HAL_LINK_DESC_ALIGN;
796 
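	/* Split the total descriptor memory into banks of at most
	 * DP_LINK_DESC_ALLOC_SIZE_THRESH bytes; a smaller trailing bank is
	 * allowed.
	 */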
797 	if (tot_mem_sz <= DP_LINK_DESC_ALLOC_SIZE_THRESH) {
798 		n_link_desc_bank = 1;
799 		last_bank_sz = tot_mem_sz;
800 	} else {
801 		n_link_desc_bank = tot_mem_sz /
802 				   (DP_LINK_DESC_ALLOC_SIZE_THRESH -
803 				    HAL_LINK_DESC_ALIGN);
804 		last_bank_sz = tot_mem_sz %
805 			       (DP_LINK_DESC_ALLOC_SIZE_THRESH -
806 				HAL_LINK_DESC_ALIGN);
807 
808 		if (last_bank_sz)
809 			n_link_desc_bank += 1;
810 	}
811 
812 	if (n_link_desc_bank > DP_LINK_DESC_BANKS_MAX)
813 		return -EINVAL;
814 
815 	ret = ath12k_dp_link_desc_bank_alloc(ab, link_desc_banks,
816 					     n_link_desc_bank, last_bank_sz);
817 	if (ret)
818 		return ret;
819 
820 	/* Setup link desc idle list for HW internal usage */
821 	entry_sz = ath12k_hal_srng_get_entrysize(ab, ring_type);
822 	tot_mem_sz = entry_sz * n_link_desc;
823 
824 	/* Setup scatter desc list when the total memory requirement is more */
825 	if (tot_mem_sz > DP_LINK_DESC_ALLOC_SIZE_THRESH &&
826 	    ring_type != HAL_RXDMA_MONITOR_DESC) {
827 		ret = ath12k_dp_scatter_idle_link_desc_setup(ab, tot_mem_sz,
828 							     n_link_desc_bank,
829 							     n_link_desc,
830 							     last_bank_sz);
831 		if (ret) {
832 			ath12k_warn(ab, "failed to setup scatter idle list descriptor: %d\n",
833 				    ret);
834 			goto fail_desc_bank_free;
835 		}
836 
837 		return 0;
838 	}
839 
840 	spin_lock_bh(&srng->lock);
841 
842 	ath12k_hal_srng_access_begin(ab, srng);
843 
844 	for (i = 0; i < n_link_desc_bank; i++) {
845 		align_bytes = link_desc_banks[i].vaddr -
846 			      link_desc_banks[i].vaddr_unaligned;
847 		n_entries = (link_desc_banks[i].size - align_bytes) /
848 			    HAL_LINK_DESC_SIZE;
849 		paddr = link_desc_banks[i].paddr;
850 		while (n_entries &&
851 		       (desc = ath12k_hal_srng_src_get_next_entry(ab, srng))) {
852 			cookie = DP_LINK_DESC_COOKIE_SET(n_entries, i);
853 			ath12k_hal_set_link_desc_addr(desc,
854 						      cookie, paddr);
855 			n_entries--;
856 			paddr += HAL_LINK_DESC_SIZE;
857 		}
858 	}
859 
860 	ath12k_hal_srng_access_end(ab, srng);
861 
862 	spin_unlock_bh(&srng->lock);
863 
864 	return 0;
865 
866 fail_desc_bank_free:
867 	ath12k_dp_link_desc_bank_free(ab, link_desc_banks);
868 
869 	return ret;
870 }
871 
872 int ath12k_dp_service_srng(struct ath12k_base *ab,
873 			   struct ath12k_ext_irq_grp *irq_grp,
874 			   int budget)
875 {
876 	struct napi_struct *napi = &irq_grp->napi;
877 	int grp_id = irq_grp->grp_id;
878 	int work_done = 0;
879 	int i = 0, j;
880 	int tot_work_done = 0;
881 	enum dp_monitor_mode monitor_mode;
882 	u8 ring_mask;
883 
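	/* Service TX completions for every WBM2SW ring whose bit is set in
	 * this group's tx ring mask.
	 */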
884 	while (i < ab->hw_params->max_tx_ring) {
885 		if (ab->hw_params->ring_mask->tx[grp_id] &
886 			BIT(ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[i].wbm_ring_num))
887 			ath12k_dp_tx_completion_handler(ab, i);
888 		i++;
889 	}
890 
891 	if (ab->hw_params->ring_mask->rx_err[grp_id]) {
892 		work_done = ath12k_dp_rx_process_err(ab, napi, budget);
893 		budget -= work_done;
894 		tot_work_done += work_done;
895 		if (budget <= 0)
896 			goto done;
897 	}
898 
899 	if (ab->hw_params->ring_mask->rx_wbm_rel[grp_id]) {
900 		work_done = ath12k_dp_rx_process_wbm_err(ab,
901 							 napi,
902 							 budget);
903 		budget -= work_done;
904 		tot_work_done += work_done;
905 
906 		if (budget <= 0)
907 			goto done;
908 	}
909 
910 	if (ab->hw_params->ring_mask->rx[grp_id]) {
911 		i = fls(ab->hw_params->ring_mask->rx[grp_id]) - 1;
912 		work_done = ath12k_dp_rx_process(ab, i, napi,
913 						 budget);
914 		budget -= work_done;
915 		tot_work_done += work_done;
916 		if (budget <= 0)
917 			goto done;
918 	}
919 
920 	if (ab->hw_params->ring_mask->rx_mon_dest[grp_id]) {
921 		monitor_mode = ATH12K_DP_RX_MONITOR_MODE;
922 		ring_mask = ab->hw_params->ring_mask->rx_mon_dest[grp_id];
923 		for (i = 0; i < ab->num_radios; i++) {
924 			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
925 				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
926 
927 				if (ring_mask & BIT(id)) {
928 					work_done =
929 					ath12k_dp_mon_process_ring(ab, id, napi, budget,
930 								   monitor_mode);
931 					budget -= work_done;
932 					tot_work_done += work_done;
933 
934 					if (budget <= 0)
935 						goto done;
936 				}
937 			}
938 		}
939 	}
940 
941 	if (ab->hw_params->ring_mask->tx_mon_dest[grp_id]) {
942 		monitor_mode = ATH12K_DP_TX_MONITOR_MODE;
943 		ring_mask = ab->hw_params->ring_mask->tx_mon_dest[grp_id];
944 		for (i = 0; i < ab->num_radios; i++) {
945 			for (j = 0; j < ab->hw_params->num_rxmda_per_pdev; j++) {
946 				int id = i * ab->hw_params->num_rxmda_per_pdev + j;
947 
948 				if (ring_mask & BIT(id)) {
949 					work_done =
950 					ath12k_dp_mon_process_ring(ab, id, napi, budget,
951 								   monitor_mode);
952 					budget -= work_done;
953 					tot_work_done += work_done;
954 
955 					if (budget <= 0)
956 						goto done;
957 				}
958 			}
959 		}
960 	}
961 
962 	if (ab->hw_params->ring_mask->reo_status[grp_id])
963 		ath12k_dp_rx_process_reo_status(ab);
964 
965 	if (ab->hw_params->ring_mask->host2rxdma[grp_id]) {
966 		struct ath12k_dp *dp = &ab->dp;
967 		struct dp_rxdma_ring *rx_ring = &dp->rx_refill_buf_ring;
968 		LIST_HEAD(list);
969 
970 		ath12k_dp_rx_bufs_replenish(ab, rx_ring, &list, 0);
971 	}
972 
973 	/* TODO: Implement handler for other interrupts */
974 
975 done:
976 	return tot_work_done;
977 }
978 
979 void ath12k_dp_pdev_free(struct ath12k_base *ab)
980 {
981 	int i;
982 
983 	del_timer_sync(&ab->mon_reap_timer);
984 
985 	for (i = 0; i < ab->num_radios; i++)
986 		ath12k_dp_rx_pdev_free(ab, i);
987 }
988 
989 void ath12k_dp_pdev_pre_alloc(struct ath12k_base *ab)
990 {
991 	struct ath12k *ar;
992 	struct ath12k_pdev_dp *dp;
993 	int i;
994 
995 	for (i = 0; i <  ab->num_radios; i++) {
996 		ar = ab->pdevs[i].ar;
997 		dp = &ar->dp;
998 		dp->mac_id = i;
999 		atomic_set(&dp->num_tx_pending, 0);
1000 		init_waitqueue_head(&dp->tx_empty_waitq);
1001 
1002 		/* TODO: Add any RXDMA setup required per pdev */
1003 	}
1004 }
1005 
1006 bool ath12k_dp_wmask_compaction_rx_tlv_supported(struct ath12k_base *ab)
1007 {
1008 	if (test_bit(WMI_TLV_SERVICE_WMSK_COMPACTION_RX_TLVS, ab->wmi_ab.svc_map) &&
1009 	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_mpdu_start &&
1010 	    ab->hw_params->hal_ops->rxdma_ring_wmask_rx_msdu_end &&
1011 	    ab->hw_params->hal_ops->get_hal_rx_compact_ops) {
1012 		return true;
1013 	}
1014 	return false;
1015 }
1016 
1017 void ath12k_dp_hal_rx_desc_init(struct ath12k_base *ab)
1018 {
1019 	if (ath12k_dp_wmask_compaction_rx_tlv_supported(ab)) {
1020 		/* RX TLV compaction is supported, so switch hal_rx_ops to the
1021 		 * compact variant.
1022 		 */
1023 		ab->hal_rx_ops = ab->hw_params->hal_ops->get_hal_rx_compact_ops();
1024 	}
1025 	ab->hal.hal_desc_sz =
1026 		ab->hal_rx_ops->rx_desc_get_desc_size();
1027 }
1028 
1029 static void ath12k_dp_service_mon_ring(struct timer_list *t)
1030 {
1031 	struct ath12k_base *ab = from_timer(ab, t, mon_reap_timer);
1032 	int i;
1033 
1034 	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++)
1035 		ath12k_dp_mon_process_ring(ab, i, NULL, DP_MON_SERVICE_BUDGET,
1036 					   ATH12K_DP_RX_MONITOR_MODE);
1037 
1038 	mod_timer(&ab->mon_reap_timer, jiffies +
1039 		  msecs_to_jiffies(ATH12K_MON_TIMER_INTERVAL));
1040 }
1041 
1042 static void ath12k_dp_mon_reap_timer_init(struct ath12k_base *ab)
1043 {
1044 	if (ab->hw_params->rxdma1_enable)
1045 		return;
1046 
1047 	timer_setup(&ab->mon_reap_timer, ath12k_dp_service_mon_ring, 0);
1048 }
1049 
1050 int ath12k_dp_pdev_alloc(struct ath12k_base *ab)
1051 {
1052 	struct ath12k *ar;
1053 	int ret;
1054 	int i;
1055 
1056 	ret = ath12k_dp_rx_htt_setup(ab);
1057 	if (ret)
1058 		goto out;
1059 
1060 	ath12k_dp_mon_reap_timer_init(ab);
1061 
1062 	/* TODO: Per-pdev rx ring unlike tx ring which is mapped to different AC's */
1063 	for (i = 0; i < ab->num_radios; i++) {
1064 		ar = ab->pdevs[i].ar;
1065 		ret = ath12k_dp_rx_pdev_alloc(ab, i);
1066 		if (ret) {
1067 			ath12k_warn(ab, "failed to allocate pdev rx for pdev_id :%d\n",
1068 				    i);
1069 			goto err;
1070 		}
1071 		ret = ath12k_dp_rx_pdev_mon_attach(ar);
1072 		if (ret) {
1073 			ath12k_warn(ab, "failed to initialize mon pdev %d\n", i);
1074 			goto err;
1075 		}
1076 	}
1077 
1078 	return 0;
1079 err:
1080 	ath12k_dp_pdev_free(ab);
1081 out:
1082 	return ret;
1083 }
1084 
1085 int ath12k_dp_htt_connect(struct ath12k_dp *dp)
1086 {
1087 	struct ath12k_htc_svc_conn_req conn_req = {0};
1088 	struct ath12k_htc_svc_conn_resp conn_resp = {0};
1089 	int status;
1090 
1091 	conn_req.ep_ops.ep_tx_complete = ath12k_dp_htt_htc_tx_complete;
1092 	conn_req.ep_ops.ep_rx_complete = ath12k_dp_htt_htc_t2h_msg_handler;
1093 
1094 	/* connect to control service */
1095 	conn_req.service_id = ATH12K_HTC_SVC_ID_HTT_DATA_MSG;
1096 
1097 	status = ath12k_htc_connect_service(&dp->ab->htc, &conn_req,
1098 					    &conn_resp);
1099 
1100 	if (status)
1101 		return status;
1102 
1103 	dp->eid = conn_resp.eid;
1104 
1105 	return 0;
1106 }
1107 
1108 static void ath12k_dp_update_vdev_search(struct ath12k_vif *arvif)
1109 {
1110 	switch (arvif->vdev_type) {
1111 	case WMI_VDEV_TYPE_STA:
1112 		/* TODO: Verify the search type and flags since ast hash
1113 		 * is not part of peer mapv3
1114 		 */
1115 		arvif->hal_addr_search_flags = HAL_TX_ADDRY_EN;
1116 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1117 		break;
1118 	case WMI_VDEV_TYPE_AP:
1119 	case WMI_VDEV_TYPE_IBSS:
1120 		arvif->hal_addr_search_flags = HAL_TX_ADDRX_EN;
1121 		arvif->search_type = HAL_TX_ADDR_SEARCH_DEFAULT;
1122 		break;
1123 	case WMI_VDEV_TYPE_MONITOR:
1124 	default:
1125 		return;
1126 	}
1127 }
1128 
1129 void ath12k_dp_vdev_tx_attach(struct ath12k *ar, struct ath12k_vif *arvif)
1130 {
1131 	struct ath12k_base *ab = ar->ab;
1132 
1133 	arvif->tcl_metadata |= u32_encode_bits(1, HTT_TCL_META_DATA_TYPE) |
1134 			       u32_encode_bits(arvif->vdev_id,
1135 					       HTT_TCL_META_DATA_VDEV_ID) |
1136 			       u32_encode_bits(ar->pdev->pdev_id,
1137 					       HTT_TCL_META_DATA_PDEV_ID);
1138 
1139 	/* set HTT extension valid bit to 0 by default */
1140 	arvif->tcl_metadata &= ~HTT_TCL_META_DATA_VALID_HTT;
1141 
1142 	ath12k_dp_update_vdev_search(arvif);
1143 	arvif->vdev_id_check_en = true;
1144 	arvif->bank_id = ath12k_dp_tx_get_bank_profile(ab, arvif, &ab->dp);
1145 
1146 	/* TODO: error path for bank id failure */
1147 	if (arvif->bank_id == DP_INVALID_BANK_ID) {
1148 		ath12k_err(ar->ab, "Failed to initialize DP TX Banks");
1149 		return;
1150 	}
1151 }
1152 
1153 static void ath12k_dp_cc_cleanup(struct ath12k_base *ab)
1154 {
1155 	struct ath12k_rx_desc_info *desc_info;
1156 	struct ath12k_tx_desc_info *tx_desc_info, *tmp1;
1157 	struct ath12k_dp *dp = &ab->dp;
1158 	struct ath12k_skb_cb *skb_cb;
1159 	struct sk_buff *skb;
1160 	struct ath12k *ar;
1161 	int i, j;
1162 	u32 pool_id, tx_spt_page;
1163 
1164 	if (!dp->spt_info)
1165 		return;
1166 
1167 	/* RX Descriptor cleanup */
1168 	spin_lock_bh(&dp->rx_desc_lock);
1169 
1170 	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1171 		desc_info = dp->spt_info->rxbaddr[i];
1172 
1173 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1174 			if (!desc_info[j].in_use) {
1175 				list_del(&desc_info[j].list);
1176 				continue;
1177 			}
1178 
1179 			skb = desc_info[j].skb;
1180 			if (!skb)
1181 				continue;
1182 
1183 			dma_unmap_single(ab->dev, ATH12K_SKB_RXCB(skb)->paddr,
1184 					 skb->len + skb_tailroom(skb), DMA_FROM_DEVICE);
1185 			dev_kfree_skb_any(skb);
1186 		}
1187 	}
1188 
1189 	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1190 		if (!dp->spt_info->rxbaddr[i])
1191 			continue;
1192 
1193 		kfree(dp->spt_info->rxbaddr[i]);
1194 		dp->spt_info->rxbaddr[i] = NULL;
1195 	}
1196 
1197 	spin_unlock_bh(&dp->rx_desc_lock);
1198 
1199 	/* TX Descriptor cleanup */
1200 	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1201 		spin_lock_bh(&dp->tx_desc_lock[i]);
1202 
1203 		list_for_each_entry_safe(tx_desc_info, tmp1, &dp->tx_desc_used_list[i],
1204 					 list) {
1205 			list_del(&tx_desc_info->list);
1206 			skb = tx_desc_info->skb;
1207 
1208 			if (!skb)
1209 				continue;
1210 
1211 			skb_cb = ATH12K_SKB_CB(skb);
1212 			ar = skb_cb->ar;
1213 			if (atomic_dec_and_test(&ar->dp.num_tx_pending))
1214 				wake_up(&ar->dp.tx_empty_waitq);
1215 
1216 			dma_unmap_single(ab->dev, ATH12K_SKB_CB(skb)->paddr,
1217 					 skb->len, DMA_TO_DEVICE);
1218 			dev_kfree_skb_any(skb);
1219 		}
1220 
1221 		spin_unlock_bh(&dp->tx_desc_lock[i]);
1222 	}
1223 
1224 	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1225 		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1226 
1227 		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1228 			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1229 			if (!dp->spt_info->txbaddr[tx_spt_page])
1230 				continue;
1231 
1232 			kfree(dp->spt_info->txbaddr[tx_spt_page]);
1233 			dp->spt_info->txbaddr[tx_spt_page] = NULL;
1234 		}
1235 
1236 		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1237 	}
1238 
1239 	/* unmap SPT pages */
1240 	for (i = 0; i < dp->num_spt_pages; i++) {
1241 		if (!dp->spt_info[i].vaddr)
1242 			continue;
1243 
1244 		dma_free_coherent(ab->dev, ATH12K_PAGE_SIZE,
1245 				  dp->spt_info[i].vaddr, dp->spt_info[i].paddr);
1246 		dp->spt_info[i].vaddr = NULL;
1247 	}
1248 
1249 	kfree(dp->spt_info);
1250 }
1251 
1252 static void ath12k_dp_reoq_lut_cleanup(struct ath12k_base *ab)
1253 {
1254 	struct ath12k_dp *dp = &ab->dp;
1255 
1256 	if (!ab->hw_params->reoq_lut_support)
1257 		return;
1258 
1259 	if (!dp->reoq_lut.vaddr)
1260 		return;
1261 
1262 	dma_free_coherent(ab->dev, DP_REOQ_LUT_SIZE,
1263 			  dp->reoq_lut.vaddr, dp->reoq_lut.paddr);
1264 	dp->reoq_lut.vaddr = NULL;
1265 
1266 	ath12k_hif_write32(ab,
1267 			   HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab), 0);
1268 }
1269 
1270 void ath12k_dp_free(struct ath12k_base *ab)
1271 {
1272 	struct ath12k_dp *dp = &ab->dp;
1273 	int i;
1274 
1275 	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1276 				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1277 
1278 	ath12k_dp_cc_cleanup(ab);
1279 	ath12k_dp_reoq_lut_cleanup(ab);
1280 	ath12k_dp_deinit_bank_profiles(ab);
1281 	ath12k_dp_srng_common_cleanup(ab);
1282 
1283 	ath12k_dp_rx_reo_cmd_list_cleanup(ab);
1284 
1285 	for (i = 0; i < ab->hw_params->max_tx_ring; i++)
1286 		kfree(dp->tx_ring[i].tx_status);
1287 
1288 	ath12k_dp_rx_free(ab);
1289 	/* Deinit any SOC level resource */
1290 }
1291 
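/* Program hardware cookie conversion: point the REO and WBM cookie
 * conversion logic at the PPT in CMEM and enable cookie-to-descriptor
 * address conversion on the WBM2SW rings.
 */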
1292 void ath12k_dp_cc_config(struct ath12k_base *ab)
1293 {
1294 	u32 cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1295 	u32 reo_base = HAL_SEQ_WCSS_UMAC_REO_REG;
1296 	u32 wbm_base = HAL_SEQ_WCSS_UMAC_WBM_REG;
1297 	u32 val = 0;
1298 
1299 	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG0(ab), cmem_base);
1300 
1301 	val |= u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1302 			       HAL_REO1_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1303 		u32_encode_bits(ATH12K_CC_PPT_MSB,
1304 				HAL_REO1_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1305 		u32_encode_bits(ATH12K_CC_SPT_MSB,
1306 				HAL_REO1_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1307 		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ALIGN) |
1308 		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_ENABLE) |
1309 		u32_encode_bits(1, HAL_REO1_SW_COOKIE_CFG_GLOBAL_ENABLE);
1310 
1311 	ath12k_hif_write32(ab, reo_base + HAL_REO1_SW_COOKIE_CFG1(ab), val);
1312 
1313 	/* Enable HW CC for WBM */
1314 	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG0, cmem_base);
1315 
1316 	val = u32_encode_bits(ATH12K_CMEM_ADDR_MSB,
1317 			      HAL_WBM_SW_COOKIE_CFG_CMEM_BASE_ADDR_MSB) |
1318 		u32_encode_bits(ATH12K_CC_PPT_MSB,
1319 				HAL_WBM_SW_COOKIE_CFG_COOKIE_PPT_MSB) |
1320 		u32_encode_bits(ATH12K_CC_SPT_MSB,
1321 				HAL_WBM_SW_COOKIE_CFG_COOKIE_SPT_MSB) |
1322 		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ALIGN);
1323 
1324 	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG1, val);
1325 
1326 	/* Enable conversion complete indication */
1327 	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2);
1328 	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_RELEASE_PATH_EN) |
1329 		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_ERR_PATH_EN) |
1330 		u32_encode_bits(1, HAL_WBM_SW_COOKIE_CFG_CONV_IND_EN);
1331 
1332 	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CFG2, val);
1333 
1334 	/* Enable Cookie conversion for WBM2SW Rings */
1335 	val = ath12k_hif_read32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG);
1336 	val |= u32_encode_bits(1, HAL_WBM_SW_COOKIE_CONV_CFG_GLOBAL_EN) |
1337 	       ab->hw_params->hal_params->wbm2sw_cc_enable;
1338 
1339 	ath12k_hif_write32(ab, wbm_base + HAL_WBM_SW_COOKIE_CONVERT_CFG, val);
1340 }
1341 
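/* A cookie packs the PPT (page) index above ATH12K_CC_PPT_SHIFT and the SPT
 * (entry within the page) index in the low bits.
 */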
1342 static u32 ath12k_dp_cc_cookie_gen(u16 ppt_idx, u16 spt_idx)
1343 {
1344 	return (u32)ppt_idx << ATH12K_CC_PPT_SHIFT | spt_idx;
1345 }
1346 
1347 static inline void *ath12k_dp_cc_get_desc_addr_ptr(struct ath12k_base *ab,
1348 						   u16 ppt_idx, u16 spt_idx)
1349 {
1350 	struct ath12k_dp *dp = &ab->dp;
1351 
1352 	return dp->spt_info[ppt_idx].vaddr + spt_idx;
1353 }
1354 
1355 struct ath12k_rx_desc_info *ath12k_dp_get_rx_desc(struct ath12k_base *ab,
1356 						  u32 cookie)
1357 {
1358 	struct ath12k_rx_desc_info **desc_addr_ptr;
1359 	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
1360 
1361 	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1362 	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
1363 
1364 	start_ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET;
1365 	end_ppt_idx = start_ppt_idx + ATH12K_NUM_RX_SPT_PAGES;
1366 
1367 	if (ppt_idx < start_ppt_idx ||
1368 	    ppt_idx >= end_ppt_idx ||
1369 	    spt_idx >= ATH12K_MAX_SPT_ENTRIES)
1370 		return NULL;
1371 
1372 	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1373 
1374 	return *desc_addr_ptr;
1375 }
1376 
1377 struct ath12k_tx_desc_info *ath12k_dp_get_tx_desc(struct ath12k_base *ab,
1378 						  u32 cookie)
1379 {
1380 	struct ath12k_tx_desc_info **desc_addr_ptr;
1381 	u16 start_ppt_idx, end_ppt_idx, ppt_idx, spt_idx;
1382 
1383 	ppt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_PPT);
1384 	spt_idx = u32_get_bits(cookie, ATH12K_DP_CC_COOKIE_SPT);
1385 
1386 	start_ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET;
1387 	end_ppt_idx = start_ppt_idx +
1388 		      (ATH12K_TX_SPT_PAGES_PER_POOL * ATH12K_HW_MAX_QUEUES);
1389 
1390 	if (ppt_idx < start_ppt_idx ||
1391 	    ppt_idx >= end_ppt_idx ||
1392 	    spt_idx >= ATH12K_MAX_SPT_ENTRIES)
1393 		return NULL;
1394 
1395 	desc_addr_ptr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, spt_idx);
1396 
1397 	return *desc_addr_ptr;
1398 }
1399 
1400 static int ath12k_dp_cc_desc_init(struct ath12k_base *ab)
1401 {
1402 	struct ath12k_dp *dp = &ab->dp;
1403 	struct ath12k_rx_desc_info *rx_descs, **rx_desc_addr;
1404 	struct ath12k_tx_desc_info *tx_descs, **tx_desc_addr;
1405 	u32 i, j, pool_id, tx_spt_page;
1406 	u32 ppt_idx;
1407 
1408 	spin_lock_bh(&dp->rx_desc_lock);
1409 
1410 	/* First ATH12K_NUM_RX_SPT_PAGES of allocated SPT pages are used for RX */
1411 	for (i = 0; i < ATH12K_NUM_RX_SPT_PAGES; i++) {
1412 		rx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*rx_descs),
1413 				   GFP_ATOMIC);
1414 
1415 		if (!rx_descs) {
1416 			spin_unlock_bh(&dp->rx_desc_lock);
1417 			return -ENOMEM;
1418 		}
1419 
1420 		ppt_idx = ATH12K_RX_SPT_PAGE_OFFSET + i;
1421 		dp->spt_info->rxbaddr[i] = &rx_descs[0];
1422 
1423 		for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1424 			rx_descs[j].cookie = ath12k_dp_cc_cookie_gen(ppt_idx, j);
1425 			rx_descs[j].magic = ATH12K_DP_RX_DESC_MAGIC;
1426 			list_add_tail(&rx_descs[j].list, &dp->rx_desc_free_list);
1427 
1428 			/* Update descriptor VA in SPT */
1429 			rx_desc_addr = ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1430 			*rx_desc_addr = &rx_descs[j];
1431 		}
1432 	}
1433 
1434 	spin_unlock_bh(&dp->rx_desc_lock);
1435 
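	/* TX descriptors: one pool per hardware queue, each pool spanning
	 * ATH12K_TX_SPT_PAGES_PER_POOL SPT pages placed after the RX pages
	 * (see ATH12K_TX_SPT_PAGE_OFFSET).
	 */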
1436 	for (pool_id = 0; pool_id < ATH12K_HW_MAX_QUEUES; pool_id++) {
1437 		spin_lock_bh(&dp->tx_desc_lock[pool_id]);
1438 		for (i = 0; i < ATH12K_TX_SPT_PAGES_PER_POOL; i++) {
1439 			tx_descs = kcalloc(ATH12K_MAX_SPT_ENTRIES, sizeof(*tx_descs),
1440 					   GFP_ATOMIC);
1441 
1442 			if (!tx_descs) {
1443 				spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1444 				/* Caller takes care of TX pending and RX desc cleanup */
1445 				return -ENOMEM;
1446 			}
1447 
1448 			tx_spt_page = i + pool_id * ATH12K_TX_SPT_PAGES_PER_POOL;
1449 			ppt_idx = ATH12K_TX_SPT_PAGE_OFFSET + tx_spt_page;
1450 
1451 			dp->spt_info->txbaddr[tx_spt_page] = &tx_descs[0];
1452 
1453 			for (j = 0; j < ATH12K_MAX_SPT_ENTRIES; j++) {
1454 				tx_descs[j].desc_id = ath12k_dp_cc_cookie_gen(ppt_idx, j);
1455 				tx_descs[j].pool_id = pool_id;
1456 				list_add_tail(&tx_descs[j].list,
1457 					      &dp->tx_desc_free_list[pool_id]);
1458 
1459 				/* Update descriptor VA in SPT */
1460 				tx_desc_addr =
1461 					ath12k_dp_cc_get_desc_addr_ptr(ab, ppt_idx, j);
1462 				*tx_desc_addr = &tx_descs[j];
1463 			}
1464 		}
1465 		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
1466 	}
1467 	return 0;
1468 }
1469 
1470 static int ath12k_dp_cmem_init(struct ath12k_base *ab,
1471 			       struct ath12k_dp *dp,
1472 			       enum ath12k_dp_desc_type type)
1473 {
1474 	u32 cmem_base;
1475 	int i, start, end;
1476 
1477 	cmem_base = ab->qmi.dev_mem[ATH12K_QMI_DEVMEM_CMEM_INDEX].start;
1478 
1479 	switch (type) {
1480 	case ATH12K_DP_TX_DESC:
1481 		start = ATH12K_TX_SPT_PAGE_OFFSET;
1482 		end = start + ATH12K_NUM_TX_SPT_PAGES;
1483 		break;
1484 	case ATH12K_DP_RX_DESC:
1485 		start = ATH12K_RX_SPT_PAGE_OFFSET;
1486 		end = start + ATH12K_NUM_RX_SPT_PAGES;
1487 		break;
1488 	default:
1489 		ath12k_err(ab, "invalid descriptor type %d in cmem init\n", type);
1490 		return -EINVAL;
1491 	}
1492 
1493 	/* Write to PPT in CMEM */
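	/* Each PPT entry holds the SPT page's physical address shifted down by
	 * ATH12K_SPT_4K_ALIGN_OFFSET; the pages were checked for 4K alignment
	 * at allocation time.
	 */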
1494 	for (i = start; i < end; i++)
1495 		ath12k_hif_write32(ab, cmem_base + ATH12K_PPT_ADDR_OFFSET(i),
1496 				   dp->spt_info[i].paddr >> ATH12K_SPT_4K_ALIGN_OFFSET);
1497 
1498 	return 0;
1499 }
1500 
1501 static int ath12k_dp_cc_init(struct ath12k_base *ab)
1502 {
1503 	struct ath12k_dp *dp = &ab->dp;
1504 	int i, ret = 0;
1505 
1506 	INIT_LIST_HEAD(&dp->rx_desc_free_list);
1507 	spin_lock_init(&dp->rx_desc_lock);
1508 
1509 	for (i = 0; i < ATH12K_HW_MAX_QUEUES; i++) {
1510 		INIT_LIST_HEAD(&dp->tx_desc_free_list[i]);
1511 		INIT_LIST_HEAD(&dp->tx_desc_used_list[i]);
1512 		spin_lock_init(&dp->tx_desc_lock[i]);
1513 	}
1514 
1515 	dp->num_spt_pages = ATH12K_NUM_SPT_PAGES;
1516 	if (dp->num_spt_pages > ATH12K_MAX_PPT_ENTRIES)
1517 		dp->num_spt_pages = ATH12K_MAX_PPT_ENTRIES;
1518 
1519 	dp->spt_info = kcalloc(dp->num_spt_pages, sizeof(struct ath12k_spt_info),
1520 			       GFP_KERNEL);
1521 
1522 	if (!dp->spt_info) {
1523 		ath12k_warn(ab, "SPT page allocation failure");
1524 		return -ENOMEM;
1525 	}
1526 
1527 	for (i = 0; i < dp->num_spt_pages; i++) {
1528 		dp->spt_info[i].vaddr = dma_alloc_coherent(ab->dev,
1529 							   ATH12K_PAGE_SIZE,
1530 							   &dp->spt_info[i].paddr,
1531 							   GFP_KERNEL);
1532 
1533 		if (!dp->spt_info[i].vaddr) {
1534 			ret = -ENOMEM;
1535 			goto free;
1536 		}
1537 
1538 		if (dp->spt_info[i].paddr & ATH12K_SPT_4K_ALIGN_CHECK) {
1539 			ath12k_warn(ab, "SPT allocated memory is not 4K aligned");
1540 			ret = -EINVAL;
1541 			goto free;
1542 		}
1543 	}
1544 
1545 	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_TX_DESC);
1546 	if (ret) {
1547 		ath12k_warn(ab, "HW CC Tx cmem init failed %d", ret);
1548 		goto free;
1549 	}
1550 
1551 	ret = ath12k_dp_cmem_init(ab, dp, ATH12K_DP_RX_DESC);
1552 	if (ret) {
1553 		ath12k_warn(ab, "HW CC Rx cmem init failed %d", ret);
1554 		goto free;
1555 	}
1556 
1557 	ret = ath12k_dp_cc_desc_init(ab);
1558 	if (ret) {
1559 		ath12k_warn(ab, "HW CC desc init failed %d", ret);
1560 		goto free;
1561 	}
1562 
1563 	return 0;
1564 free:
1565 	ath12k_dp_cc_cleanup(ab);
1566 	return ret;
1567 }
1568 
1569 static int ath12k_dp_reoq_lut_setup(struct ath12k_base *ab)
1570 {
1571 	struct ath12k_dp *dp = &ab->dp;
1572 
1573 	if (!ab->hw_params->reoq_lut_support)
1574 		return 0;
1575 
1576 	dp->reoq_lut.vaddr = dma_alloc_coherent(ab->dev,
1577 						DP_REOQ_LUT_SIZE,
1578 						&dp->reoq_lut.paddr,
1579 						GFP_KERNEL | __GFP_ZERO);
1580 	if (!dp->reoq_lut.vaddr) {
1581 		ath12k_warn(ab, "failed to allocate memory for reoq table");
1582 		return -ENOMEM;
1583 	}
1584 
1585 	ath12k_hif_write32(ab, HAL_SEQ_WCSS_UMAC_REO_REG + HAL_REO1_QDESC_LUT_BASE0(ab),
1586 			   dp->reoq_lut.paddr);
1587 	return 0;
1588 }
1589 
1590 int ath12k_dp_alloc(struct ath12k_base *ab)
1591 {
1592 	struct ath12k_dp *dp = &ab->dp;
1593 	struct hal_srng *srng = NULL;
1594 	size_t size = 0;
1595 	u32 n_link_desc = 0;
1596 	int ret;
1597 	int i;
1598 
1599 	dp->ab = ab;
1600 
1601 	INIT_LIST_HEAD(&dp->reo_cmd_list);
1602 	INIT_LIST_HEAD(&dp->reo_cmd_cache_flush_list);
1603 	spin_lock_init(&dp->reo_cmd_lock);
1604 
1605 	dp->reo_cmd_cache_flush_count = 0;
1606 
1607 	ret = ath12k_wbm_idle_ring_setup(ab, &n_link_desc);
1608 	if (ret) {
1609 		ath12k_warn(ab, "failed to setup wbm_idle_ring: %d\n", ret);
1610 		return ret;
1611 	}
1612 
1613 	srng = &ab->hal.srng_list[dp->wbm_idle_ring.ring_id];
1614 
1615 	ret = ath12k_dp_link_desc_setup(ab, dp->link_desc_banks,
1616 					HAL_WBM_IDLE_LINK, srng, n_link_desc);
1617 	if (ret) {
1618 		ath12k_warn(ab, "failed to setup link desc: %d\n", ret);
1619 		return ret;
1620 	}
1621 
1622 	ret = ath12k_dp_cc_init(ab);
1623 
1624 	if (ret) {
1625 		ath12k_warn(ab, "failed to setup cookie converter %d\n", ret);
1626 		goto fail_link_desc_cleanup;
1627 	}
1628 	ret = ath12k_dp_init_bank_profiles(ab);
1629 	if (ret) {
1630 		ath12k_warn(ab, "failed to setup bank profiles %d\n", ret);
1631 		goto fail_hw_cc_cleanup;
1632 	}
1633 
1634 	ret = ath12k_dp_srng_common_setup(ab);
1635 	if (ret)
1636 		goto fail_dp_bank_profiles_cleanup;
1637 
1638 	size = sizeof(struct hal_wbm_release_ring_tx) * DP_TX_COMP_RING_SIZE;
1639 
1640 	ret = ath12k_dp_reoq_lut_setup(ab);
1641 	if (ret) {
1642 		ath12k_warn(ab, "failed to setup reoq table %d\n", ret);
1643 		goto fail_cmn_srng_cleanup;
1644 	}
1645 
1646 	for (i = 0; i < ab->hw_params->max_tx_ring; i++) {
1647 		dp->tx_ring[i].tcl_data_ring_id = i;
1648 
1649 		dp->tx_ring[i].tx_status_head = 0;
1650 		dp->tx_ring[i].tx_status_tail = DP_TX_COMP_RING_SIZE - 1;
1651 		dp->tx_ring[i].tx_status = kmalloc(size, GFP_KERNEL);
1652 		if (!dp->tx_ring[i].tx_status) {
1653 			ret = -ENOMEM;
1654 			/* FIXME: The allocated tx status is not freed
1655 			 * properly here
1656 			 */
1657 			goto fail_cmn_reoq_cleanup;
1658 		}
1659 	}
1660 
1661 	for (i = 0; i < HAL_DSCP_TID_MAP_TBL_NUM_ENTRIES_MAX; i++)
1662 		ath12k_hal_tx_set_dscp_tid_map(ab, i);
1663 
1664 	ret = ath12k_dp_rx_alloc(ab);
1665 	if (ret)
1666 		goto fail_dp_rx_free;
1667 
1668 	/* Init any SOC level resource for DP */
1669 
1670 	return 0;
1671 
1672 fail_dp_rx_free:
1673 	ath12k_dp_rx_free(ab);
1674 
1675 fail_cmn_reoq_cleanup:
1676 	ath12k_dp_reoq_lut_cleanup(ab);
1677 
1678 fail_cmn_srng_cleanup:
1679 	ath12k_dp_srng_common_cleanup(ab);
1680 
1681 fail_dp_bank_profiles_cleanup:
1682 	ath12k_dp_deinit_bank_profiles(ab);
1683 
1684 fail_hw_cc_cleanup:
1685 	ath12k_dp_cc_cleanup(ab);
1686 
1687 fail_link_desc_cleanup:
1688 	ath12k_dp_link_desc_cleanup(ab, dp->link_desc_banks,
1689 				    HAL_WBM_IDLE_LINK, &dp->wbm_idle_ring);
1690 
1691 	return ret;
1692 }
1693