/* xref: /freebsd/sys/contrib/dev/athk/ath12k/dp_tx.c (revision 02e9120893770924227138ba49df1edb3896112a) */
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "hw.h"

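/* Pick the TCL encapsulation type for an outgoing frame: raw when the
 * device runs in raw mode, Ethernet when mac80211 offloads the 802.11
 * encapsulation to the hardware, and native wifi otherwise.
 */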
static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath12k_base *ab = arvif->ar->ab;

	if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}

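/* Convert an 802.11 QoS data frame to the "native wifi" format expected by
 * the hardware: the QoS control field is stripped from the header and the
 * QOS_DATA subtype bit is cleared, effectively turning the frame into a
 * non-QoS data frame.
 */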
static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
#if defined(__linux__)
		skb->data, (void *)qos_ctl - (void *)skb->data);
#elif defined(__FreeBSD__)
		skb->data, qos_ctl - skb->data);
#endif
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}

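/* Derive the target TID for a frame. Frames using HW 802.11 encapsulation
 * carry no 802.11 header, so the TID comes from skb->priority; non-QoS
 * data frames map to the dedicated non-QoS TID.
 */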
static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);

	if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}

enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}

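/* Return a software tx descriptor to the per-pool free list, e.g. after a
 * completion or on a transmit error path.
 */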
static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
				       struct ath12k_tx_desc_info *tx_desc,
				       u8 pool_id)
{
	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}

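/* Take a free software tx descriptor from the given pool and move it to
 * the used list. Returns NULL when the pool is exhausted.
 */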
static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
							      u8 pool_id)
{
	struct ath12k_tx_desc_info *desc;

	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
					struct ath12k_tx_desc_info,
					list);
	if (!desc) {
		spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
		return NULL;
	}

	list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);

	return desc;
}

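/* Fill the MSDU extension descriptor used in raw mode with HW crypto
 * disabled: it carries the buffer address and length plus the
 * encapsulation and encryption type overrides.
 */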
static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd,
					     struct hal_tx_info *ti)
{
	struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd;

	tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
					      HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
	tcl_ext_cmd->info1 = le32_encode_bits(0x0,
					      HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
			       le32_encode_bits(ti->data_len,
						HAL_TX_MSDU_EXT_INFO1_BUF_LEN);

	/* OR in the override fields; a plain assignment here would discard
	 * the buffer pointer and length encoded just above.
	 */
	tcl_ext_cmd->info1 |= le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
				le32_encode_bits(ti->encap_type,
						 HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
				le32_encode_bits(ti->encrypt_type,
						 HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
}

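/* Main data-path transmit entry point. Assigns a software tx descriptor,
 * performs the encapsulation required by the selected encap type, DMA-maps
 * the frame (plus an MSDU extension descriptor in the raw/SW-crypto case)
 * and enqueues a TCL data command on one of the TCL rings, retrying on the
 * other rings if the selected one is full.
 */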
int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
		 struct sk_buff *skb)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {0};
	struct ath12k_tx_desc_info *tx_desc;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
	struct hal_tcl_data_cmd *hal_tcl_desc;
	struct hal_tx_msdu_ext_desc *msg;
	struct sk_buff *skb_ext_desc;
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct dp_tx_ring *tx_ring;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u8 ring_selector, ring_map = 0;
	bool tcl_ring_retry;
	bool msdu_ext_desc = false;

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
		return -ESHUTDOWN;

	if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
	    !ieee80211_is_data(hdr->frame_control))
		return -ENOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);

	/* Let the default ring selection be based on the current processor
	 * number, where one of the 3 TCL rings is selected based on
	 * smp_processor_id(). In case that ring is full/busy, we resort
	 * to the other available rings.
	 * If all rings are full, we drop the packet.
	 * TODO: Add throttling logic when all rings are full
	 */
	ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;
	ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;

	ring_map |= BIT(ti.ring_id);
	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;

	tx_ring = &dp->tx_ring[ti.ring_id];

	tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
	if (!tx_desc)
		return -ENOMEM;

	ti.bank_id = arvif->bank_id;
	ti.meta_data_flags = arvif->tcl_metadata;

	if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
	    test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
		if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}

		msdu_ext_desc = true;
	}

	ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.vdev_id = arvif->vdev_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (skb->ip_summed == CHECKSUM_PARTIAL &&
	    ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
		ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
			     u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
	}

	ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);

	ti.tid = ath12k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath12k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_tx_buf;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_tx_buf;
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_tx_buf;
	}

	tx_desc->skb = skb;
	tx_desc->mac_id = ar->pdev_idx;
	ti.desc_id = tx_desc->desc_id;
	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = arvif->vif;
	skb_cb->ar = ar;

	if (msdu_ext_desc) {
		skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
		if (!skb_ext_desc) {
			ret = -ENOMEM;
			goto fail_unmap_dma;
		}

		skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
		memset(skb_ext_desc->data, 0, skb_ext_desc->len);

		msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
		ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);

		ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
					  skb_ext_desc->len, DMA_TO_DEVICE);
		ret = dma_mapping_error(ab->dev, ti.paddr);
		if (ret) {
			kfree_skb(skb_ext_desc);
			goto fail_unmap_dma;
		}

		ti.data_len = skb_ext_desc->len;
		ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;

		skb_cb->paddr_ext_desc = ti.paddr;
	}

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath12k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath12k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for an available tcl descriptor in another ring
		 * after failing due to a full tcl ring now is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
		    ab->hw_params->tcl_ring_retry) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);

	ath12k_hal_srng_access_end(ab, tcl_ring);

	spin_unlock_bh(&tcl_ring->lock);

	ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

	/* The extension descriptor is only mapped when one was attached in
	 * the raw/SW-crypto case; unmapping an unset handle would trigger a
	 * DMA API warning.
	 */
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc),
				 DMA_TO_DEVICE);

fail_remove_tx_buf:
	ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}

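/* Drop a frame for which no tx status will be reported to mac80211 (e.g.
 * firmware reinject/inspect completions): unmap the buffers, free the skb
 * and credit the pending-tx counter.
 */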
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
				    struct sk_buff *msdu, u8 mac_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath12k *ar;
	struct ath12k_skb_cb *skb_cb;
	u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[pdev_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}

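/* Complete a frame whose tx status was delivered by the firmware via the
 * HTT WBM path: unmap the buffers, translate the ack status into mac80211
 * tx info and hand the frame back to mac80211.
 */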
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
				 struct sk_buff *msdu,
				 struct dp_tx_ring *tx_ring,
				 struct ath12k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	struct ath12k *ar;

	skb_cb = ATH12K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
						  ts->ack_rssi;
			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	ieee80211_tx_status(ar->hw, msdu);
}

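/* Dispatch an HTT tx completion according to the WBM status word placed
 * by the firmware behind the WBM release descriptor.
 */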
static void
ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
				     void *desc, u8 mac_id,
				     struct sk_buff *msdu,
				     struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath12k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

#if defined(__linux__)
	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
#elif defined(__FreeBSD__)
	status_desc = (void *)((u8 *)desc + HTT_TX_WBM_COMP_STATUS_OFFSET);
#endif

	wbm_status = le32_get_bits(status_desc->info0,
				   HTT_TX_WBM_COMP_INFO0_STATUS);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.ack_rssi = le32_get_bits(status_desc->info2,
					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}

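/* Complete a frame released by the TQM: unmap it, translate the TQM
 * release reason into mac80211 tx status and report it, unless the pdev
 * is going down or the vif is already gone.
 */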
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ath12k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (!skb_cb->vif) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_tx_status() */
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
					  ts->ack_rssi;
		info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	/* NOTE: Tx rate status reporting. Tx completion status does not have
	 * necessary information (for example nss) to build the tx rate.
	 * Might end up reporting it out-of-band from HTT stats.
	 */

	ieee80211_tx_status(ar->hw, msdu);

exit:
	rcu_read_unlock();
}

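/* Extract release source, TQM release reason, PPDU id and (if valid)
 * rate stats from a WBM tx completion descriptor. FW-released entries are
 * parsed no further here; they carry HTT status instead.
 */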
static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
				      struct hal_wbm_completion_ring_tx *desc,
				      struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
	if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
	    ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
		return;

	if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
		return;

	ts->status = le32_get_bits(desc->info0,
				   HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);

	ts->ppdu_id = le32_get_bits(desc->info1,
				    HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
	if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
	else
		ts->rate_stats = 0;
}

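/* Tx completion bottom half for one WBM2SW ring: drain the hardware ring
 * into the software tx_status FIFO, then process each completion, mapping
 * it back to its software descriptor via the (HW or SW) cookie conversion.
 */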
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct ath12k_tx_desc_info *tx_desc = NULL;
	struct sk_buff *msdu;
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	struct hal_wbm_release_ring *desc;
	u8 mac_id, pdev_id;
	u64 desc_va;

	spin_lock_bh(&status_ring->lock);

	ath12k_hal_srng_access_begin(ab, status_ring);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
		if (!desc)
			break;

		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(*desc));
		tx_ring->tx_status_head =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
	    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath12k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_completion_ring_tx *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath12k_dp_tx_status_parse(ab, tx_status, &ts);

		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
			/* HW has done the cookie conversion */
			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
				   le32_to_cpu(tx_status->buf_va_lo));
			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
		} else {
			/* SW does the cookie conversion to a VA */
			desc_id = le32_get_bits(tx_status->buf_va_hi,
						BUFFER_ADDR_INFO1_SW_COOKIE);

			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
		}
		if (!tx_desc) {
			ath12k_warn(ab, "unable to retrieve tx_desc!\n");
			continue;
		}

		msdu = tx_desc->skb;
		mac_id = tx_desc->mac_id;

		/* Release the descriptor as soon as the necessary info has
		 * been extracted to reduce contention
		 */
		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			ath12k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu,
							     tx_ring);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
		ar = ab->pdevs[pdev_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}

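/* Map a HAL ring type (and, for rxdma buf rings, the specific ring id)
 * onto the HTT ring id/ring type pair the firmware expects in ring setup
 * and selection commands.
 */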
static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		/* on some targets the host hands rx buffers to the fw and the
		 * fw fills the rxbuf ring of each rxdma
		 */
		if (!ab->hw_params->rx_mac_buf_ring) {
			if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
			      ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_TX_MONITOR_BUF:
		*htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_TX_MONITOR_DST:
		*htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	default:
		ath12k_warn(ab, "Unsupported ring type in DP: %d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

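/* Describe a host SRNG to the firmware with an HTT_H2T_MSG_TYPE_SRING_SETUP
 * command: ring base/size, head/tail pointer addresses, MSI target and
 * interrupt moderation parameters.
 */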
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, le32_to_cpu(cmd->ring_msi_addr_lo),
		   le32_to_cpu(cmd->ring_msi_addr_hi),
		   le32_to_cpu(cmd->msi_data));

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, le32_to_cpu(cmd->intr_info),
		   le32_to_cpu(cmd->info2));

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

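/* Request the HTT interface version from the target and verify that the
 * major version matches what the driver was built against.
 */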
int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
{
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
					     HTT_VER_REQ_INFO_MSG_ID);

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath12k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -ENOTSUPP;
	}

	return 0;
}

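/* Enable PPDU stats reporting: send one HTT_H2T_MSG_TYPE_PPDU_STATS_CFG
 * command per rxdma/pdev with the requested TLV type bitmask.
 */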
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params->num_rxmda_per_pdev; i++) {
		skb = ath12k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE);

		pdev_mask = 1 << (i + 1);
		cmd->msg |= le32_encode_bits(pdev_mask, HTT_PPDU_STATS_CFG_PDEV_ID);
		cmd->msg |= le32_encode_bits(mask, HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

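/* Program the rx ring selection/packet filter for a ring via
 * HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG, including the optional TLV
 * offsets when the filter provides them.
 */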
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	if (tlv_filter->offset_valid) {
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

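/* Request extended target statistics of the given type; the results
 * arrive asynchronously in HTT EXT_STATS events identified by the cookie.
 */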
int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
	cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
	cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
	cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
	cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
	cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));

	ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath12k_warn(ab, "failed to send htt type stats request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

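/* Configure monitor mode: set up the tx monitor ring filter, then the rx
 * monitor ring filter.
 */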
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
		return ret;
	}

	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
		return ret;
	}

	return 0;
}

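/* Set up (or, on reset, clear) the rxdma monitor buffer ring filter so
 * that management, control and data frames are delivered to the monitor
 * destination ring.
 */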
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = {0};
	int ret, ring_id;

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	tlv_filter.offset_valid = false;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for monitor buf %d\n", ret);
			return ret;
		}
	}

	return 0;
}

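/* Program the tx monitor ring selection/packet filter via
 * HTT_H2T_MSG_TYPE_TX_MONITOR_CFG: buffer size, per-frame-type packet
 * filters and the up/downstream TLV masks.
 */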
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);

	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
		le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
		le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}

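/* Set up the tx monitor buffer ring filter. The TLV filter is currently
 * left empty; see the TODO in the function body.
 */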
int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct htt_tx_ring_tlv_filter tlv_filter = {0};
	int ret, ring_id;

	ring_id = dp->tx_mon_buf_ring.refill_buf_ring.ring_id;

	/* TODO: Need to set upstream/downstream tlv filters
	 * here
	 */

	if (ab->hw_params->rxdma1_enable) {
		ret = ath12k_dp_tx_htt_tx_filter_setup(ar->ab, ring_id, 0,
						       HAL_TX_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup filter for monitor buf %d\n", ret);
			return ret;
		}
	}

	return 0;
}