1*5c1def83SBjoern A. Zeeb // SPDX-License-Identifier: BSD-3-Clause-Clear
2*5c1def83SBjoern A. Zeeb /*
3*5c1def83SBjoern A. Zeeb * Copyright (c) 2018-2021 The Linux Foundation. All rights reserved.
4*5c1def83SBjoern A. Zeeb * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
5*5c1def83SBjoern A. Zeeb */
6*5c1def83SBjoern A. Zeeb
7*5c1def83SBjoern A. Zeeb #include "core.h"
8*5c1def83SBjoern A. Zeeb #include "dp_tx.h"
9*5c1def83SBjoern A. Zeeb #include "debug.h"
10*5c1def83SBjoern A. Zeeb #include "hw.h"
11*5c1def83SBjoern A. Zeeb
12*5c1def83SBjoern A. Zeeb static enum hal_tcl_encap_type
ath12k_dp_tx_get_encap_type(struct ath12k_vif * arvif,struct sk_buff * skb)13*5c1def83SBjoern A. Zeeb ath12k_dp_tx_get_encap_type(struct ath12k_vif *arvif, struct sk_buff *skb)
14*5c1def83SBjoern A. Zeeb {
15*5c1def83SBjoern A. Zeeb struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
16*5c1def83SBjoern A. Zeeb struct ath12k_base *ab = arvif->ar->ab;
17*5c1def83SBjoern A. Zeeb
18*5c1def83SBjoern A. Zeeb if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
19*5c1def83SBjoern A. Zeeb return HAL_TCL_ENCAP_TYPE_RAW;
20*5c1def83SBjoern A. Zeeb
21*5c1def83SBjoern A. Zeeb if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
22*5c1def83SBjoern A. Zeeb return HAL_TCL_ENCAP_TYPE_ETHERNET;
23*5c1def83SBjoern A. Zeeb
24*5c1def83SBjoern A. Zeeb return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
25*5c1def83SBjoern A. Zeeb }
26*5c1def83SBjoern A. Zeeb
/* Convert an 802.11 QoS data frame to the "native wifi" encapsulation the
 * hardware expects: the 2-byte QoS control field is removed from the header
 * and the frame subtype is downgraded to plain (non-QoS) data.
 * Non-QoS frames are left untouched.
 */
static void ath12k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	/* Shift everything before the QoS control field forward by
	 * IEEE80211_QOS_CTL_LEN, overwriting the QoS field, then trim the
	 * now-unused leading bytes off the skb.
	 */
	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
#if defined(__linux__)
		skb->data, (void *)qos_ctl - (void *)skb->data);
#elif defined(__FreeBSD__)
		skb->data, qos_ctl - skb->data);
#endif
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	/* Header moved; re-read it and clear the QoS-data subtype bit. */
	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}
47*5c1def83SBjoern A. Zeeb
ath12k_dp_tx_get_tid(struct sk_buff * skb)48*5c1def83SBjoern A. Zeeb static u8 ath12k_dp_tx_get_tid(struct sk_buff *skb)
49*5c1def83SBjoern A. Zeeb {
50*5c1def83SBjoern A. Zeeb struct ieee80211_hdr *hdr = (void *)skb->data;
51*5c1def83SBjoern A. Zeeb struct ath12k_skb_cb *cb = ATH12K_SKB_CB(skb);
52*5c1def83SBjoern A. Zeeb
53*5c1def83SBjoern A. Zeeb if (cb->flags & ATH12K_SKB_HW_80211_ENCAP)
54*5c1def83SBjoern A. Zeeb return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
55*5c1def83SBjoern A. Zeeb else if (!ieee80211_is_data_qos(hdr->frame_control))
56*5c1def83SBjoern A. Zeeb return HAL_DESC_REO_NON_QOS_TID;
57*5c1def83SBjoern A. Zeeb else
58*5c1def83SBjoern A. Zeeb return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
59*5c1def83SBjoern A. Zeeb }
60*5c1def83SBjoern A. Zeeb
/* Map a mac80211 cipher suite selector to the HAL encrypt type programmed
 * into the TX descriptor. Unknown ciphers fall back to open (no encryption).
 */
enum hal_encrypt_type ath12k_dp_tx_get_encrypt_type(u32 cipher)
{
	static const struct {
		u32 cipher;
		enum hal_encrypt_type encrypt;
	} cipher_map[] = {
		{ WLAN_CIPHER_SUITE_WEP40, HAL_ENCRYPT_TYPE_WEP_40 },
		{ WLAN_CIPHER_SUITE_WEP104, HAL_ENCRYPT_TYPE_WEP_104 },
		{ WLAN_CIPHER_SUITE_TKIP, HAL_ENCRYPT_TYPE_TKIP_MIC },
		{ WLAN_CIPHER_SUITE_CCMP, HAL_ENCRYPT_TYPE_CCMP_128 },
		{ WLAN_CIPHER_SUITE_CCMP_256, HAL_ENCRYPT_TYPE_CCMP_256 },
		{ WLAN_CIPHER_SUITE_GCMP, HAL_ENCRYPT_TYPE_GCMP_128 },
		{ WLAN_CIPHER_SUITE_GCMP_256, HAL_ENCRYPT_TYPE_AES_GCMP_256 },
	};
	int i;

	for (i = 0; i < ARRAY_SIZE(cipher_map); i++) {
		if (cipher_map[i].cipher == cipher)
			return cipher_map[i].encrypt;
	}

	return HAL_ENCRYPT_TYPE_OPEN;
}
82*5c1def83SBjoern A. Zeeb
/* Return a TX software descriptor to the free pool it was taken from.
 * The per-pool spinlock serializes against ath12k_dp_tx_assign_buffer().
 */
static void ath12k_dp_tx_release_txbuf(struct ath12k_dp *dp,
				       struct ath12k_tx_desc_info *tx_desc,
				       u8 pool_id)
{
	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	list_move_tail(&tx_desc->list, &dp->tx_desc_free_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);
}
91*5c1def83SBjoern A. Zeeb
/* Take a TX software descriptor from the given pool's free list and move it
 * onto the used list. Returns NULL (after logging) when the pool is empty.
 * Protected by the per-pool spinlock against ath12k_dp_tx_release_txbuf().
 */
static struct ath12k_tx_desc_info *ath12k_dp_tx_assign_buffer(struct ath12k_dp *dp,
							      u8 pool_id)
{
	struct ath12k_tx_desc_info *desc;

	spin_lock_bh(&dp->tx_desc_lock[pool_id]);
	desc = list_first_entry_or_null(&dp->tx_desc_free_list[pool_id],
					struct ath12k_tx_desc_info, list);
	if (desc)
		list_move_tail(&desc->list, &dp->tx_desc_used_list[pool_id]);
	spin_unlock_bh(&dp->tx_desc_lock[pool_id]);

	if (!desc)
		ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");

	return desc;
}
112*5c1def83SBjoern A. Zeeb
ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base * ab,void * cmd,struct hal_tx_info * ti)113*5c1def83SBjoern A. Zeeb static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab, void *cmd,
114*5c1def83SBjoern A. Zeeb struct hal_tx_info *ti)
115*5c1def83SBjoern A. Zeeb {
116*5c1def83SBjoern A. Zeeb struct hal_tx_msdu_ext_desc *tcl_ext_cmd = (struct hal_tx_msdu_ext_desc *)cmd;
117*5c1def83SBjoern A. Zeeb
118*5c1def83SBjoern A. Zeeb tcl_ext_cmd->info0 = le32_encode_bits(ti->paddr,
119*5c1def83SBjoern A. Zeeb HAL_TX_MSDU_EXT_INFO0_BUF_PTR_LO);
120*5c1def83SBjoern A. Zeeb tcl_ext_cmd->info1 = le32_encode_bits(0x0,
121*5c1def83SBjoern A. Zeeb HAL_TX_MSDU_EXT_INFO1_BUF_PTR_HI) |
122*5c1def83SBjoern A. Zeeb le32_encode_bits(ti->data_len,
123*5c1def83SBjoern A. Zeeb HAL_TX_MSDU_EXT_INFO1_BUF_LEN);
124*5c1def83SBjoern A. Zeeb
125*5c1def83SBjoern A. Zeeb tcl_ext_cmd->info1 = le32_encode_bits(1, HAL_TX_MSDU_EXT_INFO1_EXTN_OVERRIDE) |
126*5c1def83SBjoern A. Zeeb le32_encode_bits(ti->encap_type,
127*5c1def83SBjoern A. Zeeb HAL_TX_MSDU_EXT_INFO1_ENCAP_TYPE) |
128*5c1def83SBjoern A. Zeeb le32_encode_bits(ti->encrypt_type,
129*5c1def83SBjoern A. Zeeb HAL_TX_MSDU_EXT_INFO1_ENCRYPT_TYPE);
130*5c1def83SBjoern A. Zeeb }
131*5c1def83SBjoern A. Zeeb
ath12k_dp_tx(struct ath12k * ar,struct ath12k_vif * arvif,struct sk_buff * skb)132*5c1def83SBjoern A. Zeeb int ath12k_dp_tx(struct ath12k *ar, struct ath12k_vif *arvif,
133*5c1def83SBjoern A. Zeeb struct sk_buff *skb)
134*5c1def83SBjoern A. Zeeb {
135*5c1def83SBjoern A. Zeeb struct ath12k_base *ab = ar->ab;
136*5c1def83SBjoern A. Zeeb struct ath12k_dp *dp = &ab->dp;
137*5c1def83SBjoern A. Zeeb struct hal_tx_info ti = {0};
138*5c1def83SBjoern A. Zeeb struct ath12k_tx_desc_info *tx_desc;
139*5c1def83SBjoern A. Zeeb struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
140*5c1def83SBjoern A. Zeeb struct ath12k_skb_cb *skb_cb = ATH12K_SKB_CB(skb);
141*5c1def83SBjoern A. Zeeb struct hal_tcl_data_cmd *hal_tcl_desc;
142*5c1def83SBjoern A. Zeeb struct hal_tx_msdu_ext_desc *msg;
143*5c1def83SBjoern A. Zeeb struct sk_buff *skb_ext_desc;
144*5c1def83SBjoern A. Zeeb struct hal_srng *tcl_ring;
145*5c1def83SBjoern A. Zeeb struct ieee80211_hdr *hdr = (void *)skb->data;
146*5c1def83SBjoern A. Zeeb struct dp_tx_ring *tx_ring;
147*5c1def83SBjoern A. Zeeb u8 pool_id;
148*5c1def83SBjoern A. Zeeb u8 hal_ring_id;
149*5c1def83SBjoern A. Zeeb int ret;
150*5c1def83SBjoern A. Zeeb u8 ring_selector, ring_map = 0;
151*5c1def83SBjoern A. Zeeb bool tcl_ring_retry;
152*5c1def83SBjoern A. Zeeb bool msdu_ext_desc = false;
153*5c1def83SBjoern A. Zeeb
154*5c1def83SBjoern A. Zeeb if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
155*5c1def83SBjoern A. Zeeb return -ESHUTDOWN;
156*5c1def83SBjoern A. Zeeb
157*5c1def83SBjoern A. Zeeb if (!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
158*5c1def83SBjoern A. Zeeb !ieee80211_is_data(hdr->frame_control))
159*5c1def83SBjoern A. Zeeb return -ENOTSUPP;
160*5c1def83SBjoern A. Zeeb
161*5c1def83SBjoern A. Zeeb pool_id = skb_get_queue_mapping(skb) & (ATH12K_HW_MAX_QUEUES - 1);
162*5c1def83SBjoern A. Zeeb
163*5c1def83SBjoern A. Zeeb /* Let the default ring selection be based on current processor
164*5c1def83SBjoern A. Zeeb * number, where one of the 3 tcl rings are selected based on
165*5c1def83SBjoern A. Zeeb * the smp_processor_id(). In case that ring
166*5c1def83SBjoern A. Zeeb * is full/busy, we resort to other available rings.
167*5c1def83SBjoern A. Zeeb * If all rings are full, we drop the packet.
168*5c1def83SBjoern A. Zeeb * TODO: Add throttling logic when all rings are full
169*5c1def83SBjoern A. Zeeb */
170*5c1def83SBjoern A. Zeeb ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
171*5c1def83SBjoern A. Zeeb
172*5c1def83SBjoern A. Zeeb tcl_ring_sel:
173*5c1def83SBjoern A. Zeeb tcl_ring_retry = false;
174*5c1def83SBjoern A. Zeeb ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
175*5c1def83SBjoern A. Zeeb
176*5c1def83SBjoern A. Zeeb ring_map |= BIT(ti.ring_id);
177*5c1def83SBjoern A. Zeeb ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
178*5c1def83SBjoern A. Zeeb
179*5c1def83SBjoern A. Zeeb tx_ring = &dp->tx_ring[ti.ring_id];
180*5c1def83SBjoern A. Zeeb
181*5c1def83SBjoern A. Zeeb tx_desc = ath12k_dp_tx_assign_buffer(dp, pool_id);
182*5c1def83SBjoern A. Zeeb if (!tx_desc)
183*5c1def83SBjoern A. Zeeb return -ENOMEM;
184*5c1def83SBjoern A. Zeeb
185*5c1def83SBjoern A. Zeeb ti.bank_id = arvif->bank_id;
186*5c1def83SBjoern A. Zeeb ti.meta_data_flags = arvif->tcl_metadata;
187*5c1def83SBjoern A. Zeeb
188*5c1def83SBjoern A. Zeeb if (arvif->tx_encap_type == HAL_TCL_ENCAP_TYPE_RAW &&
189*5c1def83SBjoern A. Zeeb test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
190*5c1def83SBjoern A. Zeeb if (skb_cb->flags & ATH12K_SKB_CIPHER_SET) {
191*5c1def83SBjoern A. Zeeb ti.encrypt_type =
192*5c1def83SBjoern A. Zeeb ath12k_dp_tx_get_encrypt_type(skb_cb->cipher);
193*5c1def83SBjoern A. Zeeb
194*5c1def83SBjoern A. Zeeb if (ieee80211_has_protected(hdr->frame_control))
195*5c1def83SBjoern A. Zeeb skb_put(skb, IEEE80211_CCMP_MIC_LEN);
196*5c1def83SBjoern A. Zeeb } else {
197*5c1def83SBjoern A. Zeeb ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
198*5c1def83SBjoern A. Zeeb }
199*5c1def83SBjoern A. Zeeb
200*5c1def83SBjoern A. Zeeb msdu_ext_desc = true;
201*5c1def83SBjoern A. Zeeb }
202*5c1def83SBjoern A. Zeeb
203*5c1def83SBjoern A. Zeeb ti.encap_type = ath12k_dp_tx_get_encap_type(arvif, skb);
204*5c1def83SBjoern A. Zeeb ti.addr_search_flags = arvif->hal_addr_search_flags;
205*5c1def83SBjoern A. Zeeb ti.search_type = arvif->search_type;
206*5c1def83SBjoern A. Zeeb ti.type = HAL_TCL_DESC_TYPE_BUFFER;
207*5c1def83SBjoern A. Zeeb ti.pkt_offset = 0;
208*5c1def83SBjoern A. Zeeb ti.lmac_id = ar->lmac_id;
209*5c1def83SBjoern A. Zeeb ti.vdev_id = arvif->vdev_id;
210*5c1def83SBjoern A. Zeeb ti.bss_ast_hash = arvif->ast_hash;
211*5c1def83SBjoern A. Zeeb ti.bss_ast_idx = arvif->ast_idx;
212*5c1def83SBjoern A. Zeeb ti.dscp_tid_tbl_idx = 0;
213*5c1def83SBjoern A. Zeeb
214*5c1def83SBjoern A. Zeeb if (skb->ip_summed == CHECKSUM_PARTIAL &&
215*5c1def83SBjoern A. Zeeb ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW) {
216*5c1def83SBjoern A. Zeeb ti.flags0 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_IP4_CKSUM_EN) |
217*5c1def83SBjoern A. Zeeb u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP4_CKSUM_EN) |
218*5c1def83SBjoern A. Zeeb u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_UDP6_CKSUM_EN) |
219*5c1def83SBjoern A. Zeeb u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP4_CKSUM_EN) |
220*5c1def83SBjoern A. Zeeb u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO2_TCP6_CKSUM_EN);
221*5c1def83SBjoern A. Zeeb }
222*5c1def83SBjoern A. Zeeb
223*5c1def83SBjoern A. Zeeb ti.flags1 |= u32_encode_bits(1, HAL_TCL_DATA_CMD_INFO3_TID_OVERWRITE);
224*5c1def83SBjoern A. Zeeb
225*5c1def83SBjoern A. Zeeb ti.tid = ath12k_dp_tx_get_tid(skb);
226*5c1def83SBjoern A. Zeeb
227*5c1def83SBjoern A. Zeeb switch (ti.encap_type) {
228*5c1def83SBjoern A. Zeeb case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
229*5c1def83SBjoern A. Zeeb ath12k_dp_tx_encap_nwifi(skb);
230*5c1def83SBjoern A. Zeeb break;
231*5c1def83SBjoern A. Zeeb case HAL_TCL_ENCAP_TYPE_RAW:
232*5c1def83SBjoern A. Zeeb if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
233*5c1def83SBjoern A. Zeeb ret = -EINVAL;
234*5c1def83SBjoern A. Zeeb goto fail_remove_tx_buf;
235*5c1def83SBjoern A. Zeeb }
236*5c1def83SBjoern A. Zeeb break;
237*5c1def83SBjoern A. Zeeb case HAL_TCL_ENCAP_TYPE_ETHERNET:
238*5c1def83SBjoern A. Zeeb /* no need to encap */
239*5c1def83SBjoern A. Zeeb break;
240*5c1def83SBjoern A. Zeeb case HAL_TCL_ENCAP_TYPE_802_3:
241*5c1def83SBjoern A. Zeeb default:
242*5c1def83SBjoern A. Zeeb /* TODO: Take care of other encap modes as well */
243*5c1def83SBjoern A. Zeeb ret = -EINVAL;
244*5c1def83SBjoern A. Zeeb atomic_inc(&ab->soc_stats.tx_err.misc_fail);
245*5c1def83SBjoern A. Zeeb goto fail_remove_tx_buf;
246*5c1def83SBjoern A. Zeeb }
247*5c1def83SBjoern A. Zeeb
248*5c1def83SBjoern A. Zeeb ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
249*5c1def83SBjoern A. Zeeb if (dma_mapping_error(ab->dev, ti.paddr)) {
250*5c1def83SBjoern A. Zeeb atomic_inc(&ab->soc_stats.tx_err.misc_fail);
251*5c1def83SBjoern A. Zeeb ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
252*5c1def83SBjoern A. Zeeb ret = -ENOMEM;
253*5c1def83SBjoern A. Zeeb goto fail_remove_tx_buf;
254*5c1def83SBjoern A. Zeeb }
255*5c1def83SBjoern A. Zeeb
256*5c1def83SBjoern A. Zeeb tx_desc->skb = skb;
257*5c1def83SBjoern A. Zeeb tx_desc->mac_id = ar->pdev_idx;
258*5c1def83SBjoern A. Zeeb ti.desc_id = tx_desc->desc_id;
259*5c1def83SBjoern A. Zeeb ti.data_len = skb->len;
260*5c1def83SBjoern A. Zeeb skb_cb->paddr = ti.paddr;
261*5c1def83SBjoern A. Zeeb skb_cb->vif = arvif->vif;
262*5c1def83SBjoern A. Zeeb skb_cb->ar = ar;
263*5c1def83SBjoern A. Zeeb
264*5c1def83SBjoern A. Zeeb if (msdu_ext_desc) {
265*5c1def83SBjoern A. Zeeb skb_ext_desc = dev_alloc_skb(sizeof(struct hal_tx_msdu_ext_desc));
266*5c1def83SBjoern A. Zeeb if (!skb_ext_desc) {
267*5c1def83SBjoern A. Zeeb ret = -ENOMEM;
268*5c1def83SBjoern A. Zeeb goto fail_unmap_dma;
269*5c1def83SBjoern A. Zeeb }
270*5c1def83SBjoern A. Zeeb
271*5c1def83SBjoern A. Zeeb skb_put(skb_ext_desc, sizeof(struct hal_tx_msdu_ext_desc));
272*5c1def83SBjoern A. Zeeb memset(skb_ext_desc->data, 0, skb_ext_desc->len);
273*5c1def83SBjoern A. Zeeb
274*5c1def83SBjoern A. Zeeb msg = (struct hal_tx_msdu_ext_desc *)skb_ext_desc->data;
275*5c1def83SBjoern A. Zeeb ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
276*5c1def83SBjoern A. Zeeb
277*5c1def83SBjoern A. Zeeb ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
278*5c1def83SBjoern A. Zeeb skb_ext_desc->len, DMA_TO_DEVICE);
279*5c1def83SBjoern A. Zeeb ret = dma_mapping_error(ab->dev, ti.paddr);
280*5c1def83SBjoern A. Zeeb if (ret) {
281*5c1def83SBjoern A. Zeeb kfree_skb(skb_ext_desc);
282*5c1def83SBjoern A. Zeeb goto fail_unmap_dma;
283*5c1def83SBjoern A. Zeeb }
284*5c1def83SBjoern A. Zeeb
285*5c1def83SBjoern A. Zeeb ti.data_len = skb_ext_desc->len;
286*5c1def83SBjoern A. Zeeb ti.type = HAL_TCL_DESC_TYPE_EXT_DESC;
287*5c1def83SBjoern A. Zeeb
288*5c1def83SBjoern A. Zeeb skb_cb->paddr_ext_desc = ti.paddr;
289*5c1def83SBjoern A. Zeeb }
290*5c1def83SBjoern A. Zeeb
291*5c1def83SBjoern A. Zeeb hal_ring_id = tx_ring->tcl_data_ring.ring_id;
292*5c1def83SBjoern A. Zeeb tcl_ring = &ab->hal.srng_list[hal_ring_id];
293*5c1def83SBjoern A. Zeeb
294*5c1def83SBjoern A. Zeeb spin_lock_bh(&tcl_ring->lock);
295*5c1def83SBjoern A. Zeeb
296*5c1def83SBjoern A. Zeeb ath12k_hal_srng_access_begin(ab, tcl_ring);
297*5c1def83SBjoern A. Zeeb
298*5c1def83SBjoern A. Zeeb hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
299*5c1def83SBjoern A. Zeeb if (!hal_tcl_desc) {
300*5c1def83SBjoern A. Zeeb /* NOTE: It is highly unlikely we'll be running out of tcl_ring
301*5c1def83SBjoern A. Zeeb * desc because the desc is directly enqueued onto hw queue.
302*5c1def83SBjoern A. Zeeb */
303*5c1def83SBjoern A. Zeeb ath12k_hal_srng_access_end(ab, tcl_ring);
304*5c1def83SBjoern A. Zeeb ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
305*5c1def83SBjoern A. Zeeb spin_unlock_bh(&tcl_ring->lock);
306*5c1def83SBjoern A. Zeeb ret = -ENOMEM;
307*5c1def83SBjoern A. Zeeb
308*5c1def83SBjoern A. Zeeb /* Checking for available tcl descritors in another ring in
309*5c1def83SBjoern A. Zeeb * case of failure due to full tcl ring now, is better than
310*5c1def83SBjoern A. Zeeb * checking this ring earlier for each pkt tx.
311*5c1def83SBjoern A. Zeeb * Restart ring selection if some rings are not checked yet.
312*5c1def83SBjoern A. Zeeb */
313*5c1def83SBjoern A. Zeeb if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
314*5c1def83SBjoern A. Zeeb ab->hw_params->tcl_ring_retry) {
315*5c1def83SBjoern A. Zeeb tcl_ring_retry = true;
316*5c1def83SBjoern A. Zeeb ring_selector++;
317*5c1def83SBjoern A. Zeeb }
318*5c1def83SBjoern A. Zeeb
319*5c1def83SBjoern A. Zeeb goto fail_unmap_dma;
320*5c1def83SBjoern A. Zeeb }
321*5c1def83SBjoern A. Zeeb
322*5c1def83SBjoern A. Zeeb ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
323*5c1def83SBjoern A. Zeeb
324*5c1def83SBjoern A. Zeeb ath12k_hal_srng_access_end(ab, tcl_ring);
325*5c1def83SBjoern A. Zeeb
326*5c1def83SBjoern A. Zeeb spin_unlock_bh(&tcl_ring->lock);
327*5c1def83SBjoern A. Zeeb
328*5c1def83SBjoern A. Zeeb ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
329*5c1def83SBjoern A. Zeeb skb->data, skb->len);
330*5c1def83SBjoern A. Zeeb
331*5c1def83SBjoern A. Zeeb atomic_inc(&ar->dp.num_tx_pending);
332*5c1def83SBjoern A. Zeeb
333*5c1def83SBjoern A. Zeeb return 0;
334*5c1def83SBjoern A. Zeeb
335*5c1def83SBjoern A. Zeeb fail_unmap_dma:
336*5c1def83SBjoern A. Zeeb dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
337*5c1def83SBjoern A. Zeeb dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
338*5c1def83SBjoern A. Zeeb sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);
339*5c1def83SBjoern A. Zeeb
340*5c1def83SBjoern A. Zeeb fail_remove_tx_buf:
341*5c1def83SBjoern A. Zeeb ath12k_dp_tx_release_txbuf(dp, tx_desc, pool_id);
342*5c1def83SBjoern A. Zeeb if (tcl_ring_retry)
343*5c1def83SBjoern A. Zeeb goto tcl_ring_sel;
344*5c1def83SBjoern A. Zeeb
345*5c1def83SBjoern A. Zeeb return ret;
346*5c1def83SBjoern A. Zeeb }
347*5c1def83SBjoern A. Zeeb
/* Drop a completed/rejected TX frame without reporting status to mac80211:
 * unmap its DMA buffers, free the skb and balance the pending-TX counter,
 * waking any waiter once the count reaches zero.
 */
static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
				    struct sk_buff *msdu, u8 mac_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath12k *ar;
	struct ath12k_skb_cb *skb_cb;
	u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	/* Extension descriptor only exists on the raw/SW-crypto TX path */
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[pdev_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}
369*5c1def83SBjoern A. Zeeb
/* Complete a TX frame whose WBM release came back via the HTT (firmware)
 * path: unmap its buffers, translate the firmware ack/no-ack result into
 * mac80211 TX status flags and hand the frame back to mac80211.
 */
static void
ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
				 struct sk_buff *msdu,
				 struct dp_tx_ring *tx_ring,
				 struct ath12k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;
	struct ath12k *ar;

	skb_cb = ATH12K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	/* skb_cb->ar was recorded at TX enqueue time */
	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	/* Extension descriptor only exists on the raw/SW-crypto TX path */
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			/* ack_rssi is reported relative to the noise floor */
			info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
						  ts->ack_rssi;
			info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	ieee80211_tx_status(ar->hw, msdu);
}
408*5c1def83SBjoern A. Zeeb
/* Dispatch one HTT (firmware-routed) TX completion descriptor: extract the
 * WBM status word and either report the frame to mac80211, silently free it,
 * or ignore the event, depending on the completion status.
 */
static void
ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab,
				     void *desc, u8 mac_id,
				     struct sk_buff *msdu,
				     struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath12k_dp_htt_wbm_tx_status ts = {0};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	/* The HTT status block sits at a fixed offset inside the WBM
	 * descriptor; FreeBSD needs the explicit byte-pointer cast because
	 * void-pointer arithmetic is a GNU extension.
	 */
#if defined(__linux__)
	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;
#elif defined(__FreeBSD__)
	status_desc = (void *)((u8 *)desc + HTT_TX_WBM_COMP_STATUS_OFFSET);
#endif

	wbm_status = le32_get_bits(status_desc->info0,
				   HTT_TX_WBM_COMP_INFO0_STATUS);

	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		/* Only STATUS_OK counts as acked; DROP/TTL report failure */
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.ack_rssi = le32_get_bits(status_desc->info2,
					    HTT_TX_WBM_COMP_INFO2_ACK_RSSI);
		ath12k_dp_tx_htt_tx_complete_buf(ab, msdu, tx_ring, &ts);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		/* Frame consumed by firmware; free without status report */
		ath12k_dp_tx_free_txbuf(ab, msdu, mac_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath12k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}
451*5c1def83SBjoern A. Zeeb
/* Complete a TX frame released by the TQM (normal hardware completion path):
 * unmap its buffers, translate the TQM release reason into mac80211 TX
 * status and report the frame. Frames for inactive pdevs or without a vif
 * are dropped silently.
 */
static void ath12k_dp_tx_complete_msdu(struct ath12k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ath12k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath12k_skb_cb *skb_cb;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH12K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	/* Extension descriptor only exists on the raw/SW-crypto TX path */
	if (skb_cb->paddr_ext_desc)
		dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
				 sizeof(struct hal_tx_msdu_ext_desc), DMA_TO_DEVICE);

	/* RCU guards the pdevs_active lookup against pdev teardown */
	rcu_read_lock();

	if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	if (!skb_cb->vif) {
		dev_kfree_skb_any(msdu);
		goto exit;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status*/
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		/* ack_rssi is reported relative to the noise floor */
		info->status.ack_signal = ATH12K_DEFAULT_NOISE_FLOOR +
					  ts->ack_rssi;
		info->status.flags = IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	/* CMD_REMOVE_TX on a no-ack frame still means it went out on air */
	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	/* NOTE: Tx rate status reporting. Tx completion status does not have
	 * necessary information (for example nss) to build the tx rate.
	 * Might end up reporting it out-of-band from HTT stats.
	 */

	ieee80211_tx_status(ar->hw, msdu);

exit:
	rcu_read_unlock();
}
512*5c1def83SBjoern A. Zeeb
ath12k_dp_tx_status_parse(struct ath12k_base * ab,struct hal_wbm_completion_ring_tx * desc,struct hal_tx_status * ts)513*5c1def83SBjoern A. Zeeb static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
514*5c1def83SBjoern A. Zeeb struct hal_wbm_completion_ring_tx *desc,
515*5c1def83SBjoern A. Zeeb struct hal_tx_status *ts)
516*5c1def83SBjoern A. Zeeb {
517*5c1def83SBjoern A. Zeeb ts->buf_rel_source =
518*5c1def83SBjoern A. Zeeb le32_get_bits(desc->info0, HAL_WBM_COMPL_TX_INFO0_REL_SRC_MODULE);
519*5c1def83SBjoern A. Zeeb if (ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
520*5c1def83SBjoern A. Zeeb ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)
521*5c1def83SBjoern A. Zeeb return;
522*5c1def83SBjoern A. Zeeb
523*5c1def83SBjoern A. Zeeb if (ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)
524*5c1def83SBjoern A. Zeeb return;
525*5c1def83SBjoern A. Zeeb
526*5c1def83SBjoern A. Zeeb ts->status = le32_get_bits(desc->info0,
527*5c1def83SBjoern A. Zeeb HAL_WBM_COMPL_TX_INFO0_TQM_RELEASE_REASON);
528*5c1def83SBjoern A. Zeeb
529*5c1def83SBjoern A. Zeeb ts->ppdu_id = le32_get_bits(desc->info1,
530*5c1def83SBjoern A. Zeeb HAL_WBM_COMPL_TX_INFO1_TQM_STATUS_NUMBER);
531*5c1def83SBjoern A. Zeeb if (le32_to_cpu(desc->rate_stats.info0) & HAL_TX_RATE_STATS_INFO0_VALID)
532*5c1def83SBjoern A. Zeeb ts->rate_stats = le32_to_cpu(desc->rate_stats.info0);
533*5c1def83SBjoern A. Zeeb else
534*5c1def83SBjoern A. Zeeb ts->rate_stats = 0;
535*5c1def83SBjoern A. Zeeb }
536*5c1def83SBjoern A. Zeeb
/* ath12k_dp_tx_completion_handler - reap and process tx completion status
 * @ab: ath12k base structure
 * @ring_id: index of the TCL data/completion ring pair to service
 *
 * Two-phase handler: first, under the SRNG lock, WBM release descriptors are
 * copied from the hardware completion ring into the software tx_status FIFO
 * (head/tail indexed via ATH12K_TX_COMPL_NEXT); second, outside the lock,
 * each buffered status is parsed and the corresponding MSDU is completed.
 * Keeping phase two lock-free minimizes hold time on the status ring lock.
 */
void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
{
	struct ath12k *ar;
	struct ath12k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct ath12k_tx_desc_info *tx_desc = NULL;
	struct sk_buff *msdu;
	struct hal_tx_status ts = { 0 };
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	struct hal_wbm_release_ring *desc;
	u8 mac_id, pdev_id;
	u64 desc_va;

	spin_lock_bh(&status_ring->lock);

	ath12k_hal_srng_access_begin(ab, status_ring);

	/* Phase 1: drain hardware ring into the software status FIFO while
	 * there is room (head+1 != tail means the FIFO is not full).
	 */
	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) != tx_ring->tx_status_tail) {
		desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
		if (!desc)
			break;

		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(*desc));
		tx_ring->tx_status_head =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	/* Entries remain on the hardware ring but the FIFO is full; they
	 * will be picked up on a later invocation.
	 */
	if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
	    (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_head) == tx_ring->tx_status_tail)) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath12k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	/* Phase 2: consume buffered statuses (tail chases head) without
	 * holding the SRNG lock.
	 */
	while (ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_completion_ring_tx *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH12K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath12k_dp_tx_status_parse(ab, tx_status, &ts);

		if (le32_get_bits(tx_status->info0, HAL_WBM_COMPL_TX_INFO0_CC_DONE)) {
			/* HW done cookie conversion: descriptor virtual
			 * address is carried directly in the status words.
			 */
			desc_va = ((u64)le32_to_cpu(tx_status->buf_va_hi) << 32 |
				   le32_to_cpu(tx_status->buf_va_lo));
			tx_desc = (struct ath12k_tx_desc_info *)((unsigned long)desc_va);
		} else {
			/* SW does cookie conversion to VA */
			desc_id = le32_get_bits(tx_status->buf_va_hi,
						BUFFER_ADDR_INFO1_SW_COOKIE);

			tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
		}
		if (!tx_desc) {
			ath12k_warn(ab, "unable to retrieve tx_desc!");
			continue;
		}

		msdu = tx_desc->skb;
		mac_id = tx_desc->mac_id;

		/* Release descriptor as soon as extracting necessary info
		 * to reduce contention
		 */
		ath12k_dp_tx_release_txbuf(dp, tx_desc, tx_desc->pool_id);
		if (ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW) {
			/* Firmware-released frames take the HTT completion
			 * path instead of the regular mac80211 completion.
			 */
			ath12k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu,
							     tx_ring);
			continue;
		}

		pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id);
		ar = ab->pdevs[pdev_id].ar;

		/* Wake any waiter flushing pending tx (e.g. during stop) */
		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath12k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}
626*5c1def83SBjoern A. Zeeb
627*5c1def83SBjoern A. Zeeb static int
ath12k_dp_tx_get_ring_id_type(struct ath12k_base * ab,int mac_id,u32 ring_id,enum hal_ring_type ring_type,enum htt_srng_ring_type * htt_ring_type,enum htt_srng_ring_id * htt_ring_id)628*5c1def83SBjoern A. Zeeb ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
629*5c1def83SBjoern A. Zeeb int mac_id, u32 ring_id,
630*5c1def83SBjoern A. Zeeb enum hal_ring_type ring_type,
631*5c1def83SBjoern A. Zeeb enum htt_srng_ring_type *htt_ring_type,
632*5c1def83SBjoern A. Zeeb enum htt_srng_ring_id *htt_ring_id)
633*5c1def83SBjoern A. Zeeb {
634*5c1def83SBjoern A. Zeeb int ret = 0;
635*5c1def83SBjoern A. Zeeb
636*5c1def83SBjoern A. Zeeb switch (ring_type) {
637*5c1def83SBjoern A. Zeeb case HAL_RXDMA_BUF:
638*5c1def83SBjoern A. Zeeb /* for some targets, host fills rx buffer to fw and fw fills to
639*5c1def83SBjoern A. Zeeb * rxbuf ring for each rxdma
640*5c1def83SBjoern A. Zeeb */
641*5c1def83SBjoern A. Zeeb if (!ab->hw_params->rx_mac_buf_ring) {
642*5c1def83SBjoern A. Zeeb if (!(ring_id == HAL_SRNG_SW2RXDMA_BUF0 ||
643*5c1def83SBjoern A. Zeeb ring_id == HAL_SRNG_SW2RXDMA_BUF1)) {
644*5c1def83SBjoern A. Zeeb ret = -EINVAL;
645*5c1def83SBjoern A. Zeeb }
646*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
647*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
648*5c1def83SBjoern A. Zeeb } else {
649*5c1def83SBjoern A. Zeeb if (ring_id == HAL_SRNG_SW2RXDMA_BUF0) {
650*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
651*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_SW_RING;
652*5c1def83SBjoern A. Zeeb } else {
653*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
654*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
655*5c1def83SBjoern A. Zeeb }
656*5c1def83SBjoern A. Zeeb }
657*5c1def83SBjoern A. Zeeb break;
658*5c1def83SBjoern A. Zeeb case HAL_RXDMA_DST:
659*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
660*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_HW_TO_SW_RING;
661*5c1def83SBjoern A. Zeeb break;
662*5c1def83SBjoern A. Zeeb case HAL_RXDMA_MONITOR_BUF:
663*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
664*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
665*5c1def83SBjoern A. Zeeb break;
666*5c1def83SBjoern A. Zeeb case HAL_RXDMA_MONITOR_STATUS:
667*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
668*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
669*5c1def83SBjoern A. Zeeb break;
670*5c1def83SBjoern A. Zeeb case HAL_RXDMA_MONITOR_DST:
671*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
672*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_HW_TO_SW_RING;
673*5c1def83SBjoern A. Zeeb break;
674*5c1def83SBjoern A. Zeeb case HAL_RXDMA_MONITOR_DESC:
675*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
676*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
677*5c1def83SBjoern A. Zeeb break;
678*5c1def83SBjoern A. Zeeb case HAL_TX_MONITOR_BUF:
679*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_TX_MON_HOST2MON_BUF_RING;
680*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_SW_TO_HW_RING;
681*5c1def83SBjoern A. Zeeb break;
682*5c1def83SBjoern A. Zeeb case HAL_TX_MONITOR_DST:
683*5c1def83SBjoern A. Zeeb *htt_ring_id = HTT_TX_MON_MON2HOST_DEST_RING;
684*5c1def83SBjoern A. Zeeb *htt_ring_type = HTT_HW_TO_SW_RING;
685*5c1def83SBjoern A. Zeeb break;
686*5c1def83SBjoern A. Zeeb default:
687*5c1def83SBjoern A. Zeeb ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
688*5c1def83SBjoern A. Zeeb ret = -EINVAL;
689*5c1def83SBjoern A. Zeeb }
690*5c1def83SBjoern A. Zeeb return ret;
691*5c1def83SBjoern A. Zeeb }
692*5c1def83SBjoern A. Zeeb
/* ath12k_dp_tx_htt_srng_setup - announce an SRNG ring to firmware via HTT
 * @ab: ath12k base structure
 * @ring_id: SRNG ring index in ab->hal.srng_list
 * @mac_id: MAC/pdev id associated with the ring
 * @ring_type: HAL ring type, translated to an HTT ring id/type pair
 *
 * Builds an HTT_H2T_MSG_TYPE_SRING_SETUP command describing the ring's base
 * address, entry/ring size, head/tail pointer addresses, MSI configuration
 * and interrupt thresholds, then sends it over the DP HTC endpoint.
 *
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_SRING_SETUP,
				      HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings are identified to fw by pdev id; SW<->SW rings use
	 * the raw mac id.
	 */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= le32_encode_bits(DP_SW2HW_MACID(mac_id),
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |= le32_encode_bits(mac_id,
					       HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_type,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_SRNG_SETUP_CMD_INFO0_RING_ID);

	cmd->ring_base_addr_lo = cpu_to_le32(params.ring_base_paddr &
					     HAL_ADDR_LSB_REG_MASK);

	cmd->ring_base_addr_hi = cpu_to_le32((u64)params.ring_base_paddr >>
					     HAL_ADDR_MSB_REG_SHIFT);

	ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	/* HTT expects entry/ring sizes in 32-bit words, not bytes */
	ring_entry_sz >>= 2;
	cmd->info1 = le32_encode_bits(ring_entry_sz,
				      HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE);
	cmd->info1 |= le32_encode_bits(params.num_entries * ring_entry_sz,
				       HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP);
	cmd->info1 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP),
				       HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP);
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= cpu_to_le32(HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS);

	cmd->ring_head_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(hp_addr));
	cmd->ring_head_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(hp_addr));

	cmd->ring_tail_off32_remote_addr_lo = cpu_to_le32(lower_32_bits(tp_addr));
	cmd->ring_tail_off32_remote_addr_hi = cpu_to_le32(upper_32_bits(tp_addr));

	cmd->ring_msi_addr_lo = cpu_to_le32(lower_32_bits(params.msi_addr));
	cmd->ring_msi_addr_hi = cpu_to_le32(upper_32_bits(params.msi_addr));
	cmd->msi_data = cpu_to_le32(params.msi_data);

	/* Batch-counter threshold is in 32-bit words; timer threshold is in
	 * 8 us units (hence the >> 3 of the microsecond value).
	 */
	cmd->intr_info =
		le32_encode_bits(params.intr_batch_cntr_thres_entries * ring_entry_sz,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH);
	cmd->intr_info |=
		le32_encode_bits(params.intr_timer_thres_us >> 3,
				 HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = le32_encode_bits(params.low_threshold,
					      HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH);
	}

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "%s msi_addr_lo:0x%x, msi_addr_hi:0x%x, msi_data:0x%x\n",
		   __func__, cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data);

	ath12k_dbg(ab, ATH12K_DBG_HAL,
		   "ring_id:%d, ring_type:%d, intr_info:0x%x, flags:0x%x\n",
		   ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
808*5c1def83SBjoern A. Zeeb
809*5c1def83SBjoern A. Zeeb #define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)
810*5c1def83SBjoern A. Zeeb
ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base * ab)811*5c1def83SBjoern A. Zeeb int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
812*5c1def83SBjoern A. Zeeb {
813*5c1def83SBjoern A. Zeeb struct ath12k_dp *dp = &ab->dp;
814*5c1def83SBjoern A. Zeeb struct sk_buff *skb;
815*5c1def83SBjoern A. Zeeb struct htt_ver_req_cmd *cmd;
816*5c1def83SBjoern A. Zeeb int len = sizeof(*cmd);
817*5c1def83SBjoern A. Zeeb int ret;
818*5c1def83SBjoern A. Zeeb
819*5c1def83SBjoern A. Zeeb init_completion(&dp->htt_tgt_version_received);
820*5c1def83SBjoern A. Zeeb
821*5c1def83SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
822*5c1def83SBjoern A. Zeeb if (!skb)
823*5c1def83SBjoern A. Zeeb return -ENOMEM;
824*5c1def83SBjoern A. Zeeb
825*5c1def83SBjoern A. Zeeb skb_put(skb, len);
826*5c1def83SBjoern A. Zeeb cmd = (struct htt_ver_req_cmd *)skb->data;
827*5c1def83SBjoern A. Zeeb cmd->ver_reg_info = le32_encode_bits(HTT_H2T_MSG_TYPE_VERSION_REQ,
828*5c1def83SBjoern A. Zeeb HTT_VER_REQ_INFO_MSG_ID);
829*5c1def83SBjoern A. Zeeb
830*5c1def83SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
831*5c1def83SBjoern A. Zeeb if (ret) {
832*5c1def83SBjoern A. Zeeb dev_kfree_skb_any(skb);
833*5c1def83SBjoern A. Zeeb return ret;
834*5c1def83SBjoern A. Zeeb }
835*5c1def83SBjoern A. Zeeb
836*5c1def83SBjoern A. Zeeb ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
837*5c1def83SBjoern A. Zeeb HTT_TARGET_VERSION_TIMEOUT_HZ);
838*5c1def83SBjoern A. Zeeb if (ret == 0) {
839*5c1def83SBjoern A. Zeeb ath12k_warn(ab, "htt target version request timed out\n");
840*5c1def83SBjoern A. Zeeb return -ETIMEDOUT;
841*5c1def83SBjoern A. Zeeb }
842*5c1def83SBjoern A. Zeeb
843*5c1def83SBjoern A. Zeeb if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
844*5c1def83SBjoern A. Zeeb ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
845*5c1def83SBjoern A. Zeeb dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
846*5c1def83SBjoern A. Zeeb return -ENOTSUPP;
847*5c1def83SBjoern A. Zeeb }
848*5c1def83SBjoern A. Zeeb
849*5c1def83SBjoern A. Zeeb return 0;
850*5c1def83SBjoern A. Zeeb }
851*5c1def83SBjoern A. Zeeb
/* Enable PPDU stats reporting for every rxdma instance of this radio.
 *
 * One HTT_H2T_MSG_TYPE_PPDU_STATS_CFG command is sent per rxdma, with the
 * pdev selected via a one-hot mask (bit index pdev_idx + 1) and the TLV
 * types requested by @mask.
 *
 * Returns 0 on success or a negative errno; on a send failure the skb for
 * that iteration is freed and the error is propagated immediately.
 */
int ath12k_dp_tx_htt_h2t_ppdu_stats_req(struct ath12k *ar, u32 mask)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	struct sk_buff *skb;
	int pdev_idx;
	int ret;

	for (pdev_idx = 0; pdev_idx < ab->hw_params->num_rxmda_per_pdev;
	     pdev_idx++) {
		skb = ath12k_htc_alloc_skb(ab, sizeof(*cmd));
		if (!skb)
			return -ENOMEM;

		skb_put(skb, sizeof(*cmd));
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = le32_encode_bits(HTT_H2T_MSG_TYPE_PPDU_STATS_CFG,
					    HTT_PPDU_STATS_CFG_MSG_TYPE);
		/* pdev mask is one-hot, offset by one from the rxdma index */
		cmd->msg |= le32_encode_bits(1 << (pdev_idx + 1),
					     HTT_PPDU_STATS_CFG_PDEV_ID);
		cmd->msg |= le32_encode_bits(mask,
					     HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK);

		ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}
886*5c1def83SBjoern A. Zeeb
/* ath12k_dp_tx_htt_rx_filter_setup - program rx ring packet/TLV filters
 * @ab: ath12k base structure
 * @ring_id: SRNG ring index in ab->hal.srng_list
 * @mac_id: MAC/pdev id associated with the ring
 * @ring_type: HAL ring type, translated to an HTT ring id/type pair
 * @rx_buf_size: rx buffer size to report in the command
 * @tlv_filter: packet filter flags, rx TLV filter and optional TLV offsets
 *
 * Builds and sends an HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG command. When
 * tlv_filter->offset_valid is set, the per-TLV offsets (packet, header,
 * mpdu/msdu start+end, attention) are included as well.
 *
 * Returns 0 on success or a negative errno; the skb is freed on failure.
 */
int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings are addressed by pdev id, SW<->SW rings by mac id */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS);
	cmd->info0 |= le32_encode_bits(tlv_filter->offset_valid,
				       HTT_RX_RING_SELECTION_CFG_CMD_OFFSET_VALID);
	cmd->info1 = le32_encode_bits(rx_buf_size,
				      HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE);
	cmd->pkt_type_en_flags0 = cpu_to_le32(tlv_filter->pkt_filter_flags0);
	cmd->pkt_type_en_flags1 = cpu_to_le32(tlv_filter->pkt_filter_flags1);
	cmd->pkt_type_en_flags2 = cpu_to_le32(tlv_filter->pkt_filter_flags2);
	cmd->pkt_type_en_flags3 = cpu_to_le32(tlv_filter->pkt_filter_flags3);
	cmd->rx_filter_tlv = cpu_to_le32(tlv_filter->rx_filter);

	if (tlv_filter->offset_valid) {
		/* Each 32-bit word packs two related TLV offsets */
		cmd->rx_packet_offset =
			le32_encode_bits(tlv_filter->rx_packet_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_PACKET_OFFSET);

		cmd->rx_packet_offset |=
			le32_encode_bits(tlv_filter->rx_header_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_HEADER_OFFSET);

		cmd->rx_mpdu_offset =
			le32_encode_bits(tlv_filter->rx_mpdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_END_OFFSET);

		cmd->rx_mpdu_offset |=
			le32_encode_bits(tlv_filter->rx_mpdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MPDU_START_OFFSET);

		cmd->rx_msdu_offset =
			le32_encode_bits(tlv_filter->rx_msdu_end_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_END_OFFSET);

		cmd->rx_msdu_offset |=
			le32_encode_bits(tlv_filter->rx_msdu_start_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_MSDU_START_OFFSET);

		cmd->rx_attn_offset =
			le32_encode_bits(tlv_filter->rx_attn_offset,
					 HTT_RX_RING_SELECTION_CFG_RX_ATTENTION_OFFSET);
	}

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
984*5c1def83SBjoern A. Zeeb
985*5c1def83SBjoern A. Zeeb int
ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k * ar,u8 type,struct htt_ext_stats_cfg_params * cfg_params,u64 cookie)986*5c1def83SBjoern A. Zeeb ath12k_dp_tx_htt_h2t_ext_stats_req(struct ath12k *ar, u8 type,
987*5c1def83SBjoern A. Zeeb struct htt_ext_stats_cfg_params *cfg_params,
988*5c1def83SBjoern A. Zeeb u64 cookie)
989*5c1def83SBjoern A. Zeeb {
990*5c1def83SBjoern A. Zeeb struct ath12k_base *ab = ar->ab;
991*5c1def83SBjoern A. Zeeb struct ath12k_dp *dp = &ab->dp;
992*5c1def83SBjoern A. Zeeb struct sk_buff *skb;
993*5c1def83SBjoern A. Zeeb struct htt_ext_stats_cfg_cmd *cmd;
994*5c1def83SBjoern A. Zeeb int len = sizeof(*cmd);
995*5c1def83SBjoern A. Zeeb int ret;
996*5c1def83SBjoern A. Zeeb
997*5c1def83SBjoern A. Zeeb skb = ath12k_htc_alloc_skb(ab, len);
998*5c1def83SBjoern A. Zeeb if (!skb)
999*5c1def83SBjoern A. Zeeb return -ENOMEM;
1000*5c1def83SBjoern A. Zeeb
1001*5c1def83SBjoern A. Zeeb skb_put(skb, len);
1002*5c1def83SBjoern A. Zeeb
1003*5c1def83SBjoern A. Zeeb cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
1004*5c1def83SBjoern A. Zeeb memset(cmd, 0, sizeof(*cmd));
1005*5c1def83SBjoern A. Zeeb cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;
1006*5c1def83SBjoern A. Zeeb
1007*5c1def83SBjoern A. Zeeb cmd->hdr.pdev_mask = 1 << ar->pdev->pdev_id;
1008*5c1def83SBjoern A. Zeeb
1009*5c1def83SBjoern A. Zeeb cmd->hdr.stats_type = type;
1010*5c1def83SBjoern A. Zeeb cmd->cfg_param0 = cpu_to_le32(cfg_params->cfg0);
1011*5c1def83SBjoern A. Zeeb cmd->cfg_param1 = cpu_to_le32(cfg_params->cfg1);
1012*5c1def83SBjoern A. Zeeb cmd->cfg_param2 = cpu_to_le32(cfg_params->cfg2);
1013*5c1def83SBjoern A. Zeeb cmd->cfg_param3 = cpu_to_le32(cfg_params->cfg3);
1014*5c1def83SBjoern A. Zeeb cmd->cookie_lsb = cpu_to_le32(lower_32_bits(cookie));
1015*5c1def83SBjoern A. Zeeb cmd->cookie_msb = cpu_to_le32(upper_32_bits(cookie));
1016*5c1def83SBjoern A. Zeeb
1017*5c1def83SBjoern A. Zeeb ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1018*5c1def83SBjoern A. Zeeb if (ret) {
1019*5c1def83SBjoern A. Zeeb ath12k_warn(ab, "failed to send htt type stats request: %d",
1020*5c1def83SBjoern A. Zeeb ret);
1021*5c1def83SBjoern A. Zeeb dev_kfree_skb_any(skb);
1022*5c1def83SBjoern A. Zeeb return ret;
1023*5c1def83SBjoern A. Zeeb }
1024*5c1def83SBjoern A. Zeeb
1025*5c1def83SBjoern A. Zeeb return 0;
1026*5c1def83SBjoern A. Zeeb }
1027*5c1def83SBjoern A. Zeeb
/* ath12k_dp_tx_htt_monitor_mode_ring_config - configure tx and rx monitor rings
 * @ar: ath12k radio structure
 * @reset: true to reset the monitor filters, false to program them
 *
 * Returns 0 on success or the first failing sub-call's negative errno.
 */
int ath12k_dp_tx_htt_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	int ret;

	ret = ath12k_dp_tx_htt_tx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup tx monitor filter %d\n", ret);
		return ret;
	}

	/* Fix: the second call previously repeated the *tx* monitor setup,
	 * so the rx monitor filter was never configured despite the error
	 * message below claiming otherwise.
	 */
	ret = ath12k_dp_tx_htt_rx_monitor_mode_ring_config(ar, reset);
	if (ret) {
		ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
		return ret;
	}

	return 0;
}
1047*5c1def83SBjoern A. Zeeb
/* Program (or reset) the rx monitor buffer ring TLV filter.
 *
 * With @reset false, the filter is populated to pass management, control
 * and data frames from both the FP and MO filters into the monitor buffer
 * ring; with @reset true an all-zero filter is sent. The firmware command
 * is only issued when the hardware has rxdma1 (a dedicated monitor rxdma).
 *
 * Returns 0 on success or the ath12k_dp_tx_htt_rx_filter_setup() errno.
 */
int ath12k_dp_tx_htt_rx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_dp *dp = &ab->dp;
	struct htt_rx_ring_tlv_filter tlv_filter = { .offset_valid = false };
	int ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;
	int ret;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	/* Nothing to program without a dedicated monitor rxdma */
	if (!ab->hw_params->rxdma1_enable)
		return 0;

	ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, 0,
					       HAL_RXDMA_MONITOR_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);
	if (ret)
		ath12k_err(ab,
			   "failed to setup filter for monitor buf %d\n", ret);

	return ret;
}
1090*5c1def83SBjoern A. Zeeb
/* Send an HTT TX_MONITOR_CFG H2T command configuring the TLV filter for a
 * TX monitor ring.
 *
 * @ab: ath12k device base
 * @ring_id: index into ab->hal.srng_list for the ring being configured
 * @mac_id: MAC/pdev id; translated via DP_SW2HW_MACID() for SW<->HW rings
 * @ring_type: HAL ring type, mapped to the HTT ring id/type
 * @tx_buf_size: ring buffer size programmed into the command
 * @htt_tlv_filter: per-frame-type filter flags and TLV masks to apply
 *
 * Returns 0 on success or a negative errno; the command skb is freed on
 * every error path.
 */
int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int tx_buf_size,
				     struct htt_tx_ring_tlv_filter *htt_tlv_filter)
{
	struct htt_tx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath12k_hal_srng_get_params(ab, srng, &params);

	ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_tx_ring_selection_cfg_cmd *)skb->data;

	/* The HTC skb payload is not zeroed on allocation; clear the command
	 * so the "|=" accumulation below starts from a known state.
	 */
	memset(cmd, 0, len);

	cmd->info0 = le32_encode_bits(HTT_H2T_MSG_TYPE_TX_MONITOR_CFG,
				      HTT_TX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE);
	/* SW<->HW rings carry a translated pdev id; pure SW rings use the
	 * raw mac_id.
	 */
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			le32_encode_bits(DP_SW2HW_MACID(mac_id),
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	else
		cmd->info0 |=
			le32_encode_bits(mac_id,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID);
	cmd->info0 |= le32_encode_bits(htt_ring_id,
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_RING_ID);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_SS);
	cmd->info0 |= le32_encode_bits(!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP),
				       HTT_TX_RING_SELECTION_CFG_CMD_INFO0_PS);

	cmd->info1 |=
		le32_encode_bits(tx_buf_size,
				 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_RING_BUFF_SIZE);

	/* NOTE(review): the data filter below encodes FRAME_CTRL_TYPE_CTRL
	 * and the ctrl filter encodes FRAME_CTRL_TYPE_DATA. This looks
	 * swapped, but the encoding is kept as-is since the firmware-side
	 * expectation cannot be confirmed from this file — verify against
	 * the HTT interface definition before changing.
	 */
	if (htt_tlv_filter->tx_mon_mgmt_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_MGMT);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_MGMT,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_data_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_CTRL);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_CTRL,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	if (htt_tlv_filter->tx_mon_ctrl_filter) {
		cmd->info1 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_PKT_TYPE);
		cmd->info1 |=
			le32_encode_bits(htt_tlv_filter->tx_mon_pkt_dma_len,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO1_CONF_LEN_DATA);
		cmd->info2 |=
			le32_encode_bits(HTT_STATS_FRAME_CTRL_TYPE_DATA,
					 HTT_TX_RING_SELECTION_CFG_CMD_INFO2_PKT_TYPE_EN_FLAG);
	}

	cmd->tlv_filter_mask_in0 =
		cpu_to_le32(htt_tlv_filter->tx_mon_downstream_tlv_flags);
	cmd->tlv_filter_mask_in1 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags0);
	cmd->tlv_filter_mask_in2 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags1);
	cmd->tlv_filter_mask_in3 =
		cpu_to_le32(htt_tlv_filter->tx_mon_upstream_tlv_flags2);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);
	return ret;
}
1198*5c1def83SBjoern A. Zeeb
/* Configure the TX monitor buffer ring filter for @ar.
 *
 * Currently programs an all-zero TLV filter on the TX monitor refill ring
 * when the hardware supports rxdma1; otherwise a no-op. The @reset flag is
 * not yet consumed (see TODO below).
 *
 * Returns 0 on success or the error from the filter setup.
 */
int ath12k_dp_tx_htt_tx_monitor_mode_ring_config(struct ath12k *ar, bool reset)
{
	struct ath12k_base *ab = ar->ab;
	struct htt_tx_ring_tlv_filter tlv_filter = {0};
	int ring_id;
	int ret;

	ring_id = ab->dp.tx_mon_buf_ring.refill_buf_ring.ring_id;

	/* TODO: Need to set upstream/downstream tlv filters
	 * here
	 */

	if (!ab->hw_params->rxdma1_enable)
		return 0;

	ret = ath12k_dp_tx_htt_tx_filter_setup(ab, ring_id, 0,
					       HAL_TX_MONITOR_BUF,
					       DP_RXDMA_REFILL_RING_SIZE,
					       &tlv_filter);
	if (ret)
		ath12k_err(ab,
			   "failed to setup filter for monitor buf %d\n", ret);

	return ret;
}
1226