// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"

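/* Choose the TCL encapsulation type for an outgoing frame: raw when the
 * device runs in raw mode, Ethernet when mac80211 has offloaded 802.11
 * encapsulation to the hardware, native wifi otherwise.
 */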
static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath11k_base *ab = arvif->ar->ab;

	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}

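/* Convert an 802.11 QoS data frame to the native wifi format by stripping
 * the QoS control field and clearing the QoS-data subtype bit.
 */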
static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}

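/* Derive the TID for a frame: skb->priority for hw-encapsulated and QoS
 * data frames, the dedicated non-QoS TID otherwise.
 */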
static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);

	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}

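/* Map a mac80211 cipher suite to the HAL encryption type; unknown ciphers
 * fall back to HAL_ENCRYPT_TYPE_OPEN.
 */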
enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}

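/* Transmit one MSDU: select a TCL ring, reserve an idr slot for completion
 * lookup, fill the hal_tx_info (encapsulation, encryption, checksum offload,
 * TID), DMA-map the buffer and post a TCL data command descriptor. When a
 * ring is full and the hw supports it, the ring selector is advanced and
 * another ring is tried.
 */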
int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		 struct ath11k_sta *arsta, struct sk_buff *skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct dp_tx_ring *tx_ring;
	size_t num_tx_rings = ab->hw_params.hal_params->num_tx_rings;
	void *hal_tcl_desc;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u32 ring_selector = 0;
	u8 ring_map = 0;
	bool tcl_ring_retry;

	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
		return -ESHUTDOWN;

	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -EOPNOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);

	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;

	ti.ring_id = ring_selector % num_tx_rings;
	ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;

	ring_map |= BIT(ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	spin_lock_bh(&tx_ring->tx_idr_lock);
	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (unlikely(ret < 0)) {
		if (ring_map == (BIT(num_tx_rings) - 1) ||
		    !ab->hw_params.tcl_ring_retry) {
			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
			return -ENOSPC;
		}

		/* Check if the next ring is available */
		ring_selector++;
		goto tcl_ring_sel;
	}

	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);

	if (ieee80211_has_a4(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr3) && arsta &&
	    arsta->use_4addr_set) {
		ti.meta_data_flags = arsta->tcl_metadata;
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
	} else {
		ti.meta_data_flags = arvif->tcl_metadata;
	}

	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.enable_mesh = true;

	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = ath11k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath11k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_idr;
	}

	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = arvif->vif;
	skb_cb->ar = ar;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);

	ath11k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (unlikely(!hal_tcl_desc)) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath11k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to full tcl ring now, is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (unlikely(ring_map != (BIT(num_tx_rings) - 1)) &&
		    ab->hw_params.tcl_ring_retry && num_tx_rings > 1) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
					 sizeof(struct hal_tlv_hdr), &ti);

	ath11k_hal_srng_access_end(ab, tcl_ring);

	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);

	spin_unlock_bh(&tcl_ring->lock);

	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

fail_remove_idr:
	spin_lock_bh(&tx_ring->tx_idr_lock);
	idr_remove(&tx_ring->txbuf_idr,
		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}

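/* Drop a tx buffer that needs no status reporting: remove it from the idr,
 * unmap and free the skb, and wake any waiter once the pending tx count
 * reaches zero.
 */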
static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
				    int msdu_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath11k *ar;
	struct sk_buff *msdu;
	struct ath11k_skb_cb *skb_cb;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
			    msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[mac_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}

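/* Complete an MSDU whose status was delivered by firmware over HTT: unmap
 * the buffer, fill in the ack status and signal, and hand the frame back to
 * mac80211 via ieee80211_tx_status_ext().
 */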
static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
				 struct dp_tx_ring *tx_ring,
				 struct ath11k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_status status = {};
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k *ar;
	struct ath11k_peer *peer;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
			    ts->msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (!skb_cb->vif) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

			info->status.flags |=
				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	spin_unlock_bh(&ab->base_lock);

	status.sta = peer->sta;
	status.info = info;
	status.skb = msdu;

	ieee80211_tx_status_ext(ar->hw, &status);
}

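/* Dispatch a firmware-routed (HTT) WBM tx completion by its wbm status:
 * OK/DROP/TTL completions are reported to mac80211, REINJ/INSPECT buffers
 * are simply freed, and MEC_NOTIFY is ignored (only relevant for WDS
 * offload).
 */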
static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
				     void *desc, u8 mac_id,
				     u32 msdu_id, struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath11k_dp_htt_wbm_tx_status ts = {};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;

	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
			       status_desc->info0);
	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.msdu_id = msdu_id;
		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
					status_desc->info1);

		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
					       status_desc->info2);
		else
			ts.peer_id = HTT_INVALID_PEER_ID;

		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);

		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}

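/* Accumulate per-peer retry and failure counters from a tx status into the
 * pdev-level cache used by the extended tx statistics.
 */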
static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
					  struct sk_buff *msdu,
					  struct hal_tx_status *ts)
{
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;

	if (ts->try_cnt > 1) {
		peer_stats->retry_pkts += ts->try_cnt - 1;
		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;

		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
			peer_stats->failed_pkts += 1;
			peer_stats->failed_bytes += msdu->len;
		}
	}
}

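/* Update the station's tx rate info (arsta->txrate) from the rate stats in
 * a tx completion, translating HAL pkt type/MCS/GI/BW fields into mac80211
 * rate_info, and feed the debugfs tx stats when enabled.
 */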
void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
	enum hal_tx_rate_stats_pkt_type pkt_type;
	enum hal_tx_rate_stats_sgi sgi;
	enum hal_tx_rate_stats_bw bw;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct ieee80211_sta *sta;
	u16 rate, ru_tones;
	u8 mcs, rate_idx = 0, ofdma;
	int ret;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
			   "failed to find the peer by id %u\n", ts->peer_id);
		goto err_out;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
			     ts->rate_stats);
	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
			ts->rate_stats);
	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
			ts->rate_stats);
	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);

	/* Prefer the real NSS value arsta->last_txrate.nss; if it is
	 * invalid, fall back to the NSS value negotiated at assoc.
	 */
	if (arsta->last_txrate.nss)
		arsta->txrate.nss = arsta->last_txrate.nss;
	else
		arsta->txrate.nss = arsta->peer_nss;

	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    pkt_type,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			goto err_out;
		arsta->txrate.legacy = rate;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
		if (mcs > 7) {
			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
			goto err_out;
		}

		if (arsta->txrate.nss != 0)
			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
		if (mcs > 9) {
			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		if (mcs > 11) {
			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
	}

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
		arsta->txrate.he_ru_alloc =
			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
	}

	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);

err_out:
	spin_unlock_bh(&ab->base_lock);
}

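/* Standard (TQM-released) tx completion: unmap the MSDU, translate the HAL
 * tx status into mac80211 tx info (ack flags, ack signal, last tx rate) and
 * report the frame via ieee80211_tx_status_ext().
 */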
static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ieee80211_tx_status status = {};
	struct ieee80211_rate_status status_rate = {};
	struct ath11k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct rate_info rate;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	if (unlikely(!skb_cb->vif)) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status */
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ack_rssi;

		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
			      ab->wmi_ab.svc_map))
			info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;

	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
	    ab->hw_params.single_pdev_only) {
		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
			if (ar->last_ppdu_id == 0) {
				ar->last_ppdu_id = ts->ppdu_id;
			} else if (ar->last_ppdu_id == ts->ppdu_id ||
				   ar->cached_ppdu_id == ar->last_ppdu_id) {
				ar->cached_ppdu_id = ar->last_ppdu_id;
				ar->cached_stats.is_ampdu = true;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			} else {
				ar->cached_stats.is_ampdu = false;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			}
			ar->last_ppdu_id = ts->ppdu_id;
		}

		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	arsta = ath11k_sta_to_arsta(peer->sta);
	status.sta = peer->sta;
	status.skb = msdu;
	status.info = info;
	rate = arsta->last_txrate;

	status_rate.rate_idx = rate;
	status_rate.try_count = 1;

	status.rates = &status_rate;
	status.n_rates = 1;

	spin_unlock_bh(&ab->base_lock);

	ieee80211_tx_status_ext(ar->hw, &status);
}

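/* Parse a WBM release ring descriptor into struct hal_tx_status. Only
 * TQM-released descriptors are parsed in full; firmware-released ones are
 * left for the HTT completion path.
 */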
static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
					     struct hal_wbm_release_ring *desc,
					     struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
		return;

	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
		return;

	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
			       desc->info0);
	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
				desc->info1);
	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
				desc->info1);
	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
				 desc->info2);
	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = desc->rate_stats.info0;
	else
		ts->rate_stats = 0;
}

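/* Drain the WBM tx completion ring: copy descriptors into the tx_status
 * FIFO under the srng lock, then process each entry outside it, routing
 * firmware completions to the HTT handler and the rest to
 * ath11k_dp_tx_complete_msdu().
 */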
void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct sk_buff *msdu;
	struct hal_tx_status ts = {};
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	u32 *desc;
	u32 msdu_id;
	u8 mac_id;

	spin_lock_bh(&status_ring->lock);

	ath11k_hal_srng_access_begin(ab, status_ring);

	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath11k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath11k_dp_tx_status_parse(ab, tx_status, &ts);

		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);

		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
			ath11k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu_id,
							     tx_ring);
			continue;
		}

		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);

		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}

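/* Post a REO command to the REO command ring. If a callback is supplied,
 * the command is also queued on dp->reo_cmd_list so the later status event
 * can invoke it.
 */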
int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
			      enum hal_reo_cmd_type type,
			      struct ath11k_hal_reo_cmd *cmd,
			      void (*cb)(struct ath11k_dp *, void *,
					 enum hal_reo_cmd_status))
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1; on failure return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc_obj(*dp_cmd, GFP_ATOMIC);

	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

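/* Translate a HAL ring type (and, for RXDMA buf rings, the ring id) into
 * the HTT ring id/type pair expected in HTT srng setup commands.
 */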
static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params.rx_mac_buf_ring) {
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
					  lmac_ring_id_offset) ||
			      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
					  lmac_ring_id_offset))) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

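/* Send an HTT_H2T_MSG_TYPE_SRING_SETUP command describing one SRNG (base
 * address, entry size, head/tail pointer addresses, MSI and interrupt
 * thresholds) to the firmware.
 */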
int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_SRING_SETUP);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
				 htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr &
				 HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
				 HAL_ADDR_MSB_REG_SHIFT;

	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
				ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
				 params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
			params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
			params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
				params.low_threshold);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_TX,
		   "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
		   cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

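/* Request the HTT target version from firmware and wait up to
 * HTT_TARGET_VERSION_TIMEOUT_HZ for the response; fail when the reported
 * major version is unsupported.
 */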
int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
				       HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
	if (ret == 0) {
		ath11k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}

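/* Configure which PPDU stats TLVs the firmware delivers, sending one
 * HTT_H2T_MSG_TYPE_PPDU_STATS_CFG command per rxdma of the pdev.
 */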
int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		skb = ath11k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);

		pdev_mask = 1 << (ar->pdev_idx + i);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);

		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

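/* Program the rx ring selection config for a ring: rx buffer size, packet
 * type filters and the rx TLV filter.
 */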
int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   mac_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
				 htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
				rx_buf_size);
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

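/* Request extended (HTT) firmware statistics of the given type for this
 * pdev; the cookie is echoed back in the stats response.
 */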
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	u32 pdev_id;
	int len = sizeof(*cmd);
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	if (ab->hw_params.single_pdev_only)
		pdev_id = ath11k_mac_get_target_pdev_id(ar);
	else
		pdev_id = ar->pdev->pdev_id;

	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cfg_params->cfg0;
	cmd->cfg_param1 = cfg_params->cfg1;
	cmd->cfg_param2 = cfg_params->cfg2;
	cmd->cfg_param3 = cfg_params->cfg3;
	cmd->cookie_lsb = lower_32_bits(cookie);
	cmd->cookie_msb = upper_32_bits(cookie);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath11k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

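/* Configure (or reset) the rx monitor rings: optionally enable full monitor
 * mode, program the monitor buffer ring filters and set up the monitor
 * status ring filters for each rxdma.
 */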
int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {};
	int ret = 0, ring_id = 0, i;

	if (ab->hw_params.full_monitor_mode) {
		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
							 dp->mac_id, !reset);
		if (ret < 0) {
			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
			return ret;
		}
	}

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
					HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
					HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
					HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
					HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
					HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params.rxdma1_enable) {
		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	} else if (!reset) {
		/* set in monitor mode only */
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       dp->mac_id + i,
							       HAL_RXDMA_BUF,
							       1024,
							       &tlv_filter);
		}
	}

	if (ret)
		return ret;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
					HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		} else {
			tlv_filter = ath11k_mac_mon_status_filter_default;

			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
		}

		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       dp->mac_id + i,
						       HAL_RXDMA_MONITOR_STATUS,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	if (!ar->ab->hw_params.rxdma1_enable)
		mod_timer(&ar->ab->mon_reap_timer, jiffies +
			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return ret;
}

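/* Enable or disable full monitor mode in firmware through the HTT full
 * monitor mode config command.
 */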
int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
				       bool config)
{
	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
	struct sk_buff *skb;
	int ret, len = sizeof(*cmd);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);

	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);

	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
			      HTT_RX_MON_RING_SW);
	if (config) {
		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
	}

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}