// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include "core.h"
#include "dp_tx.h"
#include "debug.h"
#include "debugfs_sta.h"
#include "hw.h"
#include "peer.h"
#include "mac.h"

static enum hal_tcl_encap_type
ath11k_dp_tx_get_encap_type(struct ath11k_vif *arvif, struct sk_buff *skb)
{
	struct ieee80211_tx_info *tx_info = IEEE80211_SKB_CB(skb);
	struct ath11k_base *ab = arvif->ar->ab;

	if (test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags))
		return HAL_TCL_ENCAP_TYPE_RAW;

	if (tx_info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP)
		return HAL_TCL_ENCAP_TYPE_ETHERNET;

	return HAL_TCL_ENCAP_TYPE_NATIVE_WIFI;
}

static void ath11k_dp_tx_encap_nwifi(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	u8 *qos_ctl;

	if (!ieee80211_is_data_qos(hdr->frame_control))
		return;

	qos_ctl = ieee80211_get_qos_ctl(hdr);
	memmove(skb->data + IEEE80211_QOS_CTL_LEN,
		skb->data, (void *)qos_ctl - (void *)skb->data);
	skb_pull(skb, IEEE80211_QOS_CTL_LEN);

	hdr = (void *)skb->data;
	hdr->frame_control &= ~__cpu_to_le16(IEEE80211_STYPE_QOS_DATA);
}

static u8 ath11k_dp_tx_get_tid(struct sk_buff *skb)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath11k_skb_cb *cb = ATH11K_SKB_CB(skb);

	if (cb->flags & ATH11K_SKB_HW_80211_ENCAP)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else if (!ieee80211_is_data_qos(hdr->frame_control))
		return HAL_DESC_REO_NON_QOS_TID;
	else
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
}

enum hal_encrypt_type ath11k_dp_tx_get_encrypt_type(u32 cipher)
{
	switch (cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
		return HAL_ENCRYPT_TYPE_WEP_40;
	case WLAN_CIPHER_SUITE_WEP104:
		return HAL_ENCRYPT_TYPE_WEP_104;
	case WLAN_CIPHER_SUITE_TKIP:
		return HAL_ENCRYPT_TYPE_TKIP_MIC;
	case WLAN_CIPHER_SUITE_CCMP:
		return HAL_ENCRYPT_TYPE_CCMP_128;
	case WLAN_CIPHER_SUITE_CCMP_256:
		return HAL_ENCRYPT_TYPE_CCMP_256;
	case WLAN_CIPHER_SUITE_GCMP:
		return HAL_ENCRYPT_TYPE_GCMP_128;
	case WLAN_CIPHER_SUITE_GCMP_256:
		return HAL_ENCRYPT_TYPE_AES_GCMP_256;
	default:
		return HAL_ENCRYPT_TYPE_OPEN;
	}
}

int ath11k_dp_tx(struct ath11k *ar, struct ath11k_vif *arvif,
		 struct ath11k_sta *arsta, struct sk_buff *skb)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct hal_tx_info ti = {};
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath11k_skb_cb *skb_cb = ATH11K_SKB_CB(skb);
	struct hal_srng *tcl_ring;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct dp_tx_ring *tx_ring;
	void *hal_tcl_desc;
	u8 pool_id;
	u8 hal_ring_id;
	int ret;
	u32 ring_selector = 0;
	u8 ring_map = 0;
	bool tcl_ring_retry;

	if (unlikely(test_bit(ATH11K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags)))
		return -ESHUTDOWN;

	if (unlikely(!(info->flags & IEEE80211_TX_CTL_HW_80211_ENCAP) &&
		     !ieee80211_is_data(hdr->frame_control)))
		return -EOPNOTSUPP;

	pool_id = skb_get_queue_mapping(skb) & (ATH11K_HW_MAX_QUEUES - 1);

	ring_selector = ab->hw_params.hw_ops->get_ring_selector(skb);

tcl_ring_sel:
	tcl_ring_retry = false;

	ti.ring_id = ring_selector % ab->hw_params.max_tx_ring;
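	/* Each TCL data ring returns its completions through a fixed WBM
	 * return buffer manager; look up the RBM id matching the ring
	 * selected above.
	 */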
	ti.rbm_id = ab->hw_params.hal_params->tcl2wbm_rbm_map[ti.ring_id].rbm_id;

	ring_map |= BIT(ti.ring_id);

	tx_ring = &dp->tx_ring[ti.ring_id];

	spin_lock_bh(&tx_ring->tx_idr_lock);
	ret = idr_alloc(&tx_ring->txbuf_idr, skb, 0,
			DP_TX_IDR_SIZE - 1, GFP_ATOMIC);
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (unlikely(ret < 0)) {
		if (ring_map == (BIT(ab->hw_params.max_tx_ring) - 1) ||
		    !ab->hw_params.tcl_ring_retry) {
			atomic_inc(&ab->soc_stats.tx_err.misc_fail);
			return -ENOSPC;
		}

		/* Check if the next ring is available */
		ring_selector++;
		goto tcl_ring_sel;
	}

	ti.desc_id = FIELD_PREP(DP_TX_DESC_ID_MAC_ID, ar->pdev_idx) |
		     FIELD_PREP(DP_TX_DESC_ID_MSDU_ID, ret) |
		     FIELD_PREP(DP_TX_DESC_ID_POOL_ID, pool_id);
	ti.encap_type = ath11k_dp_tx_get_encap_type(arvif, skb);

	if (ieee80211_has_a4(hdr->frame_control) &&
	    is_multicast_ether_addr(hdr->addr3) && arsta &&
	    arsta->use_4addr_set) {
		ti.meta_data_flags = arsta->tcl_metadata;
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TO_FW, 1);
	} else {
		ti.meta_data_flags = arvif->tcl_metadata;
	}

	if (unlikely(ti.encap_type == HAL_TCL_ENCAP_TYPE_RAW)) {
		if (skb_cb->flags & ATH11K_SKB_CIPHER_SET) {
			ti.encrypt_type =
				ath11k_dp_tx_get_encrypt_type(skb_cb->cipher);

			if (ieee80211_has_protected(hdr->frame_control))
				skb_put(skb, IEEE80211_CCMP_MIC_LEN);
		} else {
			ti.encrypt_type = HAL_ENCRYPT_TYPE_OPEN;
		}
	}

	ti.addr_search_flags = arvif->hal_addr_search_flags;
	ti.search_type = arvif->search_type;
	ti.type = HAL_TCL_DESC_TYPE_BUFFER;
	ti.pkt_offset = 0;
	ti.lmac_id = ar->lmac_id;
	ti.bss_ast_hash = arvif->ast_hash;
	ti.bss_ast_idx = arvif->ast_idx;
	ti.dscp_tid_tbl_idx = 0;

	if (likely(skb->ip_summed == CHECKSUM_PARTIAL &&
		   ti.encap_type != HAL_TCL_ENCAP_TYPE_RAW)) {
		ti.flags0 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_IP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_UDP6_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP4_CKSUM_EN, 1) |
			     FIELD_PREP(HAL_TCL_DATA_CMD_INFO1_TCP6_CKSUM_EN, 1);
	}

	if (ieee80211_vif_is_mesh(arvif->vif))
		ti.enable_mesh = true;

	ti.flags1 |= FIELD_PREP(HAL_TCL_DATA_CMD_INFO2_TID_OVERWRITE, 1);

	ti.tid = ath11k_dp_tx_get_tid(skb);

	switch (ti.encap_type) {
	case HAL_TCL_ENCAP_TYPE_NATIVE_WIFI:
		ath11k_dp_tx_encap_nwifi(skb);
		break;
	case HAL_TCL_ENCAP_TYPE_RAW:
		if (!test_bit(ATH11K_FLAG_RAW_MODE, &ab->dev_flags)) {
			ret = -EINVAL;
			goto fail_remove_idr;
		}
		break;
	case HAL_TCL_ENCAP_TYPE_ETHERNET:
		/* no need to encap */
		break;
	case HAL_TCL_ENCAP_TYPE_802_3:
	default:
		/* TODO: Take care of other encap modes as well */
		ret = -EINVAL;
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		goto fail_remove_idr;
	}

	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(ab->dev, ti.paddr))) {
		atomic_inc(&ab->soc_stats.tx_err.misc_fail);
		ath11k_warn(ab, "failed to DMA map data Tx buffer\n");
		ret = -ENOMEM;
		goto fail_remove_idr;
	}

	ti.data_len = skb->len;
	skb_cb->paddr = ti.paddr;
	skb_cb->vif = arvif->vif;
	skb_cb->ar = ar;

	hal_ring_id = tx_ring->tcl_data_ring.ring_id;
	tcl_ring = &ab->hal.srng_list[hal_ring_id];

	spin_lock_bh(&tcl_ring->lock);
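	/* SRNG access must be bracketed by begin/end while holding the ring
	 * lock; ath11k_hal_srng_access_end() syncs the updated ring pointers
	 * to the hardware.
	 */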
	ath11k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = (void *)ath11k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (unlikely(!hal_tcl_desc)) {
		/* NOTE: It is highly unlikely we'll be running out of tcl_ring
		 * desc because the desc is directly enqueued onto hw queue.
		 */
		ath11k_hal_srng_access_end(ab, tcl_ring);
		ab->soc_stats.tx_err.desc_na[ti.ring_id]++;
		spin_unlock_bh(&tcl_ring->lock);
		ret = -ENOMEM;

		/* Checking for available tcl descriptors in another ring in
		 * case of failure due to a full tcl ring now is better than
		 * checking this ring earlier for each pkt tx.
		 * Restart ring selection if some rings are not checked yet.
		 */
		if (unlikely(ring_map != (BIT(ab->hw_params.max_tx_ring)) - 1) &&
		    ab->hw_params.tcl_ring_retry && ab->hw_params.max_tx_ring > 1) {
			tcl_ring_retry = true;
			ring_selector++;
		}

		goto fail_unmap_dma;
	}

	ath11k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc +
					 sizeof(struct hal_tlv_hdr), &ti);

	ath11k_hal_srng_access_end(ab, tcl_ring);

	ath11k_dp_shadow_start_timer(ab, tcl_ring, &dp->tx_ring_timer[ti.ring_id]);

	spin_unlock_bh(&tcl_ring->lock);

	ath11k_dbg_dump(ab, ATH11K_DBG_DP_TX, NULL, "dp tx msdu: ",
			skb->data, skb->len);

	atomic_inc(&ar->dp.num_tx_pending);

	return 0;

fail_unmap_dma:
	dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);

fail_remove_idr:
	spin_lock_bh(&tx_ring->tx_idr_lock);
	idr_remove(&tx_ring->txbuf_idr,
		   FIELD_GET(DP_TX_DESC_ID_MSDU_ID, ti.desc_id));
	spin_unlock_bh(&tx_ring->tx_idr_lock);

	if (tcl_ring_retry)
		goto tcl_ring_sel;

	return ret;
}

static void ath11k_dp_tx_free_txbuf(struct ath11k_base *ab, u8 mac_id,
				    int msdu_id,
				    struct dp_tx_ring *tx_ring)
{
	struct ath11k *ar;
	struct sk_buff *msdu;
	struct ath11k_skb_cb *skb_cb;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
			    msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
	dev_kfree_skb_any(msdu);

	ar = ab->pdevs[mac_id].ar;
	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);
}

static void
ath11k_dp_tx_htt_tx_complete_buf(struct ath11k_base *ab,
				 struct dp_tx_ring *tx_ring,
				 struct ath11k_dp_htt_wbm_tx_status *ts)
{
	struct ieee80211_tx_status status = {};
	struct sk_buff *msdu;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k *ar;
	struct ath11k_peer *peer;

	spin_lock(&tx_ring->tx_idr_lock);
	msdu = idr_remove(&tx_ring->txbuf_idr, ts->msdu_id);
	spin_unlock(&tx_ring->tx_idr_lock);

	if (unlikely(!msdu)) {
		ath11k_warn(ab, "htt tx completion for unknown msdu_id %d\n",
			    ts->msdu_id);
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);
	info = IEEE80211_SKB_CB(msdu);

	ar = skb_cb->ar;

	if (atomic_dec_and_test(&ar->dp.num_tx_pending))
		wake_up(&ar->dp.tx_empty_waitq);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (!skb_cb->vif) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	memset(&info->status, 0, sizeof(info->status));

	if (ts->acked) {
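		/* ack_signal is meaningful only for frames that solicited
		 * an ACK
		 */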
		if (!(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
			info->flags |= IEEE80211_TX_STAT_ACK;
			info->status.ack_signal = ts->ack_rssi;

			if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
				      ab->wmi_ab.svc_map))
				info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

			info->status.flags |=
				IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
		} else {
			info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
		}
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	spin_unlock_bh(&ab->base_lock);

	status.sta = peer->sta;
	status.info = info;
	status.skb = msdu;

	ieee80211_tx_status_ext(ar->hw, &status);
}

static void
ath11k_dp_tx_process_htt_tx_complete(struct ath11k_base *ab,
				     void *desc, u8 mac_id,
				     u32 msdu_id, struct dp_tx_ring *tx_ring)
{
	struct htt_tx_wbm_completion *status_desc;
	struct ath11k_dp_htt_wbm_tx_status ts = {};
	enum hal_wbm_htt_tx_comp_status wbm_status;

	status_desc = desc + HTT_TX_WBM_COMP_STATUS_OFFSET;

	wbm_status = FIELD_GET(HTT_TX_WBM_COMP_INFO0_STATUS,
			       status_desc->info0);
	switch (wbm_status) {
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_OK:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_DROP:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_TTL:
		ts.acked = (wbm_status == HAL_WBM_REL_HTT_TX_COMP_STATUS_OK);
		ts.msdu_id = msdu_id;
		ts.ack_rssi = FIELD_GET(HTT_TX_WBM_COMP_INFO1_ACK_RSSI,
					status_desc->info1);

		if (FIELD_GET(HTT_TX_WBM_COMP_INFO2_VALID, status_desc->info2))
			ts.peer_id = FIELD_GET(HTT_TX_WBM_COMP_INFO2_SW_PEER_ID,
					       status_desc->info2);
		else
			ts.peer_id = HTT_INVALID_PEER_ID;

		ath11k_dp_tx_htt_tx_complete_buf(ab, tx_ring, &ts);

		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_REINJ:
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_INSPECT:
		ath11k_dp_tx_free_txbuf(ab, mac_id, msdu_id, tx_ring);
		break;
	case HAL_WBM_REL_HTT_TX_COMP_STATUS_MEC_NOTIFY:
		/* This event is to be handled only when the driver decides to
		 * use WDS offload functionality.
		 */
		break;
	default:
		ath11k_warn(ab, "Unknown htt tx status %d\n", wbm_status);
		break;
	}
}

static void ath11k_dp_tx_cache_peer_stats(struct ath11k *ar,
					  struct sk_buff *msdu,
					  struct hal_tx_status *ts)
{
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;

	if (ts->try_cnt > 1) {
		peer_stats->retry_pkts += ts->try_cnt - 1;
		peer_stats->retry_bytes += (ts->try_cnt - 1) * msdu->len;

		if (ts->status != HAL_WBM_TQM_REL_REASON_FRAME_ACKED) {
			peer_stats->failed_pkts += 1;
			peer_stats->failed_bytes += msdu->len;
		}
	}
}

void ath11k_dp_tx_update_txcompl(struct ath11k *ar, struct hal_tx_status *ts)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_per_peer_tx_stats *peer_stats = &ar->cached_stats;
	enum hal_tx_rate_stats_pkt_type pkt_type;
	enum hal_tx_rate_stats_sgi sgi;
	enum hal_tx_rate_stats_bw bw;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct ieee80211_sta *sta;
	u16 rate, ru_tones;
	u8 mcs, rate_idx = 0, ofdma;
	int ret;

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DP_TX,
			   "failed to find the peer by id %u\n", ts->peer_id);
		goto err_out;
	}

	sta = peer->sta;
	arsta = ath11k_sta_to_arsta(sta);

	memset(&arsta->txrate, 0, sizeof(arsta->txrate));
	pkt_type = FIELD_GET(HAL_TX_RATE_STATS_INFO0_PKT_TYPE,
			     ts->rate_stats);
	mcs = FIELD_GET(HAL_TX_RATE_STATS_INFO0_MCS,
			ts->rate_stats);
	sgi = FIELD_GET(HAL_TX_RATE_STATS_INFO0_SGI,
			ts->rate_stats);
	bw = FIELD_GET(HAL_TX_RATE_STATS_INFO0_BW, ts->rate_stats);
	ru_tones = FIELD_GET(HAL_TX_RATE_STATS_INFO0_TONES_IN_RU, ts->rate_stats);
	ofdma = FIELD_GET(HAL_TX_RATE_STATS_INFO0_OFDMA_TX, ts->rate_stats);

	/* Prefer the real NSS value from arsta->last_txrate.nss; if that is
	 * invalid, fall back to the NSS negotiated at assoc.
	 */
	if (arsta->last_txrate.nss)
		arsta->txrate.nss = arsta->last_txrate.nss;
	else
		arsta->txrate.nss = arsta->peer_nss;

	if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11A ||
	    pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11B) {
		ret = ath11k_mac_hw_ratecode_to_legacy_rate(mcs,
							    pkt_type,
							    &rate_idx,
							    &rate);
		if (ret < 0)
			goto err_out;
		arsta->txrate.legacy = rate;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11N) {
		if (mcs > 7) {
			ath11k_warn(ab, "Invalid HT mcs index %d\n", mcs);
			goto err_out;
		}

		if (arsta->txrate.nss != 0)
			arsta->txrate.mcs = mcs + 8 * (arsta->txrate.nss - 1);
		arsta->txrate.flags = RATE_INFO_FLAGS_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AC) {
		if (mcs > 9) {
			ath11k_warn(ab, "Invalid VHT mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_VHT_MCS;
		if (sgi)
			arsta->txrate.flags |= RATE_INFO_FLAGS_SHORT_GI;
	} else if (pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		if (mcs > 11) {
			ath11k_warn(ab, "Invalid HE mcs index %d\n", mcs);
			goto err_out;
		}

		arsta->txrate.mcs = mcs;
		arsta->txrate.flags = RATE_INFO_FLAGS_HE_MCS;
		arsta->txrate.he_gi = ath11k_mac_he_gi_to_nl80211_he_gi(sgi);
	}

	arsta->txrate.bw = ath11k_mac_bw_to_mac80211_bw(bw);
	if (ofdma && pkt_type == HAL_TX_RATE_STATS_PKT_TYPE_11AX) {
		arsta->txrate.bw = RATE_INFO_BW_HE_RU;
		arsta->txrate.he_ru_alloc =
			ath11k_mac_he_ru_tones_to_nl80211_he_ru_alloc(ru_tones);
	}

	if (ath11k_debugfs_is_extd_tx_stats_enabled(ar))
		ath11k_debugfs_sta_add_tx_stats(arsta, peer_stats, rate_idx);

err_out:
	spin_unlock_bh(&ab->base_lock);
}

static void ath11k_dp_tx_complete_msdu(struct ath11k *ar,
				       struct sk_buff *msdu,
				       struct hal_tx_status *ts)
{
	struct ieee80211_tx_status status = {};
	struct ieee80211_rate_status status_rate = {};
	struct ath11k_base *ab = ar->ab;
	struct ieee80211_tx_info *info;
	struct ath11k_skb_cb *skb_cb;
	struct ath11k_peer *peer;
	struct ath11k_sta *arsta;
	struct rate_info rate;

	if (WARN_ON_ONCE(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM)) {
		/* Must not happen */
		return;
	}

	skb_cb = ATH11K_SKB_CB(msdu);

	dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);

	if (unlikely(!rcu_access_pointer(ab->pdevs_active[ar->pdev_idx]))) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	if (unlikely(!skb_cb->vif)) {
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}

	info = IEEE80211_SKB_CB(msdu);
	memset(&info->status, 0, sizeof(info->status));

	/* skip tx rate update from ieee80211_status */
	info->status.rates[0].idx = -1;

	if (ts->status == HAL_WBM_TQM_REL_REASON_FRAME_ACKED &&
	    !(info->flags & IEEE80211_TX_CTL_NO_ACK)) {
		info->flags |= IEEE80211_TX_STAT_ACK;
		info->status.ack_signal = ts->ack_rssi;

		if (!test_bit(WMI_TLV_SERVICE_HW_DB2DBM_CONVERSION_SUPPORT,
			      ab->wmi_ab.svc_map))
			info->status.ack_signal += ATH11K_DEFAULT_NOISE_FLOOR;

		info->status.flags |= IEEE80211_TX_STATUS_ACK_SIGNAL_VALID;
	}

	if (ts->status == HAL_WBM_TQM_REL_REASON_CMD_REMOVE_TX &&
	    (info->flags & IEEE80211_TX_CTL_NO_ACK))
		info->flags |= IEEE80211_TX_STAT_NOACK_TRANSMITTED;
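	/* Track ppdu_id across the first MSDU of each PPDU to detect A-MPDU
	 * aggregation and to flush the cached per-peer stats once per PPDU.
	 */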
	if (unlikely(ath11k_debugfs_is_extd_tx_stats_enabled(ar)) ||
	    ab->hw_params.single_pdev_only) {
		if (ts->flags & HAL_TX_STATUS_FLAGS_FIRST_MSDU) {
			if (ar->last_ppdu_id == 0) {
				ar->last_ppdu_id = ts->ppdu_id;
			} else if (ar->last_ppdu_id == ts->ppdu_id ||
				   ar->cached_ppdu_id == ar->last_ppdu_id) {
				ar->cached_ppdu_id = ar->last_ppdu_id;
				ar->cached_stats.is_ampdu = true;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			} else {
				ar->cached_stats.is_ampdu = false;
				ath11k_dp_tx_update_txcompl(ar, ts);
				memset(&ar->cached_stats, 0,
				       sizeof(struct ath11k_per_peer_tx_stats));
			}
			ar->last_ppdu_id = ts->ppdu_id;
		}

		ath11k_dp_tx_cache_peer_stats(ar, msdu, ts);
	}

	spin_lock_bh(&ab->base_lock);
	peer = ath11k_peer_find_by_id(ab, ts->peer_id);
	if (!peer || !peer->sta) {
		ath11k_dbg(ab, ATH11K_DBG_DATA,
			   "dp_tx: failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		ieee80211_free_txskb(ar->hw, msdu);
		return;
	}
	arsta = ath11k_sta_to_arsta(peer->sta);
	status.sta = peer->sta;
	status.skb = msdu;
	status.info = info;
	rate = arsta->last_txrate;

	status_rate.rate_idx = rate;
	status_rate.try_count = 1;

	status.rates = &status_rate;
	status.n_rates = 1;

	spin_unlock_bh(&ab->base_lock);

	ieee80211_tx_status_ext(ar->hw, &status);
}

static inline void ath11k_dp_tx_status_parse(struct ath11k_base *ab,
					     struct hal_wbm_release_ring *desc,
					     struct hal_tx_status *ts)
{
	ts->buf_rel_source =
		FIELD_GET(HAL_WBM_RELEASE_INFO0_REL_SRC_MODULE, desc->info0);
	if (unlikely(ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_FW &&
		     ts->buf_rel_source != HAL_WBM_REL_SRC_MODULE_TQM))
		return;

	if (unlikely(ts->buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW))
		return;

	ts->status = FIELD_GET(HAL_WBM_RELEASE_INFO0_TQM_RELEASE_REASON,
			       desc->info0);
	ts->ppdu_id = FIELD_GET(HAL_WBM_RELEASE_INFO1_TQM_STATUS_NUMBER,
				desc->info1);
	ts->try_cnt = FIELD_GET(HAL_WBM_RELEASE_INFO1_TRANSMIT_COUNT,
				desc->info1);
	ts->ack_rssi = FIELD_GET(HAL_WBM_RELEASE_INFO2_ACK_FRAME_RSSI,
				 desc->info2);
	if (desc->info2 & HAL_WBM_RELEASE_INFO2_FIRST_MSDU)
		ts->flags |= HAL_TX_STATUS_FLAGS_FIRST_MSDU;
	ts->peer_id = FIELD_GET(HAL_WBM_RELEASE_INFO3_PEER_ID, desc->info3);
	ts->tid = FIELD_GET(HAL_WBM_RELEASE_INFO3_TID, desc->info3);
	if (desc->rate_stats.info0 & HAL_TX_RATE_STATS_INFO0_VALID)
		ts->rate_stats = desc->rate_stats.info0;
	else
		ts->rate_stats = 0;
}

void ath11k_dp_tx_completion_handler(struct ath11k_base *ab, int ring_id)
{
	struct ath11k *ar;
	struct ath11k_dp *dp = &ab->dp;
	int hal_ring_id = dp->tx_ring[ring_id].tcl_comp_ring.ring_id;
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	struct sk_buff *msdu;
	struct hal_tx_status ts = {};
	struct dp_tx_ring *tx_ring = &dp->tx_ring[ring_id];
	u32 *desc;
	u32 msdu_id;
	u8 mac_id;

	spin_lock_bh(&status_ring->lock);

	ath11k_hal_srng_access_begin(ab, status_ring);
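	/* Stage one: copy completed WBM descriptors into the local tx_status
	 * FIFO while holding the srng lock; they are processed below after
	 * the lock is dropped.
	 */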
	while ((ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) !=
		tx_ring->tx_status_tail) &&
	       (desc = ath11k_hal_srng_dst_get_next_entry(ab, status_ring))) {
		memcpy(&tx_ring->tx_status[tx_ring->tx_status_head],
		       desc, sizeof(struct hal_wbm_release_ring));
		tx_ring->tx_status_head =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head);
	}

	if (unlikely((ath11k_hal_srng_dst_peek(ab, status_ring) != NULL) &&
		     (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_head) ==
		      tx_ring->tx_status_tail))) {
		/* TODO: Process pending tx_status messages when kfifo_is_full() */
		ath11k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
	}

	ath11k_hal_srng_access_end(ab, status_ring);

	spin_unlock_bh(&status_ring->lock);

	while (ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail) != tx_ring->tx_status_head) {
		struct hal_wbm_release_ring *tx_status;
		u32 desc_id;

		tx_ring->tx_status_tail =
			ATH11K_TX_COMPL_NEXT(tx_ring->tx_status_tail);
		tx_status = &tx_ring->tx_status[tx_ring->tx_status_tail];
		ath11k_dp_tx_status_parse(ab, tx_status, &ts);

		desc_id = FIELD_GET(BUFFER_ADDR_INFO1_SW_COOKIE,
				    tx_status->buf_addr_info.info1);
		mac_id = FIELD_GET(DP_TX_DESC_ID_MAC_ID, desc_id);
		msdu_id = FIELD_GET(DP_TX_DESC_ID_MSDU_ID, desc_id);

		if (unlikely(ts.buf_rel_source == HAL_WBM_REL_SRC_MODULE_FW)) {
			ath11k_dp_tx_process_htt_tx_complete(ab,
							     (void *)tx_status,
							     mac_id, msdu_id,
							     tx_ring);
			continue;
		}

		spin_lock(&tx_ring->tx_idr_lock);
		msdu = idr_remove(&tx_ring->txbuf_idr, msdu_id);
		if (unlikely(!msdu)) {
			ath11k_warn(ab, "tx completion for unknown msdu_id %d\n",
				    msdu_id);
			spin_unlock(&tx_ring->tx_idr_lock);
			continue;
		}

		spin_unlock(&tx_ring->tx_idr_lock);

		ar = ab->pdevs[mac_id].ar;

		if (atomic_dec_and_test(&ar->dp.num_tx_pending))
			wake_up(&ar->dp.tx_empty_waitq);

		ath11k_dp_tx_complete_msdu(ar, msdu, &ts);
	}
}

int ath11k_dp_tx_send_reo_cmd(struct ath11k_base *ab, struct dp_rx_tid *rx_tid,
			      enum hal_reo_cmd_type type,
			      struct ath11k_hal_reo_cmd *cmd,
			      void (*cb)(struct ath11k_dp *, void *,
					 enum hal_reo_cmd_status))
{
	struct ath11k_dp *dp = &ab->dp;
	struct dp_reo_cmd *dp_cmd;
	struct hal_srng *cmd_ring;
	int cmd_num;

	if (test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	cmd_ring = &ab->hal.srng_list[dp->reo_cmd_ring.ring_id];
	cmd_num = ath11k_hal_reo_cmd_send(ab, cmd_ring, type, cmd);

	/* cmd_num should start from 1; on failure, return the error code */
	if (cmd_num < 0)
		return cmd_num;

	/* reo cmd ring descriptors have cmd_num starting from 1 */
	if (cmd_num == 0)
		return -EINVAL;

	if (!cb)
		return 0;

	/* Can this be optimized so that we keep the pending command list only
	 * for tid delete command to free up the resource on the command status
	 * indication?
	 */
	dp_cmd = kzalloc(sizeof(*dp_cmd), GFP_ATOMIC);
	if (!dp_cmd)
		return -ENOMEM;

	memcpy(&dp_cmd->data, rx_tid, sizeof(struct dp_rx_tid));
	dp_cmd->cmd_num = cmd_num;
	dp_cmd->handler = cb;

	spin_lock_bh(&dp->reo_cmd_lock);
	list_add_tail(&dp_cmd->list, &dp->reo_cmd_list);
	spin_unlock_bh(&dp->reo_cmd_lock);

	return 0;
}

static int
ath11k_dp_tx_get_ring_id_type(struct ath11k_base *ab,
			      int mac_id, u32 ring_id,
			      enum hal_ring_type ring_type,
			      enum htt_srng_ring_type *htt_ring_type,
			      enum htt_srng_ring_id *htt_ring_id)
{
	int lmac_ring_id_offset = 0;
	int ret = 0;

	switch (ring_type) {
	case HAL_RXDMA_BUF:
		lmac_ring_id_offset = mac_id * HAL_SRNG_RINGS_PER_LMAC;

		/* for QCA6390, host fills rx buffer to fw and fw fills to
		 * rxbuf ring for each rxdma
		 */
		if (!ab->hw_params.rx_mac_buf_ring) {
			if (!(ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF +
					  lmac_ring_id_offset) ||
			      ring_id == (HAL_SRNG_RING_ID_WMAC1_SW2RXDMA1_BUF +
					  lmac_ring_id_offset))) {
				ret = -EINVAL;
			}
			*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
			*htt_ring_type = HTT_SW_TO_HW_RING;
		} else {
			if (ring_id == HAL_SRNG_RING_ID_WMAC1_SW2RXDMA0_BUF) {
				*htt_ring_id = HTT_HOST1_TO_FW_RXBUF_RING;
				*htt_ring_type = HTT_SW_TO_SW_RING;
			} else {
				*htt_ring_id = HTT_RXDMA_HOST_BUF_RING;
				*htt_ring_type = HTT_SW_TO_HW_RING;
			}
		}
		break;
	case HAL_RXDMA_DST:
		*htt_ring_id = HTT_RXDMA_NON_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_BUF:
		*htt_ring_id = HTT_RXDMA_MONITOR_BUF_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_STATUS:
		*htt_ring_id = HTT_RXDMA_MONITOR_STATUS_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	case HAL_RXDMA_MONITOR_DST:
		*htt_ring_id = HTT_RXDMA_MONITOR_DEST_RING;
		*htt_ring_type = HTT_HW_TO_SW_RING;
		break;
	case HAL_RXDMA_MONITOR_DESC:
		*htt_ring_id = HTT_RXDMA_MONITOR_DESC_RING;
		*htt_ring_type = HTT_SW_TO_HW_RING;
		break;
	default:
		ath11k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
		ret = -EINVAL;
	}
	return ret;
}

int ath11k_dp_tx_htt_srng_setup(struct ath11k_base *ab, u32 ring_id,
				int mac_id, enum hal_ring_type ring_type)
{
	struct htt_srng_setup_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	u32 ring_entry_sz;
	int len = sizeof(*cmd);
	dma_addr_t hp_addr, tp_addr;
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	hp_addr = ath11k_hal_srng_get_hp_addr(ab, srng);
	tp_addr = ath11k_hal_srng_get_tp_addr(ab, srng);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_srng_setup_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_SRING_SETUP);
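	/* Rings reaching the target (SW-to-HW or HW-to-SW) are addressed by
	 * the HW mac id; host-only rings use the SW mac id directly.
	 */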
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_PDEV_ID,
					 mac_id);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_TYPE,
				 htt_ring_type);
	cmd->info0 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO0_RING_ID, htt_ring_id);

	cmd->ring_base_addr_lo = params.ring_base_paddr &
				 HAL_ADDR_LSB_REG_MASK;

	cmd->ring_base_addr_hi = (u64)params.ring_base_paddr >>
				 HAL_ADDR_MSB_REG_SHIFT;

	ret = ath11k_hal_srng_get_entrysize(ab, ring_type);
	if (ret < 0)
		goto err_free;

	ring_entry_sz = ret;

	ring_entry_sz >>= 2;
	cmd->info1 = FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_ENTRY_SIZE,
				ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_SIZE,
				 params.num_entries * ring_entry_sz);
	cmd->info1 |= FIELD_PREP(HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_MSI_SWAP,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_TLV_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));
	cmd->info1 |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INFO1_RING_FLAGS_HOST_FW_SWAP,
			!!(params.flags & HAL_SRNG_FLAGS_RING_PTR_SWAP));
	if (htt_ring_type == HTT_SW_TO_HW_RING)
		cmd->info1 |= HTT_SRNG_SETUP_CMD_INFO1_RING_LOOP_CNT_DIS;

	cmd->ring_head_off32_remote_addr_lo = hp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_head_off32_remote_addr_hi = (u64)hp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_tail_off32_remote_addr_lo = tp_addr & HAL_ADDR_LSB_REG_MASK;
	cmd->ring_tail_off32_remote_addr_hi = (u64)tp_addr >>
					      HAL_ADDR_MSB_REG_SHIFT;

	cmd->ring_msi_addr_lo = lower_32_bits(params.msi_addr);
	cmd->ring_msi_addr_hi = upper_32_bits(params.msi_addr);
	cmd->msi_data = params.msi_data;

	cmd->intr_info = FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_BATCH_COUNTER_THRESH,
			params.intr_batch_cntr_thres_entries * ring_entry_sz);
	cmd->intr_info |= FIELD_PREP(
			HTT_SRNG_SETUP_CMD_INTR_INFO_INTR_TIMER_THRESH,
			params.intr_timer_thres_us >> 3);

	cmd->info2 = 0;
	if (params.flags & HAL_SRNG_FLAGS_LOW_THRESH_INTR_EN) {
		cmd->info2 = FIELD_PREP(
				HTT_SRNG_SETUP_CMD_INFO2_INTR_LOW_THRESH,
				params.low_threshold);
	}

	ath11k_dbg(ab, ATH11K_DBG_DP_TX,
		   "htt srng setup msi_addr_lo 0x%x msi_addr_hi 0x%x msi_data 0x%x ring_id %d ring_type %d intr_info 0x%x flags 0x%x\n",
		   cmd->ring_msi_addr_lo, cmd->ring_msi_addr_hi,
		   cmd->msi_data, ring_id, ring_type, cmd->intr_info, cmd->info2);

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}

#define HTT_TARGET_VERSION_TIMEOUT_HZ (3 * HZ)

int ath11k_dp_tx_htt_h2t_ver_req_msg(struct ath11k_base *ab)
{
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ver_req_cmd *cmd;
	int len = sizeof(*cmd);
	int ret;

	init_completion(&dp->htt_tgt_version_received);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_ver_req_cmd *)skb->data;
	cmd->ver_reg_info = FIELD_PREP(HTT_VER_REQ_INFO_MSG_ID,
				       HTT_H2T_MSG_TYPE_VERSION_REQ);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	ret = wait_for_completion_timeout(&dp->htt_tgt_version_received,
					  HTT_TARGET_VERSION_TIMEOUT_HZ);
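	/* wait_for_completion_timeout() returns 0 on timeout, otherwise the
	 * remaining jiffies
	 */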
	if (ret == 0) {
		ath11k_warn(ab, "htt target version request timed out\n");
		return -ETIMEDOUT;
	}

	if (dp->htt_tgt_ver_major != HTT_TARGET_VERSION_MAJOR) {
		ath11k_err(ab, "unsupported htt major version %d supported version is %d\n",
			   dp->htt_tgt_ver_major, HTT_TARGET_VERSION_MAJOR);
		return -EOPNOTSUPP;
	}

	return 0;
}

int ath11k_dp_tx_htt_h2t_ppdu_stats_req(struct ath11k *ar, u32 mask)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ppdu_stats_cfg_cmd *cmd;
	int len = sizeof(*cmd);
	u8 pdev_mask;
	int ret;
	int i;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		skb = ath11k_htc_alloc_skb(ab, len);
		if (!skb)
			return -ENOMEM;

		skb_put(skb, len);
		cmd = (struct htt_ppdu_stats_cfg_cmd *)skb->data;
		cmd->msg = FIELD_PREP(HTT_PPDU_STATS_CFG_MSG_TYPE,
				      HTT_H2T_MSG_TYPE_PPDU_STATS_CFG);

		pdev_mask = 1 << (ar->pdev_idx + i);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_PDEV_ID, pdev_mask);
		cmd->msg |= FIELD_PREP(HTT_PPDU_STATS_CFG_TLV_TYPE_BITMASK, mask);

		ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
		if (ret) {
			dev_kfree_skb_any(skb);
			return ret;
		}
	}

	return 0;
}

int ath11k_dp_tx_htt_rx_filter_setup(struct ath11k_base *ab, u32 ring_id,
				     int mac_id, enum hal_ring_type ring_type,
				     int rx_buf_size,
				     struct htt_rx_ring_tlv_filter *tlv_filter)
{
	struct htt_rx_ring_selection_cfg_cmd *cmd;
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(*cmd);
	enum htt_srng_ring_type htt_ring_type;
	enum htt_srng_ring_id htt_ring_id;
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	memset(&params, 0, sizeof(params));
	ath11k_hal_srng_get_params(ab, srng, &params);

	ret = ath11k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
					    ring_type, &htt_ring_type,
					    &htt_ring_id);
	if (ret)
		goto err_free;

	skb_put(skb, len);
	cmd = (struct htt_rx_ring_selection_cfg_cmd *)skb->data;
	cmd->info0 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_RING_SELECTION_CFG);
	if (htt_ring_type == HTT_SW_TO_HW_RING ||
	    htt_ring_type == HTT_HW_TO_SW_RING)
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   DP_SW2HW_MACID(mac_id));
	else
		cmd->info0 |=
			FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PDEV_ID,
				   mac_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_RING_ID,
				 htt_ring_id);
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_SS,
				 !!(params.flags & HAL_SRNG_FLAGS_MSI_SWAP));
	cmd->info0 |= FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO0_PS,
				 !!(params.flags & HAL_SRNG_FLAGS_DATA_TLV_SWAP));

	cmd->info1 = FIELD_PREP(HTT_RX_RING_SELECTION_CFG_CMD_INFO1_BUF_SIZE,
				rx_buf_size);
	cmd->pkt_type_en_flags0 = tlv_filter->pkt_filter_flags0;
	cmd->pkt_type_en_flags1 = tlv_filter->pkt_filter_flags1;
	cmd->pkt_type_en_flags2 = tlv_filter->pkt_filter_flags2;
	cmd->pkt_type_en_flags3 = tlv_filter->pkt_filter_flags3;
	cmd->rx_filter_tlv = tlv_filter->rx_filter;

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}
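/* Request extended target statistics over HTT. The response is delivered
 * asynchronously via an HTT_T2H_MSG_TYPE_EXT_STATS_CONF event and matched
 * back to the requester using the cookie.
 */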
int
ath11k_dp_tx_htt_h2t_ext_stats_req(struct ath11k *ar, u8 type,
				   struct htt_ext_stats_cfg_params *cfg_params,
				   u64 cookie)
{
	struct ath11k_base *ab = ar->ab;
	struct ath11k_dp *dp = &ab->dp;
	struct sk_buff *skb;
	struct htt_ext_stats_cfg_cmd *cmd;
	u32 pdev_id;
	int len = sizeof(*cmd);
	int ret;

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_ext_stats_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_EXT_STATS_CFG;

	if (ab->hw_params.single_pdev_only)
		pdev_id = ath11k_mac_get_target_pdev_id(ar);
	else
		pdev_id = ar->pdev->pdev_id;

	cmd->hdr.pdev_mask = 1 << pdev_id;

	cmd->hdr.stats_type = type;
	cmd->cfg_param0 = cfg_params->cfg0;
	cmd->cfg_param1 = cfg_params->cfg1;
	cmd->cfg_param2 = cfg_params->cfg2;
	cmd->cfg_param3 = cfg_params->cfg3;
	cmd->cookie_lsb = lower_32_bits(cookie);
	cmd->cookie_msb = upper_32_bits(cookie);

	ret = ath11k_htc_send(&ab->htc, dp->eid, skb);
	if (ret) {
		ath11k_warn(ab, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath11k_dp_tx_htt_monitor_mode_ring_config(struct ath11k *ar, bool reset)
{
	struct ath11k_pdev_dp *dp = &ar->dp;
	struct ath11k_base *ab = ar->ab;
	struct htt_rx_ring_tlv_filter tlv_filter = {};
	int ret = 0, ring_id = 0, i;

	if (ab->hw_params.full_monitor_mode) {
		ret = ath11k_dp_tx_htt_rx_full_mon_setup(ab,
							 dp->mac_id, !reset);
		if (ret < 0) {
			ath11k_err(ab, "failed to setup full monitor %d\n", ret);
			return ret;
		}
	}

	ring_id = dp->rxdma_mon_buf_ring.refill_buf_ring.ring_id;

	if (!reset) {
		tlv_filter.rx_filter = HTT_RX_MON_FILTER_TLV_FLAGS_MON_BUF_RING;
		tlv_filter.pkt_filter_flags0 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS0 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS0;
		tlv_filter.pkt_filter_flags1 =
			HTT_RX_MON_FP_MGMT_FILTER_FLAGS1 |
			HTT_RX_MON_MO_MGMT_FILTER_FLAGS1;
		tlv_filter.pkt_filter_flags2 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG2 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG2;
		tlv_filter.pkt_filter_flags3 =
			HTT_RX_MON_FP_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_MO_CTRL_FILTER_FLASG3 |
			HTT_RX_MON_FP_DATA_FILTER_FLASG3 |
			HTT_RX_MON_MO_DATA_FILTER_FLASG3;
	}

	if (ab->hw_params.rxdma1_enable) {
		ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id, dp->mac_id,
						       HAL_RXDMA_MONITOR_BUF,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	} else if (!reset) {
		/* set in monitor mode only */
		for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
			ring_id = dp->rx_mac_buf_ring[i].ring_id;
			ret = ath11k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
							       dp->mac_id + i,
							       HAL_RXDMA_BUF,
							       1024,
							       &tlv_filter);
		}
	}

	if (ret)
		return ret;

	for (i = 0; i < ab->hw_params.num_rxdma_per_pdev; i++) {
		ring_id = dp->rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
		if (!reset) {
			tlv_filter.rx_filter =
				HTT_RX_MON_FILTER_TLV_FLAGS_MON_STATUS_RING;
		} else {
			tlv_filter = ath11k_mac_mon_status_filter_default;

			if (ath11k_debugfs_is_extd_rx_stats_enabled(ar))
				tlv_filter.rx_filter = ath11k_debugfs_rx_filter(ar);
		}
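		/* (Re)program the monitor status ring filter for this rxdma */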
		ret = ath11k_dp_tx_htt_rx_filter_setup(ab, ring_id,
						       dp->mac_id + i,
						       HAL_RXDMA_MONITOR_STATUS,
						       DP_RXDMA_REFILL_RING_SIZE,
						       &tlv_filter);
	}

	if (!ar->ab->hw_params.rxdma1_enable)
		mod_timer(&ar->ab->mon_reap_timer, jiffies +
			  msecs_to_jiffies(ATH11K_MON_TIMER_INTERVAL));

	return ret;
}

int ath11k_dp_tx_htt_rx_full_mon_setup(struct ath11k_base *ab, int mac_id,
				       bool config)
{
	struct htt_rx_full_monitor_mode_cfg_cmd *cmd;
	struct sk_buff *skb;
	int ret, len = sizeof(*cmd);

	skb = ath11k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_rx_full_monitor_mode_cfg_cmd *)skb->data;
	memset(cmd, 0, sizeof(*cmd));
	cmd->info0 = FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_MSG_TYPE,
				HTT_H2T_MSG_TYPE_RX_FULL_MONITOR_MODE);

	cmd->info0 |= FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_INFO0_PDEV_ID, mac_id);

	cmd->cfg = HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ENABLE |
		   FIELD_PREP(HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_RELEASE_RING,
			      HTT_RX_MON_RING_SW);
	if (config) {
		cmd->cfg |= HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_ZERO_MPDUS_END |
			    HTT_RX_FULL_MON_MODE_CFG_CMD_CFG_NON_ZERO_MPDUS_END;
	}

	ret = ath11k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret)
		goto err_free;

	return 0;

err_free:
	dev_kfree_skb_any(skb);

	return ret;
}