Lines Matching defs:ab
19 struct ath12k_base *ab = arvif->ar->ab;
21 if (test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags))
103 ath12k_warn(dp->ab, "failed to allocate data Tx buffer\n");
113 static void ath12k_hal_tx_cmd_ext_desc_setup(struct ath12k_base *ab,
186 static int ath12k_dp_tx_align_payload(struct ath12k_base *ab,
189 u32 iova_mask = ab->hw_params->iova_mask;
227 struct ath12k_base *ab = ar->ab;
228 struct ath12k_dp *dp = &ab->dp;
247 u32 iova_mask = ab->hw_params->iova_mask;
249 if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ar->ab->dev_flags))
265 ring_selector = ab->hw_params->hw_ops->get_ring_selector(skb);
269 ti.ring_id = ring_selector % ab->hw_params->max_tx_ring;
272 ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;
284 test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags)) {
341 if (!test_bit(ATH12K_FLAG_RAW_MODE, &ab->dev_flags)) {
353 atomic_inc(&ab->device_stats.tx_err.misc_fail);
359 ret = ath12k_dp_tx_align_payload(ab, &skb);
361 ath12k_warn(ab, "failed to align TX buffer %d\n", ret);
374 ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
375 if (dma_mapping_error(ab->dev, ti.paddr)) {
376 atomic_inc(&ab->device_stats.tx_err.misc_fail);
377 ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
382 if (!test_bit(ATH12K_FLAG_HW_CRYPTO_DISABLED, &ar->ab->dev_flags) &&
414 ath12k_hal_tx_cmd_ext_desc_setup(ab, msg, &ti);
419 ath12k_dbg(ab, ATH12K_DBG_DP_TX,
425 ti.paddr = dma_map_single(ab->dev, skb_ext_desc->data,
427 ret = dma_mapping_error(ab->dev, ti.paddr);
439 tcl_ring = &ab->hal.srng_list[hal_ring_id];
443 ath12k_hal_srng_access_begin(ab, tcl_ring);
445 hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
450 ath12k_hal_srng_access_end(ab, tcl_ring);
451 ab->device_stats.tx_err.desc_na[ti.ring_id]++;
460 if (ring_map != (BIT(ab->hw_params->max_tx_ring) - 1) &&
461 ab->hw_params->tcl_ring_retry) {
480 ab->device_stats.tx_enqueued[ti.ring_id]++;
482 ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
484 ath12k_hal_srng_access_end(ab, tcl_ring);
488 ath12k_dbg_dump(ab, ATH12K_DBG_DP_TX, NULL, "dp tx msdu: ",
497 dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
504 dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
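
The matches above from ath12k_dp_tx() (source lines 227-504) all go through the same few ab-rooted paths: feature checks via ab->dev_flags, ring selection via ab->hw_params, DMA through ab->dev, and the TCL ring via ab->hal.srng_list[]. Below is a minimal sketch of that enqueue shape, not the real function: extended descriptors, encryption and ring-retry handling are dropped, the TCL ring is passed in instead of being looked up in ab->dp, and dp_tx_enqueue_sketch is an invented name.

/*
 * Sketch of the data-TX enqueue pattern seen in the matches above.
 * Assumes the usual ath12k internal headers (core.h, dp_tx.h, hal_tx.h).
 */
static int dp_tx_enqueue_sketch(struct ath12k *ar, struct sk_buff *skb,
				struct hal_srng *tcl_ring)
{
	struct ath12k_base *ab = ar->ab;
	struct hal_tx_info ti = {};
	void *hal_tcl_desc;

	if (test_bit(ATH12K_FLAG_CRASH_FLUSH, &ab->dev_flags))
		return -ESHUTDOWN;

	/* Ring choice and the WBM return ring both come from hw_params. */
	ti.ring_id = ab->hw_params->hw_ops->get_ring_selector(skb) %
		     ab->hw_params->max_tx_ring;
	ti.rbm_id = ab->hw_params->hal_ops->tcl_to_wbm_rbm_map[ti.ring_id].rbm_id;

	/* DMA mapping always uses the bus device kept in ab->dev. */
	ti.paddr = dma_map_single(ab->dev, skb->data, skb->len, DMA_TO_DEVICE);
	if (dma_mapping_error(ab->dev, ti.paddr)) {
		atomic_inc(&ab->device_stats.tx_err.misc_fail);
		ath12k_warn(ab, "failed to DMA map data Tx buffer\n");
		return -ENOMEM;
	}
	ti.data_len = skb->len;

	ath12k_hal_srng_access_begin(ab, tcl_ring);

	hal_tcl_desc = ath12k_hal_srng_src_get_next_entry(ab, tcl_ring);
	if (!hal_tcl_desc) {
		/* Ring full: count it and undo the mapping. */
		ab->device_stats.tx_err.desc_na[ti.ring_id]++;
		ath12k_hal_srng_access_end(ab, tcl_ring);
		dma_unmap_single(ab->dev, ti.paddr, ti.data_len, DMA_TO_DEVICE);
		return -ENOMEM;
	}

	ab->device_stats.tx_enqueued[ti.ring_id]++;
	ath12k_hal_tx_cmd_desc_setup(ab, hal_tcl_desc, &ti);
	ath12k_hal_srng_access_end(ab, tcl_ring);

	return 0;
}
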
519 static void ath12k_dp_tx_free_txbuf(struct ath12k_base *ab,
526 u8 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params->mac_id);
529 ar = ab->pdevs[pdev_id].ar;
531 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
533 dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
545 ath12k_dp_tx_htt_tx_complete_buf(struct ath12k_base *ab,
562 ab->device_stats.tx_completed[tx_ring->tcl_data_ring_id]++;
567 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
569 dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
595 ab->wmi_ab.svc_map))
608 ath12k_dp_tx_process_htt_tx_complete(struct ath12k_base *ab, void *desc,
620 ab->device_stats.fw_tx_status[wbm_status]++;
627 ath12k_dp_tx_htt_tx_complete_buf(ab, desc_params, tx_ring, &ts);
634 ath12k_dp_tx_free_txbuf(ab, tx_ring, desc_params);
642 ath12k_warn(ab, "Unknown htt wbm tx status %d\n", wbm_status);
649 struct ath12k_base *ab = ar->ab;
659 spin_lock_bh(&ab->base_lock);
660 peer = ath12k_peer_find_by_id(ab, ts->peer_id);
662 ath12k_dbg(ab, ATH12K_DBG_DP_TX,
664 spin_unlock_bh(&ab->base_lock);
678 spin_unlock_bh(&ab->base_lock);
688 ath12k_warn(ab, "Invalid tx legacy rate %d\n", ret);
696 ath12k_warn(ab, "Invalid HT mcs index %d\n", ts->mcs);
710 ath12k_warn(ab, "Invalid VHT mcs index %d\n", ts->mcs);
722 ath12k_warn(ab, "Invalid HE mcs index %d\n", ts->mcs);
732 ath12k_warn(ab, "Invalid EHT mcs index %d\n", ts->mcs);
741 ath12k_warn(ab, "Invalid tx pkt type: %d\n", ts->pkt_type);
760 spin_lock_bh(&ab->base_lock);
762 spin_unlock_bh(&ab->base_lock);
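
The completion-status matches at source lines 649-762 show the locking convention around peer state: lookups by the peer_id reported in the TX status must run under ab->base_lock. A minimal sketch of that pattern follows, assuming struct hal_tx_status carries the peer_id/pkt_type/mcs fields used in the matches; dp_tx_update_peer_sketch is an invented name and the rate/stat bookkeeping is elided.

/* Peer lookup under ab->base_lock, as in the completion-status matches. */
static void dp_tx_update_peer_sketch(struct ath12k *ar, struct hal_tx_status *ts)
{
	struct ath12k_base *ab = ar->ab;
	struct ath12k_peer *peer;

	spin_lock_bh(&ab->base_lock);

	peer = ath12k_peer_find_by_id(ab, ts->peer_id);
	if (!peer) {
		ath12k_dbg(ab, ATH12K_DBG_DP_TX,
			   "failed to find the peer with peer_id %d\n",
			   ts->peer_id);
		spin_unlock_bh(&ab->base_lock);
		return;
	}

	/* ... map ts->pkt_type / ts->mcs to a rate and update peer stats ... */

	spin_unlock_bh(&ab->base_lock);
}
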
770 struct ath12k_base *ab = ar->ab;
785 ab->device_stats.tx_completed[ring]++;
787 dma_unmap_single(ab->dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
789 dma_unmap_single(ab->dev, skb_cb->paddr_ext_desc,
796 if (!rcu_dereference(ab->pdevs_active[ar->pdev_idx])) {
830 ab->wmi_ab.svc_map))
852 ath12k_dbg(ab, ATH12K_DBG_DP_TX, "tx frame is not acked status %d\n",
870 static void ath12k_dp_tx_status_parse(struct ath12k_base *ab,
903 void ath12k_dp_tx_completion_handler(struct ath12k_base *ab, int ring_id)
906 struct ath12k_dp *dp = &ab->dp;
908 struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
921 ath12k_hal_srng_access_begin(ab, status_ring);
924 desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring);
934 if (ath12k_hal_srng_dst_peek(ab, status_ring) &&
937 ath12k_warn(ab, "Unable to process some of the tx_status ring desc because status_fifo is full\n");
940 ath12k_hal_srng_access_end(ab, status_ring);
951 ath12k_dp_tx_status_parse(ab, tx_status, &ts);
963 tx_desc = ath12k_dp_get_tx_desc(ab, desc_id);
966 ath12k_warn(ab, "unable to retrieve tx_desc!");
977 ab->device_stats.tx_wbm_rel_source[buf_rel_source]++;
981 ab->device_stats.tqm_rel_reason[rel_status]++;
988 ath12k_dp_tx_process_htt_tx_complete(ab, (void *)tx_status,
993 pdev_id = ath12k_hw_mac_id_to_pdev_id(ab->hw_params, desc_params.mac_id);
994 ar = ab->pdevs[pdev_id].ar;
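
The ath12k_dp_tx_completion_handler() matches (source lines 903-994) outline the WBM completion loop: take the status ring out of ab->hal.srng_list[], walk its descriptors, recover the software tx descriptor by id, and route firmware-released entries to the HTT path. A compressed sketch of that control flow; dp_tx_completion_sketch is an invented name, the caller is assumed to supply the HAL ring id, and per-descriptor parsing is reduced to comments.

/* Shape of the WBM completion loop; descriptor handling is elided. */
static void dp_tx_completion_sketch(struct ath12k_base *ab, int hal_ring_id)
{
	struct hal_srng *status_ring = &ab->hal.srng_list[hal_ring_id];
	void *desc;

	ath12k_hal_srng_access_begin(ab, status_ring);

	while ((desc = ath12k_hal_srng_dst_get_next_entry(ab, status_ring))) {
		/* For each status descriptor the real handler:
		 *  - parses it with ath12k_dp_tx_status_parse(ab, ..., &ts);
		 *  - looks up the tx_desc via ath12k_dp_get_tx_desc(ab, desc_id);
		 *  - bumps device_stats.tx_wbm_rel_source[] / tqm_rel_reason[];
		 *  - sends firmware releases to ath12k_dp_tx_process_htt_tx_complete()
		 *    and everything else to the pdev resolved by
		 *    ath12k_hw_mac_id_to_pdev_id(ab->hw_params, mac_id).
		 */
	}

	ath12k_hal_srng_access_end(ab, status_ring);
}
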
1005 ath12k_dp_tx_get_ring_id_type(struct ath12k_base *ab,
1018 if (!ab->hw_params->rx_mac_buf_ring) {
1056 ath12k_warn(ab, "Unsupported ring type in DP :%d\n", ring_type);
1062 int ath12k_dp_tx_htt_srng_setup(struct ath12k_base *ab, u32 ring_id,
1066 struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1076 skb = ath12k_htc_alloc_skb(ab, len);
1081 ath12k_hal_srng_get_params(ab, srng, &params);
1083 hp_addr = ath12k_hal_srng_get_hp_addr(ab, srng);
1084 tp_addr = ath12k_hal_srng_get_tp_addr(ab, srng);
1086 ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1114 ret = ath12k_hal_srng_get_entrysize(ab, ring_type);
1157 ath12k_dbg(ab, ATH12K_DBG_HAL,
1162 ath12k_dbg(ab, ATH12K_DBG_HAL,
1166 ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
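
The ath12k_dp_tx_htt_srng_setup() matches (source lines 1062-1166) share one message lifecycle with the other HTT helpers below: allocate an HTC skb, read the ring parameters back from the HAL, translate the host ring into an HTT ring id/type, fill the command, and send it on ab->dp.eid. A trimmed sketch of that sequence; dp_tx_htt_setup_msg_sketch is an invented name, the struct htt_srng_setup_cmd length is assumed, and the command encoding plus the ath12k_dp_tx_get_ring_id_type() translation are reduced to a comment.

/* Skeleton of an HTT host-to-target ring setup message. */
static int dp_tx_htt_setup_msg_sketch(struct ath12k_base *ab, u32 ring_id)
{
	struct hal_srng *srng = &ab->hal.srng_list[ring_id];
	struct hal_srng_params params;
	struct sk_buff *skb;
	int len = sizeof(struct htt_srng_setup_cmd);	/* assumed cmd struct */
	int ret;

	skb = ath12k_htc_alloc_skb(ab, len);
	if (!skb)
		return -ENOMEM;

	ath12k_hal_srng_get_params(ab, srng, &params);

	/* ... translate (mac_id, ring_id, ring_type) with
	 * ath12k_dp_tx_get_ring_id_type() and encode params, hp/tp addresses
	 * and ring size into the command at skb->data ... */
	skb_put(skb, len);

	ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
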
1180 int ath12k_dp_tx_htt_h2t_ver_req_msg(struct ath12k_base *ab)
1182 struct ath12k_dp *dp = &ab->dp;
1190 skb = ath12k_htc_alloc_skb(ab, len);
1206 ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1215 ath12k_warn(ab, "htt target version request timed out\n");
1220 ath12k_err(ab, "unsupported htt major version %d supported version is %d\n",
1230 struct ath12k_base *ab = ar->ab;
1231 struct ath12k_dp *dp = &ab->dp;
1239 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1240 skb = ath12k_htc_alloc_skb(ab, len);
1253 ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1263 int ath12k_dp_tx_htt_rx_filter_setup(struct ath12k_base *ab, u32 ring_id,
1269 struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1277 skb = ath12k_htc_alloc_skb(ab, len);
1282 ath12k_hal_srng_get_params(ab, srng, &params);
1284 ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1400 ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);
1417 struct ath12k_base *ab = ar->ab;
1418 struct ath12k_dp *dp = &ab->dp;
1425 skb = ath12k_htc_alloc_skb(ab, len);
1446 ret = ath12k_htc_send(&ab->htc, dp->eid, skb);
1448 ath12k_warn(ab, "failed to send htt type stats request: %d",
1459 struct ath12k_base *ab = ar->ab;
1464 ath12k_err(ab, "failed to setup rx monitor filter %d\n", ret);
1473 struct ath12k_base *ab = ar->ab;
1517 if (ab->hw_params->rxdma1_enable) {
1518 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1520 ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1526 ath12k_err(ab,
1536 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1537 ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;
1538 ret = ath12k_dp_tx_htt_rx_filter_setup(ar->ab, ring_id,
1544 ath12k_err(ab,
1552 for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
1553 ring_id = ab->dp.rx_mon_status_refill_ring[i].refill_buf_ring.ring_id;
1559 ret = ath12k_dp_tx_htt_rx_filter_setup(ab, ring_id,
1565 ath12k_err(ab,
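
The monitor-mode matches at source lines 1459-1565 repeat one loop shape: every filter-programming path iterates over ab->hw_params->num_rxdma_per_pdev, with ab->hw_params->rxdma1_enable deciding which ring set gets programmed. A sketch of that loop, showing only the rx_mac_buf_ring case; program_rx_filter() is a hypothetical stand-in for ath12k_dp_tx_htt_rx_filter_setup() with its filter arguments filled in, and dp_mon_filter_loop_sketch is an invented name.

/* Hypothetical wrapper that fills the HTT RX filter arguments and calls
 * ath12k_dp_tx_htt_rx_filter_setup() for one ring.
 */
static int program_rx_filter(struct ath12k_base *ab, u32 ring_id, int mac_id);

/* Per-pdev filter programming loop, as in the matches above. */
static int dp_mon_filter_loop_sketch(struct ath12k *ar)
{
	struct ath12k_base *ab = ar->ab;
	u32 ring_id;
	int i, ret;

	for (i = 0; i < ab->hw_params->num_rxdma_per_pdev; i++) {
		/* The real code switches ring sets on ab->hw_params->rxdma1_enable;
		 * only the rx_mac_buf_ring case is shown here. */
		ring_id = ab->dp.rx_mac_buf_ring[i].ring_id;

		ret = program_rx_filter(ab, ring_id, i);
		if (ret) {
			ath12k_err(ab,
				   "failed to setup rx filter for ring %u: %d\n",
				   ring_id, ret);
			return ret;
		}
	}

	return 0;
}
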
1575 int ath12k_dp_tx_htt_tx_filter_setup(struct ath12k_base *ab, u32 ring_id,
1581 struct hal_srng *srng = &ab->hal.srng_list[ring_id];
1589 skb = ath12k_htc_alloc_skb(ab, len);
1594 ath12k_hal_srng_get_params(ab, srng, &params);
1596 ret = ath12k_dp_tx_get_ring_id_type(ab, mac_id, ring_id,
1672 ret = ath12k_htc_send(&ab->htc, ab->dp.eid, skb);