// SPDX-License-Identifier: ISC
/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2017 Qualcomm Atheros, Inc.
 * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/export.h>
#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

static u8 ath10k_htt_tx_txq_calc_size(size_t count)
{
	int exp;
	int factor;

	exp = 0;
	factor = count >> 7;

	while (factor >= 64 && exp < 4) {
		factor >>= 3;
		exp++;
	}

	if (exp == 4)
		return 0xff;

	if (count > 0)
		factor = max(1, factor);

	return SM(exp, HTT_TX_Q_STATE_ENTRY_EXP) |
	       SM(factor, HTT_TX_Q_STATE_ENTRY_FACTOR);
}

static void __ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
				       struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;
	struct ath10k_sta *arsta;
	struct ath10k_vif *arvif = (void *)txq->vif->drv_priv;
	unsigned long byte_cnt;
	int idx;
	u32 bit;
	u16 peer_id;
	u8 tid;
	u8 count;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	if (txq->sta) {
		arsta = (void *)txq->sta->drv_priv;
		peer_id = arsta->peer_id;
	} else {
		peer_id = arvif->peer_id;
	}

	tid = txq->tid;
	bit = BIT(peer_id % 32);
	idx = peer_id / 32;

	ieee80211_txq_get_depth(txq, NULL, &byte_cnt);
	count = ath10k_htt_tx_txq_calc_size(byte_cnt);

	if (unlikely(peer_id >= ar->htt.tx_q_state.num_peers) ||
	    unlikely(tid >= ar->htt.tx_q_state.num_tids)) {
		ath10k_warn(ar, "refusing to update txq for peer_id %u tid %u due to out of bounds\n",
			    peer_id, tid);
		return;
	}

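	/* Publish the encoded byte count for this peer/TID and set or clear
	 * its bit in the per-TID peer map; firmware reads this shared queue
	 * state to decide which queues to pull from in push-pull mode.
	 */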
	ar->htt.tx_q_state.vaddr->count[tid][peer_id] = count;
	ar->htt.tx_q_state.vaddr->map[tid][idx] &= ~bit;
	ar->htt.tx_q_state.vaddr->map[tid][idx] |= count ? bit : 0;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update peer_id %u tid %u count %u\n",
		   peer_id, tid, count);
}

static void __ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	u32 seq;
	size_t size;

	lockdep_assert_held(&ar->htt.tx_lock);

	if (!ar->htt.tx_q_state.enabled)
		return;

	if (ar->htt.tx_q_state.mode != HTT_TX_MODE_SWITCH_PUSH_PULL)
		return;

	seq = le32_to_cpu(ar->htt.tx_q_state.vaddr->seq);
	seq++;
	ar->htt.tx_q_state.vaddr->seq = cpu_to_le32(seq);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx txq state update commit seq %u\n",
		   seq);

	size = sizeof(*ar->htt.tx_q_state.vaddr);
	dma_sync_single_for_device(ar->dev,
				   ar->htt.tx_q_state.paddr,
				   size,
				   DMA_TO_DEVICE);
}

void ath10k_htt_tx_txq_recalc(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_sync(struct ath10k *ar)
{
	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_txq_update(struct ieee80211_hw *hw,
			      struct ieee80211_txq *txq)
{
	struct ath10k *ar = hw->priv;

	spin_lock_bh(&ar->htt.tx_lock);
	__ath10k_htt_tx_txq_recalc(hw, txq);
	__ath10k_htt_tx_txq_sync(ar);
	spin_unlock_bh(&ar->htt.tx_lock);
}

void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	if (htt->num_pending_tx == 0)
		wake_up(&htt->empty_tx_wq);
}

int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx)
		return -EBUSY;

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

	return 0;
}

int ath10k_htt_tx_mgmt_inc_pending(struct ath10k_htt *htt, bool is_mgmt,
				   bool is_presp)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	if (!is_mgmt || !ar->hw_params.max_probe_resp_desc_thres)
		return 0;

	if (is_presp &&
	    ar->hw_params.max_probe_resp_desc_thres < htt->num_pending_mgmt_tx)
		return -EBUSY;

	htt->num_pending_mgmt_tx++;

	return 0;
}

void ath10k_htt_tx_mgmt_dec_pending(struct ath10k_htt *htt)
{
	lockdep_assert_held(&htt->tx_lock);

	if (!htt->ar->hw_params.max_probe_resp_desc_thres)
		return;

	htt->num_pending_mgmt_tx--;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	spin_lock_bh(&htt->tx_lock);
	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);
	spin_unlock_bh(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %u\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

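/* The helpers below manage the DMA-coherent, contiguous TX descriptor and
 * fragment descriptor arrays. One slot is reserved per possible pending MSDU
 * so that a descriptor can be located directly by its msdu_id.
 */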
static void ath10k_htt_tx_free_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_32)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_32,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_32);

	htt->txbuf.vaddr_txbuff_32 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_32)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!htt->txbuf.vaddr_txbuff_64)
		return;

	size = htt->txbuf.size;
	dma_free_coherent(ar->dev, size, htt->txbuf.vaddr_txbuff_64,
			  htt->txbuf.paddr);
	htt->txbuf.vaddr_txbuff_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_txbuf_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	size = htt->max_num_pending_tx *
	       sizeof(struct ath10k_htt_txbuf_64);

	htt->txbuf.vaddr_txbuff_64 = dma_alloc_coherent(ar->dev, size,
							&htt->txbuf.paddr,
							GFP_KERNEL);
	if (!htt->txbuf.vaddr_txbuff_64)
		return -ENOMEM;

	htt->txbuf.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_32(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_32)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_32,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_32 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr_desc_32 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_32) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

static void ath10k_htt_tx_free_cont_frag_desc_64(struct ath10k_htt *htt)
{
	size_t size;

	if (!htt->frag_desc.vaddr_desc_64)
		return;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	dma_free_coherent(htt->ar->dev,
			  size,
			  htt->frag_desc.vaddr_desc_64,
			  htt->frag_desc.paddr);

	htt->frag_desc.vaddr_desc_64 = NULL;
}

static int ath10k_htt_tx_alloc_cont_frag_desc_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	size = htt->max_num_pending_tx *
	       sizeof(struct htt_msdu_ext_desc_64);

	htt->frag_desc.vaddr_desc_64 = dma_alloc_coherent(ar->dev, size,
							  &htt->frag_desc.paddr,
							  GFP_KERNEL);
	if (!htt->frag_desc.vaddr_desc_64) {
		ath10k_err(ar, "failed to alloc fragment desc memory\n");
		return -ENOMEM;
	}
	htt->frag_desc.size = size;

	return 0;
}

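/* Per-peer/per-TID TX queue state shared with firmware. It is only allocated
 * and mapped when the running firmware advertises peer flow control.
 */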
static void ath10k_htt_tx_free_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return;

	size = sizeof(*htt->tx_q_state.vaddr);

	dma_unmap_single(ar->dev, htt->tx_q_state.paddr, size, DMA_TO_DEVICE);
	kfree(htt->tx_q_state.vaddr);
}

static int ath10k_htt_tx_alloc_txq(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	size_t size;
	int ret;

	if (!test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		      ar->running_fw->fw_file.fw_features))
		return 0;

	htt->tx_q_state.num_peers = HTT_TX_Q_STATE_NUM_PEERS;
	htt->tx_q_state.num_tids = HTT_TX_Q_STATE_NUM_TIDS;
	htt->tx_q_state.type = HTT_Q_DEPTH_TYPE_BYTES;

	size = sizeof(*htt->tx_q_state.vaddr);
	htt->tx_q_state.vaddr = kzalloc(size, GFP_KERNEL);
	if (!htt->tx_q_state.vaddr)
		return -ENOMEM;

	htt->tx_q_state.paddr = dma_map_single(ar->dev, htt->tx_q_state.vaddr,
					       size, DMA_TO_DEVICE);
	ret = dma_mapping_error(ar->dev, htt->tx_q_state.paddr);
	if (ret) {
		ath10k_warn(ar, "failed to dma map tx_q_state: %d\n", ret);
		kfree(htt->tx_q_state.vaddr);
		return -EIO;
	}

	return 0;
}

static void ath10k_htt_tx_free_txdone_fifo(struct ath10k_htt *htt)
{
	WARN_ON(!kfifo_is_empty(&htt->txdone_fifo));
	kfifo_free(&htt->txdone_fifo);
}

static int ath10k_htt_tx_alloc_txdone_fifo(struct ath10k_htt *htt)
{
	int ret;
	size_t size;

	size = roundup_pow_of_two(htt->max_num_pending_tx);
	ret = kfifo_alloc(&htt->txdone_fifo, size, GFP_KERNEL);
	return ret;
}

static int ath10k_htt_tx_alloc_buf(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ret = ath10k_htt_alloc_txbuff(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont tx buffer: %d\n", ret);
		return ret;
	}

	ret = ath10k_htt_alloc_frag_desc(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc cont frag desc: %d\n", ret);
		goto free_txbuf;
	}

	ret = ath10k_htt_tx_alloc_txq(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txq: %d\n", ret);
		goto free_frag_desc;
	}

	ret = ath10k_htt_tx_alloc_txdone_fifo(htt);
	if (ret) {
		ath10k_err(ar, "failed to alloc txdone fifo: %d\n", ret);
		goto free_txq;
	}

	return 0;

free_txq:
	ath10k_htt_tx_free_txq(htt);

free_frag_desc:
	ath10k_htt_free_frag_desc(htt);

free_txbuf:
	ath10k_htt_free_txbuff(htt);

	return ret;
}

int ath10k_htt_tx_start(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	if (htt->tx_mem_allocated)
		return 0;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		return 0;

	ret = ath10k_htt_tx_alloc_buf(htt);
	if (ret)
		goto free_idr_pending_tx;

	htt->tx_mem_allocated = true;

	return 0;

free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);

	return ret;
}

static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %u\n", msdu_id);

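	/* Complete the orphaned MSDU as discarded so its skb and msdu_id are
	 * released through the regular tx completion path.
	 */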
	tx_done.msdu_id = msdu_id;
	tx_done.status = HTT_TX_COMPL_STATE_DISCARD;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_destroy(struct ath10k_htt *htt)
{
	if (!htt->tx_mem_allocated)
		return;

	ath10k_htt_free_txbuff(htt);
	ath10k_htt_tx_free_txq(htt);
	ath10k_htt_free_frag_desc(htt);
	ath10k_htt_tx_free_txdone_fifo(htt);
	htt->tx_mem_allocated = false;
}

static void ath10k_htt_flush_tx_queue(struct ath10k_htt *htt)
{
	ath10k_htc_stop_hl(htt->ar);
	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
}

void ath10k_htt_tx_stop(struct ath10k_htt *htt)
{
	ath10k_htt_flush_tx_queue(htt);
	idr_destroy(&htt->pending_tx);
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	ath10k_htt_tx_stop(htt);
	ath10k_htt_tx_destroy(htt);
}

void ath10k_htt_op_ep_tx_credits(struct ath10k *ar)
{
	queue_work(ar->workqueue, &ar->bundle_tx_work);
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};
	struct htt_cmd_hdr *htt_hdr;
	struct htt_data_tx_desc *desc_hdr = NULL;
	u16 flags1 = 0;
	u8 msg_type = 0;

	if (htt->disable_tx_comp) {
		htt_hdr = (struct htt_cmd_hdr *)skb->data;
		msg_type = htt_hdr->msg_type;

		if (msg_type == HTT_H2T_MSG_TYPE_TX_FRM) {
			desc_hdr = (struct htt_data_tx_desc *)
				(skb->data + sizeof(*htt_hdr));
			flags1 = __le16_to_cpu(desc_hdr->flags1);
			skb_pull(skb, sizeof(struct htt_cmd_hdr));
			skb_pull(skb, sizeof(struct htt_data_tx_desc));
		}
	}

	dev_kfree_skb_any(skb);

	if ((!htt->disable_tx_comp) || (msg_type != HTT_H2T_MSG_TYPE_TX_FRM))
		return;

	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx complete msdu id:%u ,flags1:%x\n",
		   __le16_to_cpu(desc_hdr->id), flags1);

	if (flags1 & HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE)
		return;

	tx_done.status = HTT_TX_COMPL_STATE_ACK;
	tx_done.msdu_id = __le16_to_cpu(desc_hdr->id);
	ath10k_txrx_tx_unref(&ar->htt, &tx_done);
}

void ath10k_htt_hif_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}
EXPORT_SYMBOL(ath10k_htt_hif_tx_complete);

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u32 mask, u32 reset_mask,
			     u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 24 bit masks so no need to worry
	 * about endian support
	 */
	memcpy(req->upload_types, &mask, 3);
	memcpy(req->reset_types, &reset_mask, 3);
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg32 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg32);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg32;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc);
	cfg->bank_base_addrs[0] = __cpu_to_le32(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_frag_desc_bank_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_frag_desc_bank_cfg64 *cfg;
	int ret, size;
	u8 info;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg64);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;

	info = 0;
	info |= SM(htt->tx_q_state.type,
		   HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_DEPTH_TYPE);

	if (test_bit(ATH10K_FW_FEATURE_PEER_FLOW_CONTROL,
		     ar->running_fw->fw_file.fw_features))
		info |= HTT_FRAG_DESC_BANK_CFG_INFO_Q_STATE_VALID;

	cfg = &cmd->frag_desc_bank_cfg64;
	cfg->info = info;
	cfg->num_banks = 1;
	cfg->desc_size = sizeof(struct htt_msdu_ext_desc_64);
	cfg->bank_base_addrs[0] = __cpu_to_le64(htt->frag_desc.paddr);
	cfg->bank_id[0].bank_min_id = 0;
	cfg->bank_id[0].bank_max_id = __cpu_to_le16(htt->max_num_pending_tx -
						    1);

	cfg->q_state.paddr = cpu_to_le32(htt->tx_q_state.paddr);
	cfg->q_state.num_peers = cpu_to_le16(htt->tx_q_state.num_peers);
	cfg->q_state.num_tids = cpu_to_le16(htt->tx_q_state.num_tids);
	cfg->q_state.record_size = HTT_TX_Q_STATE_ENTRY_SIZE;
	cfg->q_state.record_multiplier = HTT_TX_Q_STATE_ENTRY_MULTIPLIER;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt frag desc bank cmd\n");

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static void ath10k_htt_fill_rx_desc_offset_32(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring32 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static void ath10k_htt_fill_rx_desc_offset_64(struct ath10k_hw_params *hw,
					      struct htt_rx_ring_setup_ring64 *rx_ring)
{
	ath10k_htt_rx_desc_get_offsets(hw, &rx_ring->offsets);
}

static int ath10k_htt_send_rx_ring_cfg_32(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_32(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_64(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct ath10k_hw_params *hw = &ar->hw_params;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring64 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/* HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_64.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_64.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_64.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr = __cpu_to_le64(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le64(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

	ath10k_htt_fill_rx_desc_offset_64(hw, ring);
	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_send_rx_ring_cfg_hl(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring32 *ring;
	const int num_rx_ring = 1;
	u16 flags;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup_32.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup_32.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup_32.hdr.num_rings = 1;

	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;

	memset(ring, 0, sizeof(*ring));
	ring->rx_ring_len = __cpu_to_le16(HTT_RX_RING_SIZE_MIN);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_32(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

static int ath10k_htt_h2t_aggr_cfg_msg_v2(struct ath10k_htt *htt,
					  u8 max_subfrms_ampdu,
					  u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf_v2 *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf_v2);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf_v2;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_tx_fetch_resp(struct ath10k *ar,
			     __le32 token,
			     __le16 fetch_seq_num,
			     struct htt_tx_fetch_record *records,
			     size_t num_records)
{
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	const u16 resp_id = 0;
	int len = 0;
	int ret;

	/* Response IDs are echoed back only for host driver convenience
	 * purposes. They aren't used for anything in the driver yet so use 0.
	 */

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->tx_fetch_resp);
	len += sizeof(cmd->tx_fetch_resp.records[0]) * num_records;

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FETCH_RESP;
	cmd->tx_fetch_resp.resp_id = cpu_to_le16(resp_id);
	cmd->tx_fetch_resp.fetch_seq_num = fetch_seq_num;
	cmd->tx_fetch_resp.num_records = cpu_to_le16(num_records);
	cmd->tx_fetch_resp.token = token;

	memcpy(cmd->tx_fetch_resp.records, records,
	       sizeof(records[0]) * num_records);

	ret = ath10k_htc_send(&ar->htc, ar->htt.eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to submit htc command: %d\n", ret);
		goto err_free_skb;
	}

	return 0;

err_free_skb:
	dev_kfree_skb_any(skb);

	return ret;
}

static u8 ath10k_htt_tx_get_vdev_id(struct ath10k *ar, struct sk_buff *skb)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);
	struct ath10k_vif *arvif;

	if (info->flags & IEEE80211_TX_CTL_TX_OFFCHAN) {
		return ar->scan.vdev_id;
	} else if (cb->vif) {
		arvif = (void *)cb->vif->drv_priv;
		return arvif->vdev_id;
	} else if (ar->monitor_started) {
		return ar->monitor_vdev_id;
	} else {
		return 0;
	}
}

static u8 ath10k_htt_tx_get_tid(struct sk_buff *skb, bool is_eth)
{
	struct ieee80211_hdr *hdr = (void *)skb->data;
	struct ath10k_skb_cb *cb = ATH10K_SKB_CB(skb);

	if (!is_eth && ieee80211_is_mgmt(hdr->frame_control))
		return HTT_DATA_TX_EXT_TID_MGMT;
	else if (cb->flags & ATH10K_SKB_F_QOS)
		return skb->priority & IEEE80211_QOS_CTL_TID_MASK;
	else
		return HTT_DATA_TX_EXT_TID_NON_QOS_MCAST_BCAST;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	int len = 0;
	int msdu_id = -1;
	int res;
	const u8 *peer_addr;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

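	/* For protected action, deauth and disassoc frames, extend the frame
	 * length to leave room for the MIC: an MMIE for multicast receivers,
	 * a GCMP or CCMP MIC for unicast ones.
	 */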
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		peer_addr = hdr->addr1;
		if (is_multicast_ether_addr(peer_addr)) {
			skb_put(msdu, sizeof(struct ieee80211_mmie_16));
		} else {
			if (skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP ||
			    skb_cb->ucast_cipher == WLAN_CIPHER_SUITE_GCMP_256)
				skb_put(msdu, IEEE80211_GCMP_MIC_LEN);
			else
				skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	if (ar->bus_param.dev_type != ATH10K_DEV_TYPE_HL)
		dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

#define HTT_TX_HL_NEEDED_HEADROOM \
	(unsigned int)(sizeof(struct htt_cmd_hdr) + \
	sizeof(struct htt_data_tx_desc) + \
	sizeof(struct ath10k_htc_hdr))

static int ath10k_htt_tx_hl(struct ath10k_htt *htt, enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	int res, data_len;
	struct htt_cmd_hdr *cmd_hdr;
	struct htt_data_tx_desc *tx_desc;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct sk_buff *tmp_skb;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	u8 flags0 = 0;
	u16 flags1 = 0;
	u16 msdu_id = 0;

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	data_len = msdu->len;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		if (htt->disable_tx_comp)
			flags1 |= HTT_DATA_TX_DESC_FLAGS1_TX_COMPLETE;
		break;
	}

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
	}

	/* Prepend the HTT header and TX desc struct to the data message
	 * and realloc the skb if it does not have enough headroom.
	 */
	if (skb_headroom(msdu) < HTT_TX_HL_NEEDED_HEADROOM) {
		tmp_skb = msdu;

		ath10k_dbg(htt->ar, ATH10K_DBG_HTT,
			   "Not enough headroom in skb. Current headroom: %u, needed: %u. Reallocating...\n",
			   skb_headroom(msdu), HTT_TX_HL_NEEDED_HEADROOM);
		msdu = skb_realloc_headroom(msdu, HTT_TX_HL_NEEDED_HEADROOM);
		kfree_skb(tmp_skb);
		if (!msdu) {
			ath10k_warn(htt->ar, "htt hl tx: Unable to realloc skb!\n");
			res = -ENOMEM;
			goto out;
		}
	}

	if (ar->bus_param.hl_msdu_ids) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;
		res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
		if (res < 0) {
			ath10k_err(ar, "msdu_id allocation failed %d\n", res);
			goto out;
		}
		msdu_id = res;
	}

	/* As msdu is freed by mac80211 (in ieee80211_tx_status()) and by
	 * ath10k (in ath10k_htt_htc_tx_complete()) we have to increase
	 * reference by one to avoid a use-after-free case and a double
	 * free.
	 */
	skb_get(msdu);

	skb_push(msdu, sizeof(*cmd_hdr));
	skb_push(msdu, sizeof(*tx_desc));
	cmd_hdr = (struct htt_cmd_hdr *)msdu->data;
	tx_desc = (struct htt_data_tx_desc *)(msdu->data + sizeof(*cmd_hdr));

	cmd_hdr->msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	tx_desc->flags0 = flags0;
	tx_desc->flags1 = __cpu_to_le16(flags1);
	tx_desc->len = __cpu_to_le16(data_len);
	tx_desc->id = __cpu_to_le16(msdu_id);
	tx_desc->frags_paddr = 0; /* always zero */
	/* Initialize peer_id to INVALID_PEER because this is NOT
	 * Reinjection path
	 */
	tx_desc->peerid = __cpu_to_le32(HTT_INVALID_PEERID);

	res = ath10k_htc_send_hl(&htt->ar->htc, htt->eid, msdu);

out:
	return res;
}

static int ath10k_htt_tx_32(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_32 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	u32 frags_paddr = 0;
	u32 txbuf_paddr;
	struct htt_msdu_ext_desc *ext_desc = NULL;
	struct htt_msdu_ext_desc *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

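	/* Each msdu_id owns one slot in the contiguous txbuf array; derive
	 * both its CPU and DMA addresses from the allocation base.
	 */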
	txbuf = htt->txbuf.vaddr_txbuff_32 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_32) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_32;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				      (sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = txbuf_paddr;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

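	/* Fill the HTC header by hand; this descriptor is handed straight to
	 * the HIF scatter-gather path below rather than going through HTC.
	 */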
	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static int ath10k_htt_tx_64(struct ath10k_htt *htt,
			    enum ath10k_hw_txrx_mode txmode,
			    struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(msdu);
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct ath10k_htt_txbuf_64 *txbuf;
	struct htt_data_tx_desc_frag *frags;
	bool is_eth = (txmode == ATH10K_HW_TXRX_ETHERNET);
	u8 vdev_id = ath10k_htt_tx_get_vdev_id(ar, msdu);
	u8 tid = ath10k_htt_tx_get_tid(msdu, is_eth);
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	u16 freq = 0;
	dma_addr_t frags_paddr = 0;
	dma_addr_t txbuf_paddr;
	struct htt_msdu_ext_desc_64 *ext_desc = NULL;
	struct htt_msdu_ext_desc_64 *ext_desc_t = NULL;

	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	if (res < 0)
		goto err;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	txbuf = htt->txbuf.vaddr_txbuff_64 + msdu_id;
	txbuf_paddr = htt->txbuf.paddr +
		      (sizeof(struct ath10k_htt_txbuf_64) * msdu_id);

	if (!is_eth) {
		struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;

		if ((ieee80211_is_action(hdr->frame_control) ||
		     ieee80211_is_deauth(hdr->frame_control) ||
		     ieee80211_is_disassoc(hdr->frame_control)) &&
		     ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		} else if (!(skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT) &&
			   txmode == ATH10K_HW_TXRX_RAW &&
			   ieee80211_has_protected(hdr->frame_control)) {
			skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
		}
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_msdu_id;
	}

	if (unlikely(info->flags & IEEE80211_TX_CTL_TX_OFFCHAN))
		freq = ar->scan.roc_freq;

	switch (txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		fallthrough;
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			ext_desc_t = htt->frag_desc.vaddr_desc_64;
			memset(&ext_desc_t[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc_64));
			frags = (struct htt_data_tx_desc_frag *)
				&ext_desc_t[msdu_id].frags;
			ext_desc = &ext_desc_t[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				      (sizeof(struct htt_msdu_ext_desc_64) * msdu_id);
		} else {
			frags = txbuf->frags;
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi =
				__cpu_to_le16(upper_32_bits(skb_cb->paddr));
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);
			frags[1].tword_addr.paddr_lo = 0;
			frags[1].tword_addr.paddr_hi = 0;
			frags[1].tword_addr.len_16 = 0;
		}
		flags0 |= SM(txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */

	txbuf->htc_hdr.eid = htt->eid;
	txbuf->htc_hdr.len = __cpu_to_le16(sizeof(txbuf->cmd_hdr) +
					   sizeof(txbuf->cmd_tx) +
					   prefetch_len);
	txbuf->htc_hdr.flags = 0;

	if (skb_cb->flags & ATH10K_SKB_F_NO_HWCRYPT)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc) {
			memset(ext_desc->tso_flag, 0, sizeof(ext_desc->tso_flag));
			ext_desc->tso_flag[3] |=
				__cpu_to_le32(HTT_MSDU_CHECKSUM_ENABLE_64);
		}
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	txbuf->cmd_tx.flags0 = flags0;
	txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);

	/* fill fragment descriptor */
	txbuf->cmd_tx.frags_paddr = __cpu_to_le64(frags_paddr);
	if (ath10k_mac_tx_frm_has_freq(ar)) {
		txbuf->cmd_tx.offchan_tx.peerid =
				__cpu_to_le16(HTT_INVALID_PEERID);
		txbuf->cmd_tx.offchan_tx.freq =
				__cpu_to_le16(freq);
	} else {
		txbuf->cmd_tx.peerid =
				__cpu_to_le32(HTT_INVALID_PEERID);
	}

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %u flags1 %u len %d id %u frags_paddr %pad, msdu_paddr %pad vdev %u tid %u freq %u\n",
		   flags0, flags1, msdu->len, msdu_id, &frags_paddr,
		   &skb_cb->paddr, vdev_id, tid, freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &txbuf->htc_hdr;
	sg_items[0].paddr = txbuf_paddr +
			    sizeof(txbuf->frags);
	sg_items[0].len = sizeof(txbuf->htc_hdr) +
			  sizeof(txbuf->cmd_hdr) +
			  sizeof(txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err:
	return res;
}

static const struct ath10k_htt_tx_ops htt_tx_ops_32 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_32,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_32,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_32,
	.htt_tx = ath10k_htt_tx_32,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_32,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_32,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_64 = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_64,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_64,
	.htt_alloc_frag_desc = ath10k_htt_tx_alloc_cont_frag_desc_64,
	.htt_free_frag_desc = ath10k_htt_tx_free_cont_frag_desc_64,
	.htt_tx = ath10k_htt_tx_64,
	.htt_alloc_txbuff = ath10k_htt_tx_alloc_cont_txbuf_64,
	.htt_free_txbuff = ath10k_htt_tx_free_cont_txbuf_64,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_v2,
};

static const struct ath10k_htt_tx_ops htt_tx_ops_hl = {
	.htt_send_rx_ring_cfg = ath10k_htt_send_rx_ring_cfg_hl,
	.htt_send_frag_desc_bank_cfg = ath10k_htt_send_frag_desc_bank_cfg_32,
	.htt_tx = ath10k_htt_tx_hl,
	.htt_h2t_aggr_cfg_msg = ath10k_htt_h2t_aggr_cfg_msg_32,
	.htt_flush_tx = ath10k_htt_flush_tx_queue,
};

void ath10k_htt_set_tx_ops(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;

	if (ar->bus_param.dev_type == ATH10K_DEV_TYPE_HL)
		htt->tx_ops = &htt_tx_ops_hl;
	else if (ar->hw_params.target_64bit)
		htt->tx_ops = &htt_tx_ops_64;
	else
		htt->tx_ops = &htt_tx_ops_32;
}