/*
 * Copyright (c) 2005-2011 Atheros Communications Inc.
 * Copyright (c) 2011-2013 Qualcomm Atheros, Inc.
 *
 * Permission to use, copy, modify, and/or distribute this software for any
 * purpose with or without fee is hereby granted, provided that the above
 * copyright notice and this permission notice appear in all copies.
 *
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
 * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 */

#include <linux/etherdevice.h>
#include "htt.h"
#include "mac.h"
#include "hif.h"
#include "txrx.h"
#include "debug.h"

/* Caller must hold htt->tx_lock. */
void __ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	htt->num_pending_tx--;
	if (htt->num_pending_tx == htt->max_num_pending_tx - 1)
		ath10k_mac_tx_unlock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);
}

static void ath10k_htt_tx_dec_pending(struct ath10k_htt *htt)
{
	spin_lock_bh(&htt->tx_lock);
	__ath10k_htt_tx_dec_pending(htt);
	spin_unlock_bh(&htt->tx_lock);
}

static int ath10k_htt_tx_inc_pending(struct ath10k_htt *htt)
{
	int ret = 0;

	spin_lock_bh(&htt->tx_lock);

	if (htt->num_pending_tx >= htt->max_num_pending_tx) {
		ret = -EBUSY;
		goto exit;
	}

	htt->num_pending_tx++;
	if (htt->num_pending_tx == htt->max_num_pending_tx)
		ath10k_mac_tx_lock(htt->ar, ATH10K_TX_PAUSE_Q_FULL);

exit:
	spin_unlock_bh(&htt->tx_lock);
	return ret;
}

int ath10k_htt_tx_alloc_msdu_id(struct ath10k_htt *htt, struct sk_buff *skb)
{
	struct ath10k *ar = htt->ar;
	int ret;

	lockdep_assert_held(&htt->tx_lock);

	ret = idr_alloc(&htt->pending_tx, skb, 0,
			htt->max_num_pending_tx, GFP_ATOMIC);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx alloc msdu_id %d\n", ret);

	return ret;
}

void ath10k_htt_tx_free_msdu_id(struct ath10k_htt *htt, u16 msdu_id)
{
	struct ath10k *ar = htt->ar;

	lockdep_assert_held(&htt->tx_lock);

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt tx free msdu_id %hu\n", msdu_id);

	idr_remove(&htt->pending_tx, msdu_id);
}

int ath10k_htt_tx_alloc(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	int ret, size;

	ath10k_dbg(ar, ATH10K_DBG_BOOT, "htt tx max num pending tx %d\n",
		   htt->max_num_pending_tx);

	spin_lock_init(&htt->tx_lock);
	idr_init(&htt->pending_tx);

	htt->tx_pool = dma_pool_create("ath10k htt tx pool", htt->ar->dev,
				       sizeof(struct ath10k_htt_txbuf), 4, 0);
	if (!htt->tx_pool) {
		ret = -ENOMEM;
		goto free_idr_pending_tx;
	}

	if (!ar->hw_params.continuous_frag_desc)
		goto skip_frag_desc_alloc;

	size = htt->max_num_pending_tx * sizeof(struct htt_msdu_ext_desc);
	htt->frag_desc.vaddr = dma_alloc_coherent(ar->dev, size,
						  &htt->frag_desc.paddr,
						  GFP_DMA);
	if (!htt->frag_desc.vaddr) {
		ath10k_warn(ar, "failed to alloc fragment desc memory\n");
		ret = -ENOMEM;
		goto free_tx_pool;
	}

skip_frag_desc_alloc:
	return 0;

free_tx_pool:
	dma_pool_destroy(htt->tx_pool);
free_idr_pending_tx:
	idr_destroy(&htt->pending_tx);
	return ret;
}
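
/* idr_for_each() callback used when tearing down the HTT tx state: any
 * msdu_id still pending is completed with discard status so that
 * ath10k_txrx_tx_unref() releases the associated skb and DMA mapping.
 */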
static int ath10k_htt_tx_clean_up_pending(int msdu_id, void *skb, void *ctx)
{
	struct ath10k *ar = ctx;
	struct ath10k_htt *htt = &ar->htt;
	struct htt_tx_done tx_done = {0};

	ath10k_dbg(ar, ATH10K_DBG_HTT, "force cleanup msdu_id %d\n", msdu_id);

	tx_done.discard = 1;
	tx_done.msdu_id = msdu_id;

	ath10k_txrx_tx_unref(htt, &tx_done);

	return 0;
}

void ath10k_htt_tx_free(struct ath10k_htt *htt)
{
	int size;

	idr_for_each(&htt->pending_tx, ath10k_htt_tx_clean_up_pending, htt->ar);
	idr_destroy(&htt->pending_tx);
	dma_pool_destroy(htt->tx_pool);

	if (htt->frag_desc.vaddr) {
		size = htt->max_num_pending_tx *
		       sizeof(struct htt_msdu_ext_desc);
		dma_free_coherent(htt->ar->dev, size, htt->frag_desc.vaddr,
				  htt->frag_desc.paddr);
	}
}

void ath10k_htt_htc_tx_complete(struct ath10k *ar, struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

int ath10k_htt_h2t_ver_req_msg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0;
	int ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->ver_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_VERSION_REQ;

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_h2t_stats_req(struct ath10k_htt *htt, u8 mask, u64 cookie)
{
	struct ath10k *ar = htt->ar;
	struct htt_stats_req *req;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len = 0, ret;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->stats_req);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_STATS_REQ;

	req = &cmd->stats_req;

	memset(req, 0, sizeof(*req));

	/* currently we support only max 8 bit masks so no need to worry
	 * about endian support
	 */
	req->upload_types[0] = mask;
	req->reset_types[0] = mask;
	req->stat_type = HTT_STATS_REQ_CFG_STAT_TYPE_INVALID;
	req->cookie_lsb = cpu_to_le32(cookie & 0xffffffff);
	req->cookie_msb = cpu_to_le32((cookie & 0xffffffff00000000ULL) >> 32);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send htt type stats request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_send_frag_desc_bank_cfg(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int ret, size;

	if (!ar->hw_params.continuous_frag_desc)
		return 0;

	if (!htt->frag_desc.paddr) {
		ath10k_warn(ar, "invalid frag desc memory\n");
		return -EINVAL;
	}

	size = sizeof(cmd->hdr) + sizeof(cmd->frag_desc_bank_cfg);
	skb = ath10k_htc_alloc_skb(ar, size);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, size);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_FRAG_DESC_BANK_CFG;
	cmd->frag_desc_bank_cfg.info = 0;
	cmd->frag_desc_bank_cfg.num_banks = 1;
	cmd->frag_desc_bank_cfg.desc_size = sizeof(struct htt_msdu_ext_desc);
	cmd->frag_desc_bank_cfg.bank_base_addrs[0] =
		__cpu_to_le32(htt->frag_desc.paddr);
	cmd->frag_desc_bank_cfg.bank_id[0].bank_min_id = 0;
	cmd->frag_desc_bank_cfg.bank_id[0].bank_max_id =
		__cpu_to_le16(htt->max_num_pending_tx - 1);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		ath10k_warn(ar, "failed to send frag desc bank cfg request: %d\n",
			    ret);
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
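
/* Describe the host RX ring to the firmware (LL, i.e. the low-latency
 * firmware variant). The *_offset fields are expressed in 4-byte words
 * within struct htt_rx_desc so the firmware knows where to write each
 * status section in every RX buffer.
 */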
int ath10k_htt_send_rx_ring_cfg_ll(struct ath10k_htt *htt)
{
	struct ath10k *ar = htt->ar;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	struct htt_rx_ring_setup_ring *ring;
	const int num_rx_ring = 1;
	u16 flags;
	u32 fw_idx;
	int len;
	int ret;

	/*
	 * the HW expects the buffer to be an integral number of 4-byte
	 * "words"
	 */
	BUILD_BUG_ON(!IS_ALIGNED(HTT_RX_BUF_SIZE, 4));
	BUILD_BUG_ON((HTT_RX_BUF_SIZE & HTT_MAX_CACHE_LINE_SIZE_MASK) != 0);

	len = sizeof(cmd->hdr) + sizeof(cmd->rx_setup.hdr)
	    + (sizeof(*ring) * num_rx_ring);
	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);

	cmd = (struct htt_cmd *)skb->data;
	ring = &cmd->rx_setup.rings[0];

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_RX_RING_CFG;
	cmd->rx_setup.hdr.num_rings = 1;

	/* FIXME: do we need all of this? */
	flags = 0;
	flags |= HTT_RX_RING_FLAGS_MAC80211_HDR;
	flags |= HTT_RX_RING_FLAGS_MSDU_PAYLOAD;
	flags |= HTT_RX_RING_FLAGS_PPDU_START;
	flags |= HTT_RX_RING_FLAGS_PPDU_END;
	flags |= HTT_RX_RING_FLAGS_MPDU_START;
	flags |= HTT_RX_RING_FLAGS_MPDU_END;
	flags |= HTT_RX_RING_FLAGS_MSDU_START;
	flags |= HTT_RX_RING_FLAGS_MSDU_END;
	flags |= HTT_RX_RING_FLAGS_RX_ATTENTION;
	flags |= HTT_RX_RING_FLAGS_FRAG_INFO;
	flags |= HTT_RX_RING_FLAGS_UNICAST_RX;
	flags |= HTT_RX_RING_FLAGS_MULTICAST_RX;
	flags |= HTT_RX_RING_FLAGS_CTRL_RX;
	flags |= HTT_RX_RING_FLAGS_MGMT_RX;
	flags |= HTT_RX_RING_FLAGS_NULL_RX;
	flags |= HTT_RX_RING_FLAGS_PHY_DATA_RX;

	fw_idx = __le32_to_cpu(*htt->rx_ring.alloc_idx.vaddr);

	ring->fw_idx_shadow_reg_paddr =
		__cpu_to_le32(htt->rx_ring.alloc_idx.paddr);
	ring->rx_ring_base_paddr = __cpu_to_le32(htt->rx_ring.base_paddr);
	ring->rx_ring_len = __cpu_to_le16(htt->rx_ring.size);
	ring->rx_ring_bufsize = __cpu_to_le16(HTT_RX_BUF_SIZE);
	ring->flags = __cpu_to_le16(flags);
	ring->fw_idx_init_val = __cpu_to_le16(fw_idx);

#define desc_offset(x) (offsetof(struct htt_rx_desc, x) / 4)

	ring->mac80211_hdr_offset = __cpu_to_le16(desc_offset(rx_hdr_status));
	ring->msdu_payload_offset = __cpu_to_le16(desc_offset(msdu_payload));
	ring->ppdu_start_offset = __cpu_to_le16(desc_offset(ppdu_start));
	ring->ppdu_end_offset = __cpu_to_le16(desc_offset(ppdu_end));
	ring->mpdu_start_offset = __cpu_to_le16(desc_offset(mpdu_start));
	ring->mpdu_end_offset = __cpu_to_le16(desc_offset(mpdu_end));
	ring->msdu_start_offset = __cpu_to_le16(desc_offset(msdu_start));
	ring->msdu_end_offset = __cpu_to_le16(desc_offset(msdu_end));
	ring->rx_attention_offset = __cpu_to_le16(desc_offset(attention));
	ring->frag_info_offset = __cpu_to_le16(desc_offset(frag_info));

#undef desc_offset

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}
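
/* Configure aggregation limits. The firmware defaults (amsdu = 3,
 * ampdu = 64) are noted below; values outside the supported range are
 * rejected with -EINVAL rather than clamped.
 */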
int ath10k_htt_h2t_aggr_cfg_msg(struct ath10k_htt *htt,
				u8 max_subfrms_ampdu,
				u8 max_subfrms_amsdu)
{
	struct ath10k *ar = htt->ar;
	struct htt_aggr_conf *aggr_conf;
	struct sk_buff *skb;
	struct htt_cmd *cmd;
	int len;
	int ret;

	/* Firmware defaults are: amsdu = 3 and ampdu = 64 */

	if (max_subfrms_ampdu == 0 || max_subfrms_ampdu > 64)
		return -EINVAL;

	if (max_subfrms_amsdu == 0 || max_subfrms_amsdu > 31)
		return -EINVAL;

	len = sizeof(cmd->hdr);
	len += sizeof(cmd->aggr_conf);

	skb = ath10k_htc_alloc_skb(ar, len);
	if (!skb)
		return -ENOMEM;

	skb_put(skb, len);
	cmd = (struct htt_cmd *)skb->data;
	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_AGGR_CFG;

	aggr_conf = &cmd->aggr_conf;
	aggr_conf->max_num_ampdu_subframes = max_subfrms_ampdu;
	aggr_conf->max_num_amsdu_subframes = max_subfrms_amsdu;

	ath10k_dbg(ar, ATH10K_DBG_HTT, "htt h2t aggr cfg msg amsdu %d ampdu %d\n",
		   aggr_conf->max_num_amsdu_subframes,
		   aggr_conf->max_num_ampdu_subframes);

	ret = ath10k_htc_send(&htt->ar->htc, htt->eid, skb);
	if (ret) {
		dev_kfree_skb_any(skb);
		return ret;
	}

	return 0;
}

int ath10k_htt_mgmt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct sk_buff *txdesc = NULL;
	struct htt_cmd *cmd;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	u8 vdev_id = skb_cb->vdev_id;
	int len = 0;
	int msdu_id = -1;
	int res;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	len += sizeof(cmd->hdr);
	len += sizeof(cmd->mgmt_tx);

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	txdesc = ath10k_htc_alloc_skb(ar, len);
	if (!txdesc) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txdesc;
	}

	skb_put(txdesc, len);
	cmd = (struct htt_cmd *)txdesc->data;
	memset(cmd, 0, len);

	cmd->hdr.msg_type = HTT_H2T_MSG_TYPE_MGMT_TX;
	cmd->mgmt_tx.msdu_paddr = __cpu_to_le32(ATH10K_SKB_CB(msdu)->paddr);
	cmd->mgmt_tx.len = __cpu_to_le32(msdu->len);
	cmd->mgmt_tx.desc_id = __cpu_to_le32(msdu_id);
	cmd->mgmt_tx.vdev_id = __cpu_to_le32(vdev_id);
	memcpy(cmd->mgmt_tx.hdr, msdu->data,
	       min_t(int, msdu->len, HTT_MGMT_FRM_HDR_DOWNLOAD_LEN));

	skb_cb->htt.txbuf = NULL;

	res = ath10k_htc_send(&htt->ar->htc, htt->eid, txdesc);
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txdesc:
	dev_kfree_skb_any(txdesc);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}
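
/* Data tx via HTT_H2T_MSG_TYPE_TX_FRM. The frame is handed to HIF as
 * two scatter-gather items: the prebuilt HTC/HTT headers from the dma
 * pool and a prefetch_len-sized prefix of the msdu itself; the firmware
 * fetches the full payload via frags_paddr.
 */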
int ath10k_htt_tx(struct ath10k_htt *htt, struct sk_buff *msdu)
{
	struct ath10k *ar = htt->ar;
	struct device *dev = ar->dev;
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)msdu->data;
	struct ath10k_skb_cb *skb_cb = ATH10K_SKB_CB(msdu);
	struct ath10k_hif_sg_item sg_items[2];
	struct htt_data_tx_desc_frag *frags;
	u8 vdev_id = skb_cb->vdev_id;
	u8 tid = skb_cb->htt.tid;
	int prefetch_len;
	int res;
	u8 flags0 = 0;
	u16 msdu_id, flags1 = 0;
	dma_addr_t paddr = 0;
	u32 frags_paddr = 0;
	struct htt_msdu_ext_desc *ext_desc = NULL;

	res = ath10k_htt_tx_inc_pending(htt);
	if (res)
		goto err;

	spin_lock_bh(&htt->tx_lock);
	res = ath10k_htt_tx_alloc_msdu_id(htt, msdu);
	spin_unlock_bh(&htt->tx_lock);
	if (res < 0)
		goto err_tx_dec;

	msdu_id = res;

	prefetch_len = min(htt->prefetch_len, msdu->len);
	prefetch_len = roundup(prefetch_len, 4);

	skb_cb->htt.txbuf = dma_pool_alloc(htt->tx_pool, GFP_ATOMIC,
					   &paddr);
	if (!skb_cb->htt.txbuf) {
		res = -ENOMEM;
		goto err_free_msdu_id;
	}
	skb_cb->htt.txbuf_paddr = paddr;

	/* Make room for the trailing CCMP MIC, which is appended in
	 * hardware/firmware for these protected frames.
	 */
	if ((ieee80211_is_action(hdr->frame_control) ||
	     ieee80211_is_deauth(hdr->frame_control) ||
	     ieee80211_is_disassoc(hdr->frame_control)) &&
	     ieee80211_has_protected(hdr->frame_control)) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	} else if (!skb_cb->htt.nohwcrypt &&
		   skb_cb->txmode == ATH10K_HW_TXRX_RAW) {
		skb_put(msdu, IEEE80211_CCMP_MIC_LEN);
	}

	skb_cb->paddr = dma_map_single(dev, msdu->data, msdu->len,
				       DMA_TO_DEVICE);
	res = dma_mapping_error(dev, skb_cb->paddr);
	if (res) {
		res = -EIO;
		goto err_free_txbuf;
	}

	switch (skb_cb->txmode) {
	case ATH10K_HW_TXRX_RAW:
	case ATH10K_HW_TXRX_NATIVE_WIFI:
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;
		/* fall through */
	case ATH10K_HW_TXRX_ETHERNET:
		if (ar->hw_params.continuous_frag_desc) {
			memset(&htt->frag_desc.vaddr[msdu_id], 0,
			       sizeof(struct htt_msdu_ext_desc));
			frags = (struct htt_data_tx_desc_frag *)
				&htt->frag_desc.vaddr[msdu_id].frags;
			ext_desc = &htt->frag_desc.vaddr[msdu_id];
			frags[0].tword_addr.paddr_lo =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].tword_addr.paddr_hi = 0;
			frags[0].tword_addr.len_16 = __cpu_to_le16(msdu->len);

			frags_paddr = htt->frag_desc.paddr +
				(sizeof(struct htt_msdu_ext_desc) * msdu_id);
		} else {
			frags = skb_cb->htt.txbuf->frags;
			frags[0].dword_addr.paddr =
				__cpu_to_le32(skb_cb->paddr);
			frags[0].dword_addr.len = __cpu_to_le32(msdu->len);
			frags[1].dword_addr.paddr = 0;
			frags[1].dword_addr.len = 0;

			frags_paddr = skb_cb->htt.txbuf_paddr;
		}
		flags0 |= SM(skb_cb->txmode, HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		break;
	case ATH10K_HW_TXRX_MGMT:
		flags0 |= SM(ATH10K_HW_TXRX_MGMT,
			     HTT_DATA_TX_DESC_FLAGS0_PKT_TYPE);
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_MAC_HDR_PRESENT;

		frags_paddr = skb_cb->paddr;
		break;
	}

	/* Normally all commands go through HTC which manages tx credits for
	 * each endpoint and notifies when tx is completed.
	 *
	 * HTT endpoint is creditless so there's no need to care about HTC
	 * flags. In that case it is trivial to fill the HTC header here.
	 *
	 * MSDU transmission is considered completed upon HTT event. This
	 * implies no relevant resources can be freed until after the event is
	 * received. That's why HTC tx completion handler itself is ignored by
	 * setting NULL to transfer_context for all sg items.
	 *
	 * There is simply no point in pushing HTT TX_FRM through HTC tx path
	 * as it's a waste of resources. By bypassing HTC it is possible to
	 * avoid extra memory allocations, compress data structures and thus
	 * improve performance.
	 */
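
	/* The txbuf from the dma pool lays out frags, htc_hdr, cmd_hdr and
	 * cmd_tx contiguously; sg_items[0] below therefore skips over the
	 * frags array (paddr offset by sizeof(frags)) so that only the
	 * headers go out, followed by the msdu prefix as the second item.
	 */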
	skb_cb->htt.txbuf->htc_hdr.eid = htt->eid;
	skb_cb->htt.txbuf->htc_hdr.len = __cpu_to_le16(
			sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			sizeof(skb_cb->htt.txbuf->cmd_tx) +
			prefetch_len);
	skb_cb->htt.txbuf->htc_hdr.flags = 0;

	if (skb_cb->htt.nohwcrypt)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	if (!skb_cb->is_protected)
		flags0 |= HTT_DATA_TX_DESC_FLAGS0_NO_ENCRYPT;

	flags1 |= SM((u16)vdev_id, HTT_DATA_TX_DESC_FLAGS1_VDEV_ID);
	flags1 |= SM((u16)tid, HTT_DATA_TX_DESC_FLAGS1_EXT_TID);
	if (msdu->ip_summed == CHECKSUM_PARTIAL &&
	    !test_bit(ATH10K_FLAG_RAW_MODE, &ar->dev_flags)) {
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L3_OFFLOAD;
		flags1 |= HTT_DATA_TX_DESC_FLAGS1_CKSUM_L4_OFFLOAD;
		if (ar->hw_params.continuous_frag_desc)
			ext_desc->flags |= HTT_MSDU_CHECKSUM_ENABLE;
	}

	/* Prevent firmware from sending up tx inspection requests. There's
	 * nothing ath10k can do with frames requested for inspection so force
	 * it to simply rely on a regular tx completion with discard status.
	 */
	flags1 |= HTT_DATA_TX_DESC_FLAGS1_POSTPONED;

	skb_cb->htt.txbuf->cmd_hdr.msg_type = HTT_H2T_MSG_TYPE_TX_FRM;
	skb_cb->htt.txbuf->cmd_tx.flags0 = flags0;
	skb_cb->htt.txbuf->cmd_tx.flags1 = __cpu_to_le16(flags1);
	skb_cb->htt.txbuf->cmd_tx.len = __cpu_to_le16(msdu->len);
	skb_cb->htt.txbuf->cmd_tx.id = __cpu_to_le16(msdu_id);
	skb_cb->htt.txbuf->cmd_tx.frags_paddr = __cpu_to_le32(frags_paddr);
	skb_cb->htt.txbuf->cmd_tx.peerid = __cpu_to_le16(HTT_INVALID_PEERID);
	skb_cb->htt.txbuf->cmd_tx.freq = __cpu_to_le16(skb_cb->htt.freq);

	trace_ath10k_htt_tx(ar, msdu_id, msdu->len, vdev_id, tid);
	ath10k_dbg(ar, ATH10K_DBG_HTT,
		   "htt tx flags0 %hhu flags1 %hu len %d id %hu frags_paddr %08x, msdu_paddr %08x vdev %hhu tid %hhu freq %hu\n",
		   flags0, flags1, msdu->len, msdu_id, frags_paddr,
		   (u32)skb_cb->paddr, vdev_id, tid, skb_cb->htt.freq);
	ath10k_dbg_dump(ar, ATH10K_DBG_HTT_DUMP, NULL, "htt tx msdu: ",
			msdu->data, msdu->len);
	trace_ath10k_tx_hdr(ar, msdu->data, msdu->len);
	trace_ath10k_tx_payload(ar, msdu->data, msdu->len);

	sg_items[0].transfer_id = 0;
	sg_items[0].transfer_context = NULL;
	sg_items[0].vaddr = &skb_cb->htt.txbuf->htc_hdr;
	sg_items[0].paddr = skb_cb->htt.txbuf_paddr +
			    sizeof(skb_cb->htt.txbuf->frags);
	sg_items[0].len = sizeof(skb_cb->htt.txbuf->htc_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_hdr) +
			  sizeof(skb_cb->htt.txbuf->cmd_tx);

	sg_items[1].transfer_id = 0;
	sg_items[1].transfer_context = NULL;
	sg_items[1].vaddr = msdu->data;
	sg_items[1].paddr = skb_cb->paddr;
	sg_items[1].len = prefetch_len;

	res = ath10k_hif_tx_sg(htt->ar,
			       htt->ar->htc.endpoint[htt->eid].ul_pipe_id,
			       sg_items, ARRAY_SIZE(sg_items));
	if (res)
		goto err_unmap_msdu;

	return 0;

err_unmap_msdu:
	dma_unmap_single(dev, skb_cb->paddr, msdu->len, DMA_TO_DEVICE);
err_free_txbuf:
	dma_pool_free(htt->tx_pool,
		      skb_cb->htt.txbuf,
		      skb_cb->htt.txbuf_paddr);
err_free_msdu_id:
	spin_lock_bh(&htt->tx_lock);
	ath10k_htt_tx_free_msdu_id(htt, msdu_id);
	spin_unlock_bh(&htt->tx_lock);
err_tx_dec:
	ath10k_htt_tx_dec_pending(htt);
err:
	return res;
}