// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes less than the low mark, the Tx queue
 * is stopped. When reclaiming packets (on a 'tx done' IRQ), if the free space
 * becomes greater than the high mark, the Tx queue is resumed.
 *
 ***************************************************/


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	int i;

	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans->txqs.txq[i];

		if (!test_bit(i, trans->txqs.queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}
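
/*
 * Note on the layout programmed above: tb->lo takes the low 32 bits of
 * the DMA address, the low nibble of hi_n_len takes whatever
 * iwl_get_dma_hi_addr() returns for the high address bits, and the
 * upper 12 bits of hi_n_len take the length. For example (assuming
 * iwl_get_dma_hi_addr() returns the address bits above bit 31),
 * addr = 0x123456780 and len = 0x40 would be stored as
 * lo = 0x23456780 and hi_n_len = (0x40 << 4) | 0x1 = 0x0401.
 */
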
static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans->txqs.tfd.size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans->txqs.tfd.size);

	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
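
/*
 * On success iwl_pcie_txq_build_tfd() returns the index of the TB it
 * just filled; a negative value means the TFD already has max_tbs
 * entries or the address is unusable. Callers such as
 * iwl_fill_data_tbs() below record the returned index in
 * out_meta->tbs so the completion path knows which TBs were mapped
 * from skb fragments.
 */
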
static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->txqs.cmd.q_id) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_txq_free_tso_page(trans, skb);
		}
		iwl_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id == trans->txqs.cmd.q_id)
				iwl_pcie_clear_cmd_in_flight(trans);
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->txqs.cmd.q_id)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans->txqs.tfd.size *
				  trans->trans_cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}
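
/*
 * Each Tx queue thus owns three allocations that the free path above
 * releases: the entries[] array of driver bookkeeping (skb, cmd, meta
 * and free_buf per slot), the DMA-coherent ring of TFDs indexed by the
 * write pointer, and the DMA-coherent first_tb_bufs[] scratch area
 * holding the IWL_FIRST_TB_SIZE bytes that every first TB points to.
 */
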
void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->trans_cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans->txqs.scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans->txqs.cmd.q_id,
				trans->txqs.cmd.fifo,
				trans->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to
	 * avoid invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans->txqs.txq[txq_id];

		if (trans->trans_cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans->txqs.queue_stopped, 0,
	       sizeof(trans->txqs.queue_stopped));
	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans->txqs.queue_used, 0, sizeof(trans->txqs.queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->trans_cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans->txqs.txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans->txqs.scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;

	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
		return -EINVAL;

	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);

	/*
	 * It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail.
	 */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans->txqs.scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory =
		kcalloc(trans->trans_cfg->base_params->num_of_queues,
			sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		trans->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_txq_alloc(trans, trans->txqs.txq[txq_id], slots_num,
				    cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans->txqs.txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans->txqs.cmd.q_id);

		if (cmd_queue)
			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
					  trans->cfg->min_txq_size);
		else
			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
					  trans->cfg->min_256_ba_txq_size);
		ret = iwl_txq_init(trans, trans->txqs.txq[txq_id], slots_num,
				   cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans->txqs.txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->trans_cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}
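
/*
 * Note the split above: iwl_pcie_tx_alloc() runs only while
 * trans_pcie->txq_memory is still NULL and allocates the byte-count
 * tables, the keep-warm buffer and the per-queue structures, whereas
 * iwl_pcie_tx_init() may run again on every (re)start to reprogram the
 * scheduler and the per-queue TFD ring base registers. On error, the
 * memory is freed only if this call was the one that allocated it.
 */
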
static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (trans->trans_cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
				   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
						   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}
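
/*
 * iwl_pcie_set_cmd_in_flight() is paired with
 * iwl_pcie_clear_cmd_in_flight() above: the MAC_ACCESS_REQ bit taken
 * here is released (and cmd_hold_nic_awake cleared) once the command
 * queue drains, either from iwl_pcie_cmdq_reclaim() below or from
 * iwl_pcie_txq_unmap(), always under trans_pcie->reg_lock.
 */
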
/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms. If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_txq_get_cmd_index(txq, idx);
	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
	    (!iwl_txq_used(txq, idx))) {
		WARN_ONCE(test_bit(txq_id, trans->txqs.queue_used),
			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			  __func__, txq_id, idx,
			  trans->trans_cfg->base_params->max_tfd_queue_size,
			  txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
	     r = iwl_txq_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				      u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
		      SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_txq *txq = trans->txqs.txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
	trans->txqs.txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans->txqs.queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans->txqs.txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @priv: device private data pointer
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans->txqs.cmd.q_id) |
				    INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans->txqs.cmd.q_id);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_txq_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
					       txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kfree_sensitive(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

out:
	spin_unlock_bh(&txq->lock);
free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
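
/*
 * The sequence number written into the command header above packs both
 * the command queue number (QUEUE_TO_SEQ) and the ring index
 * (INDEX_TO_SEQ). The firmware echoes it back in the response, which
 * lets iwl_pcie_hcmd_complete() below recover the queue and the index
 * with SEQ_TO_QUEUE()/SEQ_TO_INDEX() and reclaim exactly the right
 * entry.
 */
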
/*
 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
 * @rxb: Rx buffer to reclaim
 */
void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
			    struct iwl_rx_cmd_buffer *rxb)
{
	struct iwl_rx_packet *pkt = rxb_addr(rxb);
	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
	u8 group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);

	iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
		iwl_op_mode_async_cb(trans->op_mode, cmd);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans_pcie->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}
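
/*
 * For CMD_WANT_SKB commands the RX page holding the response is stolen
 * from the RX path above and attached to the caller's iwl_host_cmd
 * (resp_pkt, _rx_page_addr, _rx_page_order); the caller is then
 * expected to release it with iwl_free_resp(), as the sync-command
 * error paths below do.
 */
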
#define HOST_COMPLETE_TIMEOUT	(2 * HZ)

static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
				    struct iwl_host_cmd *cmd)
{
	int ret;

	/* An asynchronous command can not expect an SKB to be set. */
	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
		return -EINVAL;

	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (ret < 0) {
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}
	return 0;
}

static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
				   struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
	int cmd_idx;
	int ret;

	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
				  &trans->status),
		 "Command %s: a command is already active!\n",
		 iwl_get_cmd_string(trans, cmd->id)))
		return -EIO;

	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
		       iwl_get_cmd_string(trans, cmd->id));

	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
	if (cmd_idx < 0) {
		ret = cmd_idx;
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_ERR(trans,
			"Error sending %s: enqueue_hcmd failed: %d\n",
			iwl_get_cmd_string(trans, cmd->id), ret);
		return ret;
	}

	ret = wait_event_timeout(trans_pcie->wait_command_queue,
				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
					   &trans->status),
				 HOST_COMPLETE_TIMEOUT);
	if (!ret) {
		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
			iwl_get_cmd_string(trans, cmd->id),
			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));

		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
			txq->read_ptr, txq->write_ptr);

		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd->id));
		ret = -ETIMEDOUT;

		iwl_trans_pcie_sync_nmi(trans);
		goto cancel;
	}

	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
		iwl_trans_pcie_dump_regs(trans);
		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
			iwl_get_cmd_string(trans, cmd->id));
		dump_stack();
		ret = -EIO;
		goto cancel;
	}

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
		ret = -ERFKILL;
		goto cancel;
	}

	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
			iwl_get_cmd_string(trans, cmd->id));
		ret = -EIO;
		goto cancel;
	}

	return 0;

cancel:
	if (cmd->flags & CMD_WANT_SKB) {
		/*
		 * Cancel the CMD_WANT_SKB flag for the cmd in the
		 * TX cmd queue. Otherwise in case the cmd comes
		 * in later, it will possibly set an invalid
		 * address (cmd->meta.source).
		 */
		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
	}

	if (cmd->resp_pkt) {
		iwl_free_resp(cmd);
		cmd->resp_pkt = NULL;
	}

	return ret;
}

int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
{
	/* Make sure the NIC is still alive in the bus */
	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
		return -ENODEV;

	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
				  cmd->id);
		return -ERFKILL;
	}

	if (cmd->flags & CMD_ASYNC)
		return iwl_pcie_send_hcmd_async(trans, cmd);

	/* We still can fail on RFKILL that can be asserted while we wait */
	return iwl_pcie_send_hcmd_sync(trans, cmd);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
					tb_phys, skb_frag_size(frag));
		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
						skb_frag_size(frag), false);
		if (tb_idx < 0)
			return tb_idx;

		out_meta->tbs |= BIT(tb_idx);
	}

	return 0;
}

#ifdef CONFIG_INET
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	u16 length, iv_len, amsdu_pad;
	u8 *start_hdr;
	struct iwl_tso_hdr_page *hdr_page;
	struct tso_t tso;

	/* if the packet is protected, then it must be CCMP or GCMP */
	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
	iv_len = ieee80211_has_protected(hdr->frame_control) ?
		IEEE80211_CCMP_HDR_LEN : 0;

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);

	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;

	/* Our device supports 9 segments at most, it will fit in 1 page */
	hdr_page = get_page_hdr(trans, hdr_room, skb);
	if (!hdr_page)
		return -ENOMEM;

	start_hdr = hdr_page->pos;
	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
	hdr_page->pos += iv_len;

	/*
	 * Pull the ieee80211 header + IV to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len + iv_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left =
			min_t(unsigned int, mss, total_len);
		struct sk_buff *csum_skb = NULL;
		unsigned int hdr_tb_len;
		dma_addr_t hdr_tb_phys;
		u8 *subf_hdrs_start = hdr_page->pos;

		total_len -= data_left;

		memset(hdr_page->pos, 0, amsdu_pad);
		hdr_page->pos += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
		hdr_page->pos += ETH_ALEN;
		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
		hdr_page->pos += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
		hdr_page->pos += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);

		hdr_page->pos += snap_ip_tcp_hdrlen;

		hdr_tb_len = hdr_page->pos - start_hdr;
		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
					     hdr_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
			dev_kfree_skb(csum_skb);
			return -EINVAL;
		}
		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
				       hdr_tb_len, false);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					hdr_tb_phys, hdr_tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);

		/* prepare the start_hdr for the next subframe */
		start_hdr = hdr_page->pos;

		/* put the payload */
		while (data_left) {
			unsigned int size = min_t(unsigned int, tso.size,
						  data_left);
			dma_addr_t tb_phys;

			tb_phys = dma_map_single(trans->dev, tso.data,
						 size, DMA_TO_DEVICE);
			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
				dev_kfree_skb(csum_skb);
				return -EINVAL;
			}

			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
					       size, false);
			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
						tb_phys, size);

			data_left -= size;
			tso_build_data(skb, &tso, size);
		}
	}

	/* re-add the WiFi header and IV */
	skb_push(skb, hdr_len + iv_len);

	return 0;
}
#else /* CONFIG_INET */
static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
				   struct iwl_txq *txq, u8 hdr_len,
				   struct iwl_cmd_meta *out_meta,
				   struct iwl_device_tx_cmd *dev_cmd,
				   u16 tb1_len)
{
	/* No A-MSDU without CONFIG_INET */
	WARN_ON(1);

	return -1;
}
#endif /* CONFIG_INET */
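
/*
 * Each A-MSDU subframe built above consists of padding (to 4-byte
 * align the previous subframe), DA, SA, a big-endian length field and
 * the SNAP/IP/TCP headers produced by tso_build_hdr(), followed by up
 * to mss bytes of payload mapped straight from the TSO iterator. For
 * example, a subframe whose ethhdr, SNAP/IP/TCP headers and payload
 * add up to 77 bytes is followed by 3 bytes of padding before the
 * next subframe starts.
 */
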
int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct ieee80211_hdr *hdr;
	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq;
	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
	void *tb1_addr;
	void *tfd;
	u16 len, tb1_len;
	bool wait_write_ptr;
	__le16 fc;
	u8 hdr_len;
	u16 wifi_seq;
	bool amsdu;

	txq = trans->txqs.txq[txq_id];

	if (WARN_ONCE(!test_bit(txq_id, trans->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_MAX_FRAGS(trans) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	/* mac80211 always puts the full header into the SKB's head,
	 * so there's no need to check if it's readable there
	 */
	hdr = (struct ieee80211_hdr *)skb->data;
	fc = hdr->frame_control;
	hdr_len = ieee80211_hdrlen(fc);

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->txqs.dev_cmd_offs);

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);

			spin_unlock(&txq->lock);
			return 0;
		}
	}

	/* In AGG mode, the index in the ring must correspond to the WiFi
	 * sequence number. This is a HW requirement to help the SCD to parse
	 * the BA.
	 * Check here that the packets are in the right place on the ring.
	 */
	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
	WARN_ONCE(txq->ampdu &&
		  (wifi_seq & 0xff) != txq->write_ptr,
		  "Q: %d WiFi Seq %d tfdNum %d",
		  txq_id, wifi_seq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[txq->write_ptr].skb = skb;
	txq->entries[txq->write_ptr].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(txq->write_ptr)));

	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
		       offsetof(struct iwl_tx_cmd, scratch);

	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[txq->write_ptr].meta;
	out_meta->flags = 0;

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
	      hdr_len - IWL_FIRST_TB_SIZE;
	/* do not align A-MSDU to dword as the subframe header aligns it */
	amsdu = ieee80211_is_data_qos(fc) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
	if (!amsdu) {
		tb1_len = ALIGN(len, 4);
		/* Tell NIC about any 2-byte padding after MAC header */
		if (tb1_len != len)
			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
	} else {
		tb1_len = len;
	}

	/*
	 * The first TB points to bi-directional DMA data, we'll
	 * memcpy the data into it later.
	 */
	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
			       IWL_FIRST_TB_SIZE, true);

	/* there must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
		goto out_err;
	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);

	trace_iwlwifi_dev_tx(trans->dev, skb,
			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
			     trans->txqs.tfd.size,
			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
			     hdr_len);

	/*
	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
	 * (adding subframes, etc.).
	 * This can happen in some testing flows when the amsdu was already
	 * pre-built, and we just need to send the resulting skb.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size) {
		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
						     out_meta, dev_cmd,
						     tb1_len)))
			goto out_err;
	} else {
		struct sk_buff *frag;

		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
					       out_meta)))
			goto out_err;

		skb_walk_frags(skb, frag) {
			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
						       out_meta)))
				goto out_err;
		}
	}

	/* building the A-MSDU might have changed this data, so memcpy it now */
	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);

	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
	/* Set up entry for this TFD in Tx byte-count array */
	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
					 iwl_txq_gen1_tfd_get_num_tbs(trans,
								      tfd));

	wait_write_ptr = ieee80211_has_morefrags(fc);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
		/*
		 * If the TXQ is active, then set the timer, if not,
		 * set the timer in remainder so that the timer will
		 * be armed with the right value when the station will
		 * wake up.
		 */
		if (!txq->frozen)
			mod_timer(&txq->stuck_timer,
				  jiffies + txq->wd_timeout);
		else
			txq->frozen_expiry_remainder = txq->wd_timeout;
	}

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	if (!wait_write_ptr)
		iwl_pcie_txq_inc_wr_ptr(trans, txq);

	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
out_err:
	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
	spin_unlock(&txq->lock);
	return -1;
}