// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/dmapool.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/tcp.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "fw/api/commands.h"
#include "fw/api/datapath.h"
#include "fw/api/debug.h"
#include "iwl-fh.h"
#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

/*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill. Driver and device exchange status of each
 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits. If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
 * When packets are reclaimed (on the 'tx done' IRQ) and the free space becomes
 * > high mark, the Tx queue is resumed.
 *
 ***************************************************/


int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->txqs.cmd.q_id &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
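		 * (In that case the new write pointer is not pushed to the
		 *  device here - it is only recorded via txq->need_update
		 *  below and written out later from
		 *  iwl_pcie_txq_check_wrptrs().)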
100 */ 101 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1); 102 103 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) { 104 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n", 105 txq_id, reg); 106 iwl_set_bit(trans, CSR_GP_CNTRL, 107 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 108 txq->need_update = true; 109 return; 110 } 111 } 112 113 /* 114 * if not in power-save mode, uCode will never sleep when we're 115 * trying to tx (during RFKILL, we're not trying to tx). 116 */ 117 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr); 118 if (!txq->block) 119 iwl_write32(trans, HBUS_TARG_WRPTR, 120 txq->write_ptr | (txq_id << 8)); 121 } 122 123 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans) 124 { 125 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 126 int i; 127 128 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 129 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; 130 131 if (!test_bit(i, trans_pcie->txqs.queue_used)) 132 continue; 133 134 spin_lock_bh(&txq->lock); 135 if (txq->need_update) { 136 iwl_pcie_txq_inc_wr_ptr(trans, txq); 137 txq->need_update = false; 138 } 139 spin_unlock_bh(&txq->lock); 140 } 141 } 142 143 static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd, 144 u8 idx, dma_addr_t addr, u16 len) 145 { 146 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 147 u16 hi_n_len = len << 4; 148 149 put_unaligned_le32(addr, &tb->lo); 150 hi_n_len |= iwl_get_dma_hi_addr(addr); 151 152 tb->hi_n_len = cpu_to_le16(hi_n_len); 153 154 tfd->num_tbs = idx + 1; 155 } 156 157 static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd) 158 { 159 return tfd->num_tbs & 0x1f; 160 } 161 162 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 163 dma_addr_t addr, u16 len, bool reset) 164 { 165 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 166 void *tfd; 167 u32 num_tbs; 168 169 tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr; 170 171 if (reset) 172 memset(tfd, 0, trans_pcie->txqs.tfd.size); 173 174 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd); 175 176 /* Each TFD can point to a maximum max_tbs Tx buffers */ 177 if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) { 178 IWL_ERR(trans, "Error can not send more than %d chunks\n", 179 trans_pcie->txqs.tfd.max_tbs); 180 return -EINVAL; 181 } 182 183 if (WARN(addr & ~IWL_TX_DMA_MASK, 184 "Unaligned address = %llx\n", (unsigned long long)addr)) 185 return -EINVAL; 186 187 iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len); 188 189 return num_tbs; 190 } 191 192 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans) 193 { 194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 195 196 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) 197 return; 198 199 spin_lock(&trans_pcie->reg_lock); 200 201 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) { 202 spin_unlock(&trans_pcie->reg_lock); 203 return; 204 } 205 206 trans_pcie->cmd_hold_nic_awake = false; 207 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL, 208 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ); 209 spin_unlock(&trans_pcie->reg_lock); 210 } 211 212 static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans, 213 struct page *page) 214 { 215 struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page)); 216 217 /* Decrease internal use count and unmap/free page if needed */ 218 if (refcount_dec_and_test(&info->use_count)) { 219 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE, 220 DMA_TO_DEVICE); 221 222 __free_page(page); 223 } 224 } 225 226 
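/*
 * Note on the TSO header pages handled here and below (a summary based on
 * the allocation path in iwl_pcie_get_page_hdr()): each per-CPU header page
 * is DMA-mapped once and reference counted - one reference is held by the
 * per-CPU cache while the page still has room, and one is taken for each
 * caller that gets header space from it.  Pages used by one SKB are chained
 * through iwl_tso_page_info::next; that bookkeeping structure sits at the
 * end of the page (IWL_TSO_PAGE_INFO()), which also keeps header data away
 * from the very last bytes of the page (see the 32-bit boundary note in
 * iwl_pcie_get_page_hdr()).
 */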
void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb, 227 struct iwl_cmd_meta *cmd_meta) 228 { 229 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 230 struct page **page_ptr; 231 struct page *next; 232 233 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); 234 next = *page_ptr; 235 *page_ptr = NULL; 236 237 while (next) { 238 struct iwl_tso_page_info *info; 239 struct page *tmp = next; 240 241 info = IWL_TSO_PAGE_INFO(page_address(next)); 242 next = info->next; 243 244 /* Unmap the scatter gather list that is on the last page */ 245 if (!next && cmd_meta->sg_offset) { 246 struct sg_table *sgt; 247 248 sgt = (void *)((u8 *)page_address(tmp) + 249 cmd_meta->sg_offset); 250 251 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0); 252 } 253 254 iwl_pcie_free_and_unmap_tso_page(trans, tmp); 255 } 256 } 257 258 static inline dma_addr_t 259 iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx) 260 { 261 struct iwl_tfd_tb *tb = &tfd->tbs[idx]; 262 dma_addr_t addr; 263 dma_addr_t hi_len; 264 265 addr = get_unaligned_le32(&tb->lo); 266 267 if (sizeof(dma_addr_t) <= sizeof(u32)) 268 return addr; 269 270 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF; 271 272 /* 273 * shift by 16 twice to avoid warnings on 32-bit 274 * (where this code never runs anyway due to the 275 * if statement above) 276 */ 277 return addr | ((hi_len << 16) << 16); 278 } 279 280 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans, 281 struct iwl_tfd *tfd) 282 { 283 tfd->num_tbs = 0; 284 285 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma, 286 trans->invalid_tx_cmd.size); 287 } 288 289 static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans, 290 struct iwl_cmd_meta *meta, 291 struct iwl_txq *txq, int index) 292 { 293 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 294 int i, num_tbs; 295 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index); 296 297 /* Sanity check on number of chunks */ 298 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd); 299 300 if (num_tbs > trans_pcie->txqs.tfd.max_tbs) { 301 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs); 302 /* @todo issue fatal error, it is quite serious situation */ 303 return; 304 } 305 306 /* TB1 is mapped directly, the rest is the TSO page and SG list. 
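	 * When an SG list was used (sg_offset != 0), the remaining TBs point
	 * into the DMA-mapped TSO page / sgtable, which is unmapped separately
	 * in iwl_pcie_free_tso_pages(), so only TB1 needs to be unmapped here.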
*/ 307 if (meta->sg_offset) 308 num_tbs = 2; 309 310 /* first TB is never freed - it's the bidirectional DMA data */ 311 312 for (i = 1; i < num_tbs; i++) { 313 if (meta->tbs & BIT(i)) 314 dma_unmap_page(trans->dev, 315 iwl_txq_gen1_tfd_tb_get_addr(tfd, i), 316 iwl_txq_gen1_tfd_tb_get_len(trans, 317 tfd, i), 318 DMA_TO_DEVICE); 319 else 320 dma_unmap_single(trans->dev, 321 iwl_txq_gen1_tfd_tb_get_addr(tfd, i), 322 iwl_txq_gen1_tfd_tb_get_len(trans, 323 tfd, i), 324 DMA_TO_DEVICE); 325 } 326 327 meta->tbs = 0; 328 329 iwl_txq_set_tfd_invalid_gen1(trans, tfd); 330 } 331 332 /** 333 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr] 334 * @trans: transport private data 335 * @txq: tx queue 336 * @read_ptr: the TXQ read_ptr to free 337 * 338 * Does NOT advance any TFD circular buffer read/write indexes 339 * Does NOT free the TFD itself (which is within circular buffer) 340 */ 341 static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq, 342 int read_ptr) 343 { 344 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and 345 * idx is bounded by n_window 346 */ 347 int idx = iwl_txq_get_cmd_index(txq, read_ptr); 348 struct sk_buff *skb; 349 350 lockdep_assert_held(&txq->reclaim_lock); 351 352 if (!txq->entries) 353 return; 354 355 /* We have only q->n_window txq->entries, but we use 356 * TFD_QUEUE_SIZE_MAX tfds 357 */ 358 if (trans->trans_cfg->gen2) 359 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta, 360 iwl_txq_get_tfd(trans, txq, read_ptr)); 361 else 362 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta, 363 txq, read_ptr); 364 365 /* free SKB */ 366 skb = txq->entries[idx].skb; 367 368 /* Can be called from irqs-disabled context 369 * If skb is not NULL, it means that the whole queue is being 370 * freed and that the queue is not empty - free the skb 371 */ 372 if (skb) { 373 iwl_op_mode_free_skb(trans->op_mode, skb); 374 txq->entries[idx].skb = NULL; 375 } 376 } 377 378 /* 379 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's 380 */ 381 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) 382 { 383 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 384 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 385 386 if (!txq) { 387 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n"); 388 return; 389 } 390 391 spin_lock_bh(&txq->reclaim_lock); 392 spin_lock(&txq->lock); 393 while (txq->write_ptr != txq->read_ptr) { 394 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n", 395 txq_id, txq->read_ptr); 396 397 if (txq_id != trans_pcie->txqs.cmd.q_id) { 398 struct sk_buff *skb = txq->entries[txq->read_ptr].skb; 399 struct iwl_cmd_meta *cmd_meta = 400 &txq->entries[txq->read_ptr].meta; 401 402 if (WARN_ON_ONCE(!skb)) 403 continue; 404 405 iwl_pcie_free_tso_pages(trans, skb, cmd_meta); 406 } 407 iwl_txq_free_tfd(trans, txq, txq->read_ptr); 408 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 409 410 if (txq->read_ptr == txq->write_ptr && 411 txq_id == trans_pcie->txqs.cmd.q_id) 412 iwl_pcie_clear_cmd_in_flight(trans); 413 } 414 415 while (!skb_queue_empty(&txq->overflow_q)) { 416 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q); 417 418 iwl_op_mode_free_skb(trans->op_mode, skb); 419 } 420 421 spin_unlock(&txq->lock); 422 spin_unlock_bh(&txq->reclaim_lock); 423 424 /* just in case - this queue may have been stopped */ 425 iwl_trans_pcie_wake_queue(trans, txq); 426 } 427 428 /* 429 * iwl_pcie_txq_free - Deallocate DMA queue. 430 * @txq: Transmit queue to deallocate. 
431 * 432 * Empty queue by removing and destroying all BD's. 433 * Free all buffers. 434 * 0-fill, but do not free "txq" descriptor structure. 435 */ 436 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) 437 { 438 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 439 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 440 struct device *dev = trans->dev; 441 int i; 442 443 if (WARN_ON(!txq)) 444 return; 445 446 iwl_pcie_txq_unmap(trans, txq_id); 447 448 /* De-alloc array of command/tx buffers */ 449 if (txq_id == trans_pcie->txqs.cmd.q_id) 450 for (i = 0; i < txq->n_window; i++) { 451 kfree_sensitive(txq->entries[i].cmd); 452 kfree_sensitive(txq->entries[i].free_buf); 453 } 454 455 /* De-alloc circular buffer of TFDs */ 456 if (txq->tfds) { 457 dma_free_coherent(dev, 458 trans_pcie->txqs.tfd.size * 459 trans->trans_cfg->base_params->max_tfd_queue_size, 460 txq->tfds, txq->dma_addr); 461 txq->dma_addr = 0; 462 txq->tfds = NULL; 463 464 dma_free_coherent(dev, 465 sizeof(*txq->first_tb_bufs) * txq->n_window, 466 txq->first_tb_bufs, txq->first_tb_dma); 467 } 468 469 kfree(txq->entries); 470 txq->entries = NULL; 471 472 del_timer_sync(&txq->stuck_timer); 473 474 /* 0-fill queue descriptor structure */ 475 memset(txq, 0, sizeof(*txq)); 476 } 477 478 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr) 479 { 480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 481 int nq = trans->trans_cfg->base_params->num_of_queues; 482 int chan; 483 u32 reg_val; 484 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) - 485 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32); 486 487 /* make sure all queue are not stopped/used */ 488 memset(trans_pcie->txqs.queue_stopped, 0, 489 sizeof(trans_pcie->txqs.queue_stopped)); 490 memset(trans_pcie->txqs.queue_used, 0, 491 sizeof(trans_pcie->txqs.queue_used)); 492 493 trans_pcie->scd_base_addr = 494 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR); 495 496 WARN_ON(scd_base_addr != 0 && 497 scd_base_addr != trans_pcie->scd_base_addr); 498 499 /* reset context data, TX status and translation data */ 500 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr + 501 SCD_CONTEXT_MEM_LOWER_BOUND, 502 NULL, clear_dwords); 503 504 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR, 505 trans_pcie->txqs.scd_bc_tbls.dma >> 10); 506 507 /* The chain extension of the SCD doesn't work well. This feature is 508 * enabled by default by the HW, so we need to disable it manually. 
 */
	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
				trans_pcie->txqs.cmd.fifo,
				trans_pcie->txqs.cmd.wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to avoid
	 * having invalid accesses
	 */
	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
		if (trans->trans_cfg->gen2)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ch, ret;
	u32 mask = 0;

	spin_lock_bh(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans);

out:
	spin_unlock_bh(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
624 * Since we stop Tx altogether - mark the queues as stopped. 625 */ 626 memset(trans_pcie->txqs.queue_stopped, 0, 627 sizeof(trans_pcie->txqs.queue_stopped)); 628 memset(trans_pcie->txqs.queue_used, 0, 629 sizeof(trans_pcie->txqs.queue_used)); 630 631 /* This can happen: start_hw, stop_device */ 632 if (!trans_pcie->txq_memory) 633 return 0; 634 635 /* Unmap DMA from host system and free skb's */ 636 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 637 txq_id++) 638 iwl_pcie_txq_unmap(trans, txq_id); 639 640 return 0; 641 } 642 643 /* 644 * iwl_trans_tx_free - Free TXQ Context 645 * 646 * Destroy all TX DMA queues and structures 647 */ 648 void iwl_pcie_tx_free(struct iwl_trans *trans) 649 { 650 int txq_id; 651 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 652 653 memset(trans_pcie->txqs.queue_used, 0, 654 sizeof(trans_pcie->txqs.queue_used)); 655 656 /* Tx queues */ 657 if (trans_pcie->txq_memory) { 658 for (txq_id = 0; 659 txq_id < trans->trans_cfg->base_params->num_of_queues; 660 txq_id++) { 661 iwl_pcie_txq_free(trans, txq_id); 662 trans_pcie->txqs.txq[txq_id] = NULL; 663 } 664 } 665 666 kfree(trans_pcie->txq_memory); 667 trans_pcie->txq_memory = NULL; 668 669 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw); 670 671 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls); 672 } 673 674 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) 675 { 676 u32 txq_id = txq->id; 677 u32 status; 678 bool active; 679 u8 fifo; 680 681 if (trans->trans_cfg->gen2) { 682 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id, 683 txq->read_ptr, txq->write_ptr); 684 /* TODO: access new SCD registers and dump them */ 685 return; 686 } 687 688 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id)); 689 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7; 690 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE)); 691 692 IWL_ERR(trans, 693 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n", 694 txq_id, active ? "" : "in", fifo, 695 jiffies_to_msecs(txq->wd_timeout), 696 txq->read_ptr, txq->write_ptr, 697 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & 698 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 699 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & 700 (trans->trans_cfg->base_params->max_tfd_queue_size - 1), 701 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); 702 } 703 704 static void iwl_txq_stuck_timer(struct timer_list *t) 705 { 706 struct iwl_txq *txq = from_timer(txq, t, stuck_timer); 707 struct iwl_trans *trans = txq->trans; 708 709 spin_lock(&txq->lock); 710 /* check if triggered erroneously */ 711 if (txq->read_ptr == txq->write_ptr) { 712 spin_unlock(&txq->lock); 713 return; 714 } 715 spin_unlock(&txq->lock); 716 717 iwl_txq_log_scd_error(trans, txq); 718 719 iwl_force_nmi(trans); 720 } 721 722 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, 723 int slots_num, bool cmd_queue) 724 { 725 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 726 size_t num_entries = trans->trans_cfg->gen2 ? 
727 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size; 728 size_t tfd_sz; 729 size_t tb0_buf_sz; 730 int i; 731 732 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num)) 733 return -EINVAL; 734 735 if (WARN_ON(txq->entries || txq->tfds)) 736 return -EINVAL; 737 738 tfd_sz = trans_pcie->txqs.tfd.size * num_entries; 739 740 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0); 741 txq->trans = trans; 742 743 txq->n_window = slots_num; 744 745 txq->entries = kcalloc(slots_num, 746 sizeof(struct iwl_pcie_txq_entry), 747 GFP_KERNEL); 748 749 if (!txq->entries) 750 goto error; 751 752 if (cmd_queue) 753 for (i = 0; i < slots_num; i++) { 754 txq->entries[i].cmd = 755 kmalloc(sizeof(struct iwl_device_cmd), 756 GFP_KERNEL); 757 if (!txq->entries[i].cmd) 758 goto error; 759 } 760 761 /* Circular buffer of transmit frame descriptors (TFDs), 762 * shared with device 763 */ 764 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz, 765 &txq->dma_addr, GFP_KERNEL); 766 if (!txq->tfds) 767 goto error; 768 769 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN); 770 771 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num; 772 773 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz, 774 &txq->first_tb_dma, 775 GFP_KERNEL); 776 if (!txq->first_tb_bufs) 777 goto err_free_tfds; 778 779 for (i = 0; i < num_entries; i++) { 780 void *tfd = iwl_txq_get_tfd(trans, txq, i); 781 782 if (trans->trans_cfg->gen2) 783 iwl_txq_set_tfd_invalid_gen2(trans, tfd); 784 else 785 iwl_txq_set_tfd_invalid_gen1(trans, tfd); 786 } 787 788 return 0; 789 err_free_tfds: 790 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr); 791 txq->tfds = NULL; 792 error: 793 if (txq->entries && cmd_queue) 794 for (i = 0; i < slots_num; i++) 795 kfree(txq->entries[i].cmd); 796 kfree(txq->entries); 797 txq->entries = NULL; 798 799 return -ENOMEM; 800 } 801 802 /* 803 * iwl_pcie_tx_alloc - allocate TX context 804 * Allocate all Tx DMA structures and initialize them 805 */ 806 static int iwl_pcie_tx_alloc(struct iwl_trans *trans) 807 { 808 int ret; 809 int txq_id, slots_num; 810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 811 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues; 812 813 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)) 814 return -EINVAL; 815 816 bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl); 817 818 /*It is not allowed to alloc twice, so warn when this happens. 
819 * We cannot rely on the previous allocation, so free and fail */ 820 if (WARN_ON(trans_pcie->txq_memory)) { 821 ret = -EINVAL; 822 goto error; 823 } 824 825 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls, 826 bc_tbls_size); 827 if (ret) { 828 IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); 829 goto error; 830 } 831 832 /* Alloc keep-warm buffer */ 833 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE); 834 if (ret) { 835 IWL_ERR(trans, "Keep Warm allocation failed\n"); 836 goto error; 837 } 838 839 trans_pcie->txq_memory = 840 kcalloc(trans->trans_cfg->base_params->num_of_queues, 841 sizeof(struct iwl_txq), GFP_KERNEL); 842 if (!trans_pcie->txq_memory) { 843 IWL_ERR(trans, "Not enough memory for txq\n"); 844 ret = -ENOMEM; 845 goto error; 846 } 847 848 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 849 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 850 txq_id++) { 851 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); 852 853 if (cmd_queue) 854 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 855 trans->cfg->min_txq_size); 856 else 857 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 858 trans->cfg->min_ba_txq_size); 859 trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id]; 860 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id], 861 slots_num, cmd_queue); 862 if (ret) { 863 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id); 864 goto error; 865 } 866 trans_pcie->txqs.txq[txq_id]->id = txq_id; 867 } 868 869 return 0; 870 871 error: 872 iwl_pcie_tx_free(trans); 873 874 return ret; 875 } 876 877 /* 878 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes 879 */ 880 static int iwl_queue_init(struct iwl_txq *q, int slots_num) 881 { 882 q->n_window = slots_num; 883 884 /* slots_num must be power-of-two size, otherwise 885 * iwl_txq_get_cmd_index is broken. 886 */ 887 if (WARN_ON(!is_power_of_2(slots_num))) 888 return -EINVAL; 889 890 q->low_mark = q->n_window / 4; 891 if (q->low_mark < 4) 892 q->low_mark = 4; 893 894 q->high_mark = q->n_window / 8; 895 if (q->high_mark < 2) 896 q->high_mark = 2; 897 898 q->write_ptr = 0; 899 q->read_ptr = 0; 900 901 return 0; 902 } 903 904 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, 905 int slots_num, bool cmd_queue) 906 { 907 u32 tfd_queue_max_size = 908 trans->trans_cfg->base_params->max_tfd_queue_size; 909 int ret; 910 911 txq->need_update = false; 912 913 /* max_tfd_queue_size must be power-of-two size, otherwise 914 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken. 
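	 * (The wrap helpers advance the index and mask it with (size - 1);
	 *  with a size of 256, for example, (255 + 1) & 255 wraps back to 0.
	 *  This only works when the size is a power of two, hence the check
	 *  below.)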
915 */ 916 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), 917 "Max tfd queue size must be a power of two, but is %d", 918 tfd_queue_max_size)) 919 return -EINVAL; 920 921 /* Initialize queue's high/low-water marks, and head/tail indexes */ 922 ret = iwl_queue_init(txq, slots_num); 923 if (ret) 924 return ret; 925 926 spin_lock_init(&txq->lock); 927 spin_lock_init(&txq->reclaim_lock); 928 929 if (cmd_queue) { 930 static struct lock_class_key iwl_txq_cmd_queue_lock_class; 931 932 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class); 933 } 934 935 __skb_queue_head_init(&txq->overflow_q); 936 937 return 0; 938 } 939 940 int iwl_pcie_tx_init(struct iwl_trans *trans) 941 { 942 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 943 int ret; 944 int txq_id, slots_num; 945 bool alloc = false; 946 947 if (!trans_pcie->txq_memory) { 948 ret = iwl_pcie_tx_alloc(trans); 949 if (ret) 950 goto error; 951 alloc = true; 952 } 953 954 spin_lock_bh(&trans_pcie->irq_lock); 955 956 /* Turn off all Tx DMA fifos */ 957 iwl_scd_deactivate_fifos(trans); 958 959 /* Tell NIC where to find the "keep warm" buffer */ 960 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG, 961 trans_pcie->kw.dma >> 4); 962 963 spin_unlock_bh(&trans_pcie->irq_lock); 964 965 /* Alloc and init all Tx queues, including the command queue (#4/#9) */ 966 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues; 967 txq_id++) { 968 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id); 969 970 if (cmd_queue) 971 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE, 972 trans->cfg->min_txq_size); 973 else 974 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE, 975 trans->cfg->min_ba_txq_size); 976 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num, 977 cmd_queue); 978 if (ret) { 979 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id); 980 goto error; 981 } 982 983 /* 984 * Tell nic where to find circular buffer of TFDs for a 985 * given Tx queue, and enable the DMA channel used for that 986 * queue. 987 * Circular buffer (TFD queue in DRAM) physical base address 988 */ 989 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id), 990 trans_pcie->txqs.txq[txq_id]->dma_addr >> 8); 991 } 992 993 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE); 994 if (trans->trans_cfg->base_params->num_of_queues > 20) 995 iwl_set_bits_prph(trans, SCD_GP_CTRL, 996 SCD_GP_CTRL_ENABLE_31_QUEUES); 997 998 return 0; 999 error: 1000 /*Upon error, free only if we allocated something */ 1001 if (alloc) 1002 iwl_pcie_tx_free(trans); 1003 return ret; 1004 } 1005 1006 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans, 1007 const struct iwl_host_cmd *cmd) 1008 { 1009 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1010 1011 /* Make sure the NIC is still alive in the bus */ 1012 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 1013 return -ENODEV; 1014 1015 if (!trans->trans_cfg->base_params->apmg_wake_up_wa) 1016 return 0; 1017 1018 /* 1019 * wake up the NIC to make sure that the firmware will see the host 1020 * command - we will let the NIC sleep once all the host commands 1021 * returned. This needs to be done only on NICs that have 1022 * apmg_wake_up_wa set (see above.) 1023 */ 1024 if (!_iwl_trans_pcie_grab_nic_access(trans)) 1025 return -EIO; 1026 1027 /* 1028 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock. 1029 * There, we also returned immediately if cmd_hold_nic_awake is 1030 * already true, so it's OK to unconditionally set it to true. 
1031 */ 1032 trans_pcie->cmd_hold_nic_awake = true; 1033 spin_unlock(&trans_pcie->reg_lock); 1034 1035 return 0; 1036 } 1037 1038 static void iwl_txq_progress(struct iwl_txq *txq) 1039 { 1040 lockdep_assert_held(&txq->lock); 1041 1042 if (!txq->wd_timeout) 1043 return; 1044 1045 /* 1046 * station is asleep and we send data - that must 1047 * be uAPSD or PS-Poll. Don't rearm the timer. 1048 */ 1049 if (txq->frozen) 1050 return; 1051 1052 /* 1053 * if empty delete timer, otherwise move timer forward 1054 * since we're making progress on this queue 1055 */ 1056 if (txq->read_ptr == txq->write_ptr) 1057 del_timer(&txq->stuck_timer); 1058 else 1059 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1060 } 1061 1062 static inline bool iwl_txq_used(const struct iwl_txq *q, int i, 1063 int read_ptr, int write_ptr) 1064 { 1065 int index = iwl_txq_get_cmd_index(q, i); 1066 int r = iwl_txq_get_cmd_index(q, read_ptr); 1067 int w = iwl_txq_get_cmd_index(q, write_ptr); 1068 1069 return w >= r ? 1070 (index >= r && index < w) : 1071 !(index < r && index >= w); 1072 } 1073 1074 /* 1075 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd 1076 * 1077 * When FW advances 'R' index, all entries between old and new 'R' index 1078 * need to be reclaimed. As result, some free space forms. If there is 1079 * enough free space (> low mark), wake the stack that feeds us. 1080 */ 1081 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) 1082 { 1083 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1084 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 1085 int nfreed = 0; 1086 u16 r; 1087 1088 lockdep_assert_held(&txq->lock); 1089 1090 idx = iwl_txq_get_cmd_index(txq, idx); 1091 r = iwl_txq_get_cmd_index(txq, txq->read_ptr); 1092 1093 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size || 1094 (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) { 1095 WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used), 1096 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", 1097 __func__, txq_id, idx, 1098 trans->trans_cfg->base_params->max_tfd_queue_size, 1099 txq->write_ptr, txq->read_ptr); 1100 return; 1101 } 1102 1103 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx; 1104 r = iwl_txq_inc_wrap(trans, r)) { 1105 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr); 1106 1107 if (nfreed++ > 0) { 1108 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", 1109 idx, txq->write_ptr, r); 1110 iwl_force_nmi(trans); 1111 } 1112 } 1113 1114 if (txq->read_ptr == txq->write_ptr) 1115 iwl_pcie_clear_cmd_in_flight(trans); 1116 1117 iwl_txq_progress(txq); 1118 } 1119 1120 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid, 1121 u16 txq_id) 1122 { 1123 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1124 u32 tbl_dw_addr; 1125 u32 tbl_dw; 1126 u16 scd_q2ratid; 1127 1128 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK; 1129 1130 tbl_dw_addr = trans_pcie->scd_base_addr + 1131 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id); 1132 1133 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr); 1134 1135 if (txq_id & 0x1) 1136 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF); 1137 else 1138 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000); 1139 1140 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw); 1141 1142 return 0; 1143 } 1144 1145 /* Receiver address (actually, Rx station's index into station table), 1146 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */ 1147 
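/* e.g. BUILD_RAxTID(5, 2) == 0x52: sta_id in the upper bits, tid in bits 3:0 */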
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior configuring the cmd queue */
		if (txq_id == trans_pcie->txqs.cmd.q_id &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->txqs.cmd.q_id)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only when cfg is NULL, since this trick can
		 * be done only if we have DQA enabled, which is true for
		 * mvm only - and mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->trans_cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
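	 * (Only the low 8 bits of the SSN are used - read_ptr/write_ptr below
	 *  are set to ssn & 0xff, matching the legacy 256-entry TFD ring.)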
1217 * Assumes that ssn_idx is valid (!= 0xFFF) */ 1218 txq->read_ptr = (ssn & 0xff); 1219 txq->write_ptr = (ssn & 0xff); 1220 iwl_write_direct32(trans, HBUS_TARG_WRPTR, 1221 (ssn & 0xff) | (txq_id << 8)); 1222 1223 if (cfg) { 1224 u8 frame_limit = cfg->frame_limit; 1225 1226 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn); 1227 1228 /* Set up Tx window size and frame limit for this queue */ 1229 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr + 1230 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0); 1231 iwl_trans_write_mem32(trans, 1232 trans_pcie->scd_base_addr + 1233 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32), 1234 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) | 1235 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit)); 1236 1237 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */ 1238 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id), 1239 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) | 1240 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) | 1241 (1 << SCD_QUEUE_STTS_REG_POS_WSL) | 1242 SCD_QUEUE_STTS_REG_MSK); 1243 1244 /* enable the scheduler for this queue (only) */ 1245 if (txq_id == trans_pcie->txqs.cmd.q_id && 1246 trans_pcie->scd_set_active) 1247 iwl_scd_enable_set_active(trans, BIT(txq_id)); 1248 1249 IWL_DEBUG_TX_QUEUES(trans, 1250 "Activate queue %d on FIFO %d WrPtr: %d\n", 1251 txq_id, fifo, ssn & 0xff); 1252 } else { 1253 IWL_DEBUG_TX_QUEUES(trans, 1254 "Activate queue %d WrPtr: %d\n", 1255 txq_id, ssn & 0xff); 1256 } 1257 1258 return scd_bug; 1259 } 1260 1261 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id, 1262 bool shared_mode) 1263 { 1264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1265 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 1266 1267 txq->ampdu = !shared_mode; 1268 } 1269 1270 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id, 1271 bool configure_scd) 1272 { 1273 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1274 u32 stts_addr = trans_pcie->scd_base_addr + 1275 SCD_TX_STTS_QUEUE_OFFSET(txq_id); 1276 static const u32 zero_val[4] = {}; 1277 1278 trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0; 1279 trans_pcie->txqs.txq[txq_id]->frozen = false; 1280 1281 /* 1282 * Upon HW Rfkill - we stop the device, and then stop the queues 1283 * in the op_mode. Just for the sake of the simplicity of the op_mode, 1284 * allow the op_mode to call txq_disable after it already called 1285 * stop_device. 
1286 */ 1287 if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) { 1288 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status), 1289 "queue %d not used", txq_id); 1290 return; 1291 } 1292 1293 if (configure_scd) { 1294 iwl_scd_txq_set_inactive(trans, txq_id); 1295 1296 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val, 1297 ARRAY_SIZE(zero_val)); 1298 } 1299 1300 iwl_pcie_txq_unmap(trans, txq_id); 1301 trans_pcie->txqs.txq[txq_id]->ampdu = false; 1302 1303 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id); 1304 } 1305 1306 /*************** HOST COMMAND QUEUE FUNCTIONS *****/ 1307 1308 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block) 1309 { 1310 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1311 int i; 1312 1313 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) { 1314 struct iwl_txq *txq = trans_pcie->txqs.txq[i]; 1315 1316 if (i == trans_pcie->txqs.cmd.q_id) 1317 continue; 1318 1319 /* we skip the command queue (obviously) so it's OK to nest */ 1320 spin_lock_nested(&txq->lock, 1); 1321 1322 if (!block && !(WARN_ON_ONCE(!txq->block))) { 1323 txq->block--; 1324 if (!txq->block) { 1325 iwl_write32(trans, HBUS_TARG_WRPTR, 1326 txq->write_ptr | (i << 8)); 1327 } 1328 } else if (block) { 1329 txq->block++; 1330 } 1331 1332 spin_unlock(&txq->lock); 1333 } 1334 } 1335 1336 /* 1337 * iwl_pcie_enqueue_hcmd - enqueue a uCode command 1338 * @priv: device private data point 1339 * @cmd: a pointer to the ucode command structure 1340 * 1341 * The function returns < 0 values to indicate the operation 1342 * failed. On success, it returns the index (>= 0) of command in the 1343 * command queue. 1344 */ 1345 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, 1346 struct iwl_host_cmd *cmd) 1347 { 1348 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1349 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; 1350 struct iwl_device_cmd *out_cmd; 1351 struct iwl_cmd_meta *out_meta; 1352 void *dup_buf = NULL; 1353 dma_addr_t phys_addr; 1354 int idx; 1355 u16 copy_size, cmd_size, tb0_size; 1356 bool had_nocopy = false; 1357 u8 group_id = iwl_cmd_groupid(cmd->id); 1358 int i, ret; 1359 u32 cmd_pos; 1360 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD]; 1361 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD]; 1362 unsigned long flags; 1363 1364 if (WARN(!trans->wide_cmd_header && 1365 group_id > IWL_ALWAYS_LONG_GROUP, 1366 "unsupported wide command %#x\n", cmd->id)) 1367 return -EINVAL; 1368 1369 if (group_id != 0) { 1370 copy_size = sizeof(struct iwl_cmd_header_wide); 1371 cmd_size = sizeof(struct iwl_cmd_header_wide); 1372 } else { 1373 copy_size = sizeof(struct iwl_cmd_header); 1374 cmd_size = sizeof(struct iwl_cmd_header); 1375 } 1376 1377 /* need one for the header if the first is NOCOPY */ 1378 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1); 1379 1380 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1381 cmddata[i] = cmd->data[i]; 1382 cmdlen[i] = cmd->len[i]; 1383 1384 if (!cmd->len[i]) 1385 continue; 1386 1387 /* need at least IWL_FIRST_TB_SIZE copied */ 1388 if (copy_size < IWL_FIRST_TB_SIZE) { 1389 int copy = IWL_FIRST_TB_SIZE - copy_size; 1390 1391 if (copy > cmdlen[i]) 1392 copy = cmdlen[i]; 1393 cmdlen[i] -= copy; 1394 cmddata[i] += copy; 1395 copy_size += copy; 1396 } 1397 1398 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) { 1399 had_nocopy = true; 1400 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) { 1401 idx = -EINVAL; 1402 goto free_dup_buf; 1403 } 1404 } else if 
(cmd->dataflags[i] & IWL_HCMD_DFL_DUP) { 1405 /* 1406 * This is also a chunk that isn't copied 1407 * to the static buffer so set had_nocopy. 1408 */ 1409 had_nocopy = true; 1410 1411 /* only allowed once */ 1412 if (WARN_ON(dup_buf)) { 1413 idx = -EINVAL; 1414 goto free_dup_buf; 1415 } 1416 1417 dup_buf = kmemdup(cmddata[i], cmdlen[i], 1418 GFP_ATOMIC); 1419 if (!dup_buf) 1420 return -ENOMEM; 1421 } else { 1422 /* NOCOPY must not be followed by normal! */ 1423 if (WARN_ON(had_nocopy)) { 1424 idx = -EINVAL; 1425 goto free_dup_buf; 1426 } 1427 copy_size += cmdlen[i]; 1428 } 1429 cmd_size += cmd->len[i]; 1430 } 1431 1432 /* 1433 * If any of the command structures end up being larger than 1434 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically 1435 * allocated into separate TFDs, then we will need to 1436 * increase the size of the buffers. 1437 */ 1438 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE, 1439 "Command %s (%#x) is too large (%d bytes)\n", 1440 iwl_get_cmd_string(trans, cmd->id), 1441 cmd->id, copy_size)) { 1442 idx = -EINVAL; 1443 goto free_dup_buf; 1444 } 1445 1446 spin_lock_irqsave(&txq->lock, flags); 1447 1448 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { 1449 spin_unlock_irqrestore(&txq->lock, flags); 1450 1451 IWL_ERR(trans, "No space in command queue\n"); 1452 iwl_op_mode_cmd_queue_full(trans->op_mode); 1453 idx = -ENOSPC; 1454 goto free_dup_buf; 1455 } 1456 1457 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr); 1458 out_cmd = txq->entries[idx].cmd; 1459 out_meta = &txq->entries[idx].meta; 1460 1461 /* re-initialize, this also marks the SG list as unused */ 1462 memset(out_meta, 0, sizeof(*out_meta)); 1463 if (cmd->flags & CMD_WANT_SKB) 1464 out_meta->source = cmd; 1465 1466 /* set up the header */ 1467 if (group_id != 0) { 1468 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id); 1469 out_cmd->hdr_wide.group_id = group_id; 1470 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id); 1471 out_cmd->hdr_wide.length = 1472 cpu_to_le16(cmd_size - 1473 sizeof(struct iwl_cmd_header_wide)); 1474 out_cmd->hdr_wide.reserved = 0; 1475 out_cmd->hdr_wide.sequence = 1476 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | 1477 INDEX_TO_SEQ(txq->write_ptr)); 1478 1479 cmd_pos = sizeof(struct iwl_cmd_header_wide); 1480 copy_size = sizeof(struct iwl_cmd_header_wide); 1481 } else { 1482 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id); 1483 out_cmd->hdr.sequence = 1484 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) | 1485 INDEX_TO_SEQ(txq->write_ptr)); 1486 out_cmd->hdr.group_id = 0; 1487 1488 cmd_pos = sizeof(struct iwl_cmd_header); 1489 copy_size = sizeof(struct iwl_cmd_header); 1490 } 1491 1492 /* and copy the data that needs to be copied */ 1493 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1494 int copy; 1495 1496 if (!cmd->len[i]) 1497 continue; 1498 1499 /* copy everything if not nocopy/dup */ 1500 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1501 IWL_HCMD_DFL_DUP))) { 1502 copy = cmd->len[i]; 1503 1504 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1505 cmd_pos += copy; 1506 copy_size += copy; 1507 continue; 1508 } 1509 1510 /* 1511 * Otherwise we need at least IWL_FIRST_TB_SIZE copied 1512 * in total (for bi-directional DMA), but copy up to what 1513 * we can fit into the payload for debug dump purposes. 
1514 */ 1515 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]); 1516 1517 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy); 1518 cmd_pos += copy; 1519 1520 /* However, treat copy_size the proper way, we need it below */ 1521 if (copy_size < IWL_FIRST_TB_SIZE) { 1522 copy = IWL_FIRST_TB_SIZE - copy_size; 1523 1524 if (copy > cmd->len[i]) 1525 copy = cmd->len[i]; 1526 copy_size += copy; 1527 } 1528 } 1529 1530 IWL_DEBUG_HC(trans, 1531 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n", 1532 iwl_get_cmd_string(trans, cmd->id), 1533 group_id, out_cmd->hdr.cmd, 1534 le16_to_cpu(out_cmd->hdr.sequence), 1535 cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id); 1536 1537 /* start the TFD with the minimum copy bytes */ 1538 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE); 1539 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size); 1540 iwl_pcie_txq_build_tfd(trans, txq, 1541 iwl_txq_get_first_tb_dma(txq, idx), 1542 tb0_size, true); 1543 1544 /* map first command fragment, if any remains */ 1545 if (copy_size > tb0_size) { 1546 phys_addr = dma_map_single(trans->dev, 1547 ((u8 *)&out_cmd->hdr) + tb0_size, 1548 copy_size - tb0_size, 1549 DMA_TO_DEVICE); 1550 if (dma_mapping_error(trans->dev, phys_addr)) { 1551 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1552 txq->write_ptr); 1553 idx = -ENOMEM; 1554 goto out; 1555 } 1556 1557 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, 1558 copy_size - tb0_size, false); 1559 } 1560 1561 /* map the remaining (adjusted) nocopy/dup fragments */ 1562 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) { 1563 void *data = (void *)(uintptr_t)cmddata[i]; 1564 1565 if (!cmdlen[i]) 1566 continue; 1567 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY | 1568 IWL_HCMD_DFL_DUP))) 1569 continue; 1570 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) 1571 data = dup_buf; 1572 phys_addr = dma_map_single(trans->dev, data, 1573 cmdlen[i], DMA_TO_DEVICE); 1574 if (dma_mapping_error(trans->dev, phys_addr)) { 1575 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, 1576 txq->write_ptr); 1577 idx = -ENOMEM; 1578 goto out; 1579 } 1580 1581 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false); 1582 } 1583 1584 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE); 1585 out_meta->flags = cmd->flags; 1586 if (WARN_ON_ONCE(txq->entries[idx].free_buf)) 1587 kfree_sensitive(txq->entries[idx].free_buf); 1588 txq->entries[idx].free_buf = dup_buf; 1589 1590 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide); 1591 1592 /* start timer if queue currently empty */ 1593 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) 1594 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout); 1595 1596 ret = iwl_pcie_set_cmd_in_flight(trans, cmd); 1597 if (ret < 0) { 1598 idx = ret; 1599 goto out; 1600 } 1601 1602 if (cmd->flags & CMD_BLOCK_TXQS) 1603 iwl_trans_pcie_block_txq_ptrs(trans, true); 1604 1605 /* Increment and update queue's write index */ 1606 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 1607 iwl_pcie_txq_inc_wr_ptr(trans, txq); 1608 1609 out: 1610 spin_unlock_irqrestore(&txq->lock, flags); 1611 free_dup_buf: 1612 if (idx < 0) 1613 kfree(dup_buf); 1614 return idx; 1615 } 1616 1617 /* 1618 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them 1619 * @rxb: Rx buffer to reclaim 1620 */ 1621 void iwl_pcie_hcmd_complete(struct iwl_trans *trans, 1622 struct iwl_rx_cmd_buffer *rxb) 1623 { 1624 struct iwl_rx_packet *pkt = rxb_addr(rxb); 1625 u16 sequence = le16_to_cpu(pkt->hdr.sequence); 1626 u8 
group_id;
	u32 cmd_id;
	int txq_id = SEQ_TO_QUEUE(sequence);
	int index = SEQ_TO_INDEX(sequence);
	int cmd_index;
	struct iwl_device_cmd *cmd;
	struct iwl_cmd_meta *meta;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];

	/* If a Tx command is being handled and it isn't in the actual
	 * command queue, then a command routing bug has been introduced
	 * in the queue management code. */
	if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
		 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
		 txq->write_ptr)) {
		iwl_print_hex_error(trans, pkt, 32);
		return;
	}

	spin_lock_bh(&txq->lock);

	cmd_index = iwl_txq_get_cmd_index(txq, index);
	cmd = txq->entries[cmd_index].cmd;
	meta = &txq->entries[cmd_index].meta;
	group_id = cmd->hdr.group_id;
	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);

	if (trans->trans_cfg->gen2)
		iwl_txq_gen2_tfd_unmap(trans, meta,
				       iwl_txq_get_tfd(trans, txq, index));
	else
		iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);

	/* Input error checking is done when commands are added to queue. */
	if (meta->flags & CMD_WANT_SKB) {
		struct page *p = rxb_steal_page(rxb);

		meta->source->resp_pkt = pkt;
		meta->source->_rx_page_addr = (unsigned long)page_address(p);
		meta->source->_rx_page_order = trans_pcie->rx_page_order;
	}

	if (meta->flags & CMD_BLOCK_TXQS)
		iwl_trans_pcie_block_txq_ptrs(trans, false);

	iwl_pcie_cmdq_reclaim(trans, txq_id, index);

	if (!(meta->flags & CMD_ASYNC)) {
		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
			IWL_WARN(trans,
				 "HCMD_ACTIVE already clear for command %s\n",
				 iwl_get_cmd_string(trans, cmd_id));
		}
		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
			       iwl_get_cmd_string(trans, cmd_id));
		wake_up(&trans->wait_command_queue);
	}

	meta->flags = 0;

	spin_unlock_bh(&txq->lock);
}

static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
			     struct iwl_txq *txq, u8 hdr_len,
			     struct iwl_cmd_meta *out_meta)
{
	u16 head_tb_len;
	int i;

	/*
	 * Set up TFD's third entry to point directly to remainder
	 * of skb's head, if any
	 */
	head_tb_len = skb_headlen(skb) - hdr_len;

	if (head_tb_len > 0) {
		dma_addr_t tb_phys = dma_map_single(trans->dev,
						    skb->data + hdr_len,
						    head_tb_len, DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
					tb_phys, head_tb_len);
		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
	}

	/* set up the remaining entries to point to the data */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		int tb_idx;

		if (!skb_frag_size(frag))
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   skb_frag_size(frag), DMA_TO_DEVICE);

		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
			return -EINVAL;
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
tb_phys, skb_frag_size(frag)); 1732 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 1733 skb_frag_size(frag), false); 1734 if (tb_idx < 0) 1735 return tb_idx; 1736 1737 out_meta->tbs |= BIT(tb_idx); 1738 } 1739 1740 return 0; 1741 } 1742 1743 #ifdef CONFIG_INET 1744 static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans, 1745 size_t len, struct sk_buff *skb) 1746 { 1747 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1748 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page); 1749 struct iwl_tso_page_info *info; 1750 struct page **page_ptr; 1751 dma_addr_t phys; 1752 void *ret; 1753 1754 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs); 1755 1756 if (WARN_ON(*page_ptr)) 1757 return NULL; 1758 1759 if (!p->page) 1760 goto alloc; 1761 1762 /* 1763 * Check if there's enough room on this page 1764 * 1765 * Note that we put a page chaining pointer *last* in the 1766 * page - we need it somewhere, and if it's there then we 1767 * avoid DMA mapping the last bits of the page which may 1768 * trigger the 32-bit boundary hardware bug. 1769 * 1770 * (see also get_workaround_page() in tx-gen2.c) 1771 */ 1772 if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) { 1773 info = IWL_TSO_PAGE_INFO(page_address(p->page)); 1774 goto out; 1775 } 1776 1777 /* We don't have enough room on this page, get a new one. */ 1778 iwl_pcie_free_and_unmap_tso_page(trans, p->page); 1779 1780 alloc: 1781 p->page = alloc_page(GFP_ATOMIC); 1782 if (!p->page) 1783 return NULL; 1784 p->pos = page_address(p->page); 1785 1786 info = IWL_TSO_PAGE_INFO(page_address(p->page)); 1787 1788 /* set the chaining pointer to NULL */ 1789 info->next = NULL; 1790 1791 /* Create a DMA mapping for the page */ 1792 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE, 1793 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC); 1794 if (unlikely(dma_mapping_error(trans->dev, phys))) { 1795 __free_page(p->page); 1796 p->page = NULL; 1797 1798 return NULL; 1799 } 1800 1801 /* Store physical address and set use count */ 1802 info->dma_addr = phys; 1803 refcount_set(&info->use_count, 1); 1804 out: 1805 *page_ptr = p->page; 1806 /* Return an internal reference for the caller */ 1807 refcount_inc(&info->use_count); 1808 ret = p->pos; 1809 p->pos += len; 1810 1811 return ret; 1812 } 1813 1814 /** 1815 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list 1816 * @sgt: scatter gather table 1817 * @addr: Virtual address 1818 * 1819 * Find the entry that includes the address for the given address and return 1820 * correct physical address for the TB entry. 1821 * 1822 * Returns: Address for TB entry 1823 */ 1824 dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, void *addr) 1825 { 1826 struct scatterlist *sg; 1827 int i; 1828 1829 for_each_sgtable_dma_sg(sgt, sg, i) { 1830 if (addr >= sg_virt(sg) && 1831 (u8 *)addr < (u8 *)sg_virt(sg) + sg_dma_len(sg)) 1832 return sg_dma_address(sg) + 1833 ((unsigned long)addr - (unsigned long)sg_virt(sg)); 1834 } 1835 1836 WARN_ON_ONCE(1); 1837 1838 return DMA_MAPPING_ERROR; 1839 } 1840 1841 /** 1842 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending 1843 * @trans: transport private data 1844 * @skb: the SKB to map 1845 * @cmd_meta: command meta to store the scatter list information for unmapping 1846 * @hdr: output argument for TSO headers 1847 * @hdr_room: requested length for TSO headers 1848 * 1849 * Allocate space for a scatter gather list and TSO headers and map the SKB 1850 * using the scatter gather list. 
The SKB is unmapped again when the page is 1851 * free'ed again at the end of the operation. 1852 * 1853 * Returns: newly allocated and mapped scatter gather table with list 1854 */ 1855 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb, 1856 struct iwl_cmd_meta *cmd_meta, 1857 u8 **hdr, unsigned int hdr_room) 1858 { 1859 struct sg_table *sgt; 1860 1861 if (WARN_ON_ONCE(skb_has_frag_list(skb))) 1862 return NULL; 1863 1864 *hdr = iwl_pcie_get_page_hdr(trans, 1865 hdr_room + __alignof__(struct sg_table) + 1866 sizeof(struct sg_table) + 1867 (skb_shinfo(skb)->nr_frags + 1) * 1868 sizeof(struct scatterlist), 1869 skb); 1870 if (!*hdr) 1871 return NULL; 1872 1873 sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table)); 1874 sgt->sgl = (void *)(sgt + 1); 1875 1876 sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1); 1877 1878 sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, 0, skb->len); 1879 if (WARN_ON_ONCE(sgt->orig_nents <= 0)) 1880 return NULL; 1881 1882 /* And map the entire SKB */ 1883 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0) 1884 return NULL; 1885 1886 /* Store non-zero (i.e. valid) offset for unmapping */ 1887 cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK; 1888 1889 return sgt; 1890 } 1891 1892 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 1893 struct iwl_txq *txq, u8 hdr_len, 1894 struct iwl_cmd_meta *out_meta, 1895 struct iwl_device_tx_cmd *dev_cmd, 1896 u16 tb1_len) 1897 { 1898 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 1899 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 1900 struct ieee80211_hdr *hdr = (void *)skb->data; 1901 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room; 1902 unsigned int mss = skb_shinfo(skb)->gso_size; 1903 u16 length, iv_len, amsdu_pad; 1904 dma_addr_t start_hdr_phys; 1905 u8 *start_hdr, *pos_hdr; 1906 struct sg_table *sgt; 1907 struct tso_t tso; 1908 1909 /* if the packet is protected, then it must be CCMP or GCMP */ 1910 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN); 1911 iv_len = ieee80211_has_protected(hdr->frame_control) ? 1912 IEEE80211_CCMP_HDR_LEN : 0; 1913 1914 trace_iwlwifi_dev_tx(trans->dev, skb, 1915 iwl_txq_get_tfd(trans, txq, txq->write_ptr), 1916 trans_pcie->txqs.tfd.size, 1917 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0); 1918 1919 ip_hdrlen = skb_network_header_len(skb); 1920 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb); 1921 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len; 1922 amsdu_pad = 0; 1923 1924 /* total amount of header we may need for this A-MSDU */ 1925 hdr_room = DIV_ROUND_UP(total_len, mss) * 1926 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len; 1927 1928 /* Our device supports 9 segments at most, it will fit in 1 page */ 1929 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room); 1930 if (!sgt) 1931 return -ENOMEM; 1932 1933 start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr); 1934 pos_hdr = start_hdr; 1935 memcpy(pos_hdr, skb->data + hdr_len, iv_len); 1936 pos_hdr += iv_len; 1937 1938 /* 1939 * Pull the ieee80211 header + IV to be able to use TSO core, 1940 * we will restore it for the tx_status flow. 1941 */ 1942 skb_pull(skb, hdr_len + iv_len); 1943 1944 /* 1945 * Remove the length of all the headers that we don't actually 1946 * have in the MPDU by themselves, but that we duplicate into 1947 * all the different MSDUs inside the A-MSDU. 
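	 * (Their length is added back per subframe further down, via
	 *  le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start), once the
	 *  real per-subframe headers have been built.)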
1948 */ 1949 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen); 1950 1951 tso_start(skb, &tso); 1952 1953 while (total_len) { 1954 /* this is the data left for this subframe */ 1955 unsigned int data_left = 1956 min_t(unsigned int, mss, total_len); 1957 unsigned int hdr_tb_len; 1958 dma_addr_t hdr_tb_phys; 1959 u8 *subf_hdrs_start = pos_hdr; 1960 1961 total_len -= data_left; 1962 1963 memset(pos_hdr, 0, amsdu_pad); 1964 pos_hdr += amsdu_pad; 1965 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen + 1966 data_left)) & 0x3; 1967 ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr)); 1968 pos_hdr += ETH_ALEN; 1969 ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr)); 1970 pos_hdr += ETH_ALEN; 1971 1972 length = snap_ip_tcp_hdrlen + data_left; 1973 *((__be16 *)pos_hdr) = cpu_to_be16(length); 1974 pos_hdr += sizeof(length); 1975 1976 /* 1977 * This will copy the SNAP as well which will be considered 1978 * as MAC header. 1979 */ 1980 tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len); 1981 1982 pos_hdr += snap_ip_tcp_hdrlen; 1983 1984 hdr_tb_len = pos_hdr - start_hdr; 1985 hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr); 1986 1987 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys, 1988 hdr_tb_len, false); 1989 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr, 1990 hdr_tb_phys, hdr_tb_len); 1991 /* add this subframe's headers' length to the tx_cmd */ 1992 le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start); 1993 1994 /* prepare the start_hdr for the next subframe */ 1995 start_hdr = pos_hdr; 1996 1997 /* put the payload */ 1998 while (data_left) { 1999 unsigned int size = min_t(unsigned int, tso.size, 2000 data_left); 2001 dma_addr_t tb_phys; 2002 2003 tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, tso.data); 2004 /* Not a real mapping error, use direct comparison */ 2005 if (unlikely(tb_phys == DMA_MAPPING_ERROR)) 2006 return -EINVAL; 2007 2008 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, 2009 size, false); 2010 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data, 2011 tb_phys, size); 2012 2013 data_left -= size; 2014 tso_build_data(skb, &tso, size); 2015 } 2016 } 2017 2018 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room, 2019 DMA_TO_DEVICE); 2020 2021 /* re -add the WiFi header and IV */ 2022 skb_push(skb, hdr_len + iv_len); 2023 2024 return 0; 2025 } 2026 #else /* CONFIG_INET */ 2027 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb, 2028 struct iwl_txq *txq, u8 hdr_len, 2029 struct iwl_cmd_meta *out_meta, 2030 struct iwl_device_tx_cmd *dev_cmd, 2031 u16 tb1_len) 2032 { 2033 /* No A-MSDU without CONFIG_INET */ 2034 WARN_ON(1); 2035 2036 return -1; 2037 } 2038 #endif /* CONFIG_INET */ 2039 2040 #define IWL_TX_CRC_SIZE 4 2041 #define IWL_TX_DELIMITER_SIZE 4 2042 2043 /* 2044 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array 2045 */ 2046 static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans, 2047 struct iwl_txq *txq, u16 byte_cnt, 2048 int num_tbs) 2049 { 2050 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2051 struct iwlagn_scd_bc_tbl *scd_bc_tbl; 2052 int write_ptr = txq->write_ptr; 2053 int txq_id = txq->id; 2054 u8 sec_ctl = 0; 2055 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE; 2056 __le16 bc_ent; 2057 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd; 2058 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 2059 u8 sta_id = tx_cmd->sta_id; 2060 2061 scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; 2062 2063 sec_ctl = tx_cmd->sec_ctl; 2064 2065 switch 
(sec_ctl & TX_CMD_SEC_MSK) {
2066 case TX_CMD_SEC_CCM:
2067 len += IEEE80211_CCMP_MIC_LEN;
2068 break;
2069 case TX_CMD_SEC_TKIP:
2070 len += IEEE80211_TKIP_ICV_LEN;
2071 break;
2072 case TX_CMD_SEC_WEP:
2073 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
2074 break;
2075 }
2076 if (trans_pcie->txqs.bc_table_dword)
2077 len = DIV_ROUND_UP(len, 4);
2078
2079 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
2080 return;
2081
2082 bc_ent = cpu_to_le16(len | (sta_id << 12));
2083
2084 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
2085
2086 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
2087 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
2088 bc_ent;
2089 }
2090
2091 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2092 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
2093 {
2094 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2095 struct ieee80211_hdr *hdr;
2096 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2097 struct iwl_cmd_meta *out_meta;
2098 struct iwl_txq *txq;
2099 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2100 void *tb1_addr;
2101 void *tfd;
2102 u16 len, tb1_len;
2103 bool wait_write_ptr;
2104 __le16 fc;
2105 u8 hdr_len;
2106 u16 wifi_seq;
2107 bool amsdu;
2108
2109 txq = trans_pcie->txqs.txq[txq_id];
2110
2111 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
2112 "TX on unused queue %d\n", txq_id))
2113 return -EINVAL;
2114
2115 if (skb_is_nonlinear(skb) &&
2116 skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
2117 __skb_linearize(skb))
2118 return -ENOMEM;
2119
2120 /* mac80211 always puts the full header into the SKB's head,
2121 * so there's no need to check if it's readable there
2122 */
2123 hdr = (struct ieee80211_hdr *)skb->data;
2124 fc = hdr->frame_control;
2125 hdr_len = ieee80211_hdrlen(fc);
2126
2127 spin_lock(&txq->lock);
2128
2129 if (iwl_txq_space(trans, txq) < txq->high_mark) {
2130 iwl_txq_stop(trans, txq);
2131
2132 /* don't put the packet on the ring if there is no room */
2133 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
2134 struct iwl_device_tx_cmd **dev_cmd_ptr;
2135
2136 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2137 trans_pcie->txqs.dev_cmd_offs);
2138
2139 *dev_cmd_ptr = dev_cmd;
2140 __skb_queue_tail(&txq->overflow_q, skb);
2141
2142 spin_unlock(&txq->lock);
2143 return 0;
2144 }
2145 }
2146
2147 /* In AGG mode, the index in the ring must correspond to the WiFi
2148 * sequence number. This is a HW requirement, to help the SCD parse
2149 * the BA.
2150 * Check here that the packets are in the right place on the ring.
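 *
 * For example (illustrative; the '& 0xff' below assumes the legacy
 * 256-entry ring): an MPDU with sequence number 0x123 is expected to sit
 * at write_ptr 0x23, otherwise the WARN_ONCE below fires.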
2151 */ 2152 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl)); 2153 WARN_ONCE(txq->ampdu && 2154 (wifi_seq & 0xff) != txq->write_ptr, 2155 "Q: %d WiFi Seq %d tfdNum %d", 2156 txq_id, wifi_seq, txq->write_ptr); 2157 2158 /* Set up driver data for this TFD */ 2159 txq->entries[txq->write_ptr].skb = skb; 2160 txq->entries[txq->write_ptr].cmd = dev_cmd; 2161 2162 dev_cmd->hdr.sequence = 2163 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) | 2164 INDEX_TO_SEQ(txq->write_ptr))); 2165 2166 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr); 2167 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) + 2168 offsetof(struct iwl_tx_cmd, scratch); 2169 2170 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys); 2171 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys); 2172 2173 /* Set up first empty entry in queue's array of Tx/cmd buffers */ 2174 out_meta = &txq->entries[txq->write_ptr].meta; 2175 memset(out_meta, 0, sizeof(*out_meta)); 2176 2177 /* 2178 * The second TB (tb1) points to the remainder of the TX command 2179 * and the 802.11 header - dword aligned size 2180 * (This calculation modifies the TX command, so do it before the 2181 * setup of the first TB) 2182 */ 2183 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) + 2184 hdr_len - IWL_FIRST_TB_SIZE; 2185 /* do not align A-MSDU to dword as the subframe header aligns it */ 2186 amsdu = ieee80211_is_data_qos(fc) && 2187 (*ieee80211_get_qos_ctl(hdr) & 2188 IEEE80211_QOS_CTL_A_MSDU_PRESENT); 2189 if (!amsdu) { 2190 tb1_len = ALIGN(len, 4); 2191 /* Tell NIC about any 2-byte padding after MAC header */ 2192 if (tb1_len != len) 2193 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD); 2194 } else { 2195 tb1_len = len; 2196 } 2197 2198 /* 2199 * The first TB points to bi-directional DMA data, we'll 2200 * memcpy the data into it later. 2201 */ 2202 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys, 2203 IWL_FIRST_TB_SIZE, true); 2204 2205 /* there must be data left over for TB1 or this code must be changed */ 2206 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE); 2207 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) + 2208 offsetofend(struct iwl_tx_cmd, scratch) > 2209 IWL_FIRST_TB_SIZE); 2210 2211 /* map the data for TB1 */ 2212 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; 2213 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE); 2214 if (unlikely(dma_mapping_error(trans->dev, tb1_phys))) 2215 goto out_err; 2216 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false); 2217 2218 trace_iwlwifi_dev_tx(trans->dev, skb, 2219 iwl_txq_get_tfd(trans, txq, txq->write_ptr), 2220 trans_pcie->txqs.tfd.size, 2221 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 2222 hdr_len); 2223 2224 /* 2225 * If gso_size wasn't set, don't give the frame "amsdu treatment" 2226 * (adding subframes, etc.). 2227 * This can happen in some testing flows when the amsdu was already 2228 * pre-built, and we just need to send the resulting skb. 
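 *
 * In other words, skb_shinfo(skb)->gso_size is only non-zero for frames
 * the stack wants segmented; a pre-built A-MSDU arrives with gso_size == 0
 * and therefore takes the plain iwl_fill_data_tbs() path below.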
2229 */ 2230 if (amsdu && skb_shinfo(skb)->gso_size) { 2231 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len, 2232 out_meta, dev_cmd, 2233 tb1_len))) 2234 goto out_err; 2235 } else { 2236 struct sk_buff *frag; 2237 2238 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len, 2239 out_meta))) 2240 goto out_err; 2241 2242 skb_walk_frags(skb, frag) { 2243 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0, 2244 out_meta))) 2245 goto out_err; 2246 } 2247 } 2248 2249 /* building the A-MSDU might have changed this data, so memcpy it now */ 2250 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE); 2251 2252 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr); 2253 /* Set up entry for this TFD in Tx byte-count array */ 2254 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len), 2255 iwl_txq_gen1_tfd_get_num_tbs(tfd)); 2256 2257 wait_write_ptr = ieee80211_has_morefrags(fc); 2258 2259 /* start timer if queue currently empty */ 2260 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) { 2261 /* 2262 * If the TXQ is active, then set the timer, if not, 2263 * set the timer in remainder so that the timer will 2264 * be armed with the right value when the station will 2265 * wake up. 2266 */ 2267 if (!txq->frozen) 2268 mod_timer(&txq->stuck_timer, 2269 jiffies + txq->wd_timeout); 2270 else 2271 txq->frozen_expiry_remainder = txq->wd_timeout; 2272 } 2273 2274 /* Tell device the write index *just past* this latest filled TFD */ 2275 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr); 2276 if (!wait_write_ptr) 2277 iwl_pcie_txq_inc_wr_ptr(trans, txq); 2278 2279 /* 2280 * At this point the frame is "transmitted" successfully 2281 * and we will get a TX status notification eventually. 2282 */ 2283 spin_unlock(&txq->lock); 2284 return 0; 2285 out_err: 2286 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr); 2287 spin_unlock(&txq->lock); 2288 return -1; 2289 } 2290 2291 static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans, 2292 struct iwl_txq *txq, 2293 int read_ptr) 2294 { 2295 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2296 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr; 2297 int txq_id = txq->id; 2298 u8 sta_id = 0; 2299 __le16 bc_ent; 2300 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd; 2301 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload; 2302 2303 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX); 2304 2305 if (txq_id != trans_pcie->txqs.cmd.q_id) 2306 sta_id = tx_cmd->sta_id; 2307 2308 bc_ent = cpu_to_le16(1 | (sta_id << 12)); 2309 2310 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent; 2311 2312 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP) 2313 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = 2314 bc_ent; 2315 } 2316 2317 /* Frees buffers until index _not_ inclusive */ 2318 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, 2319 struct sk_buff_head *skbs, bool is_flush) 2320 { 2321 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2322 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 2323 int tfd_num, read_ptr, last_to_free; 2324 int txq_read_ptr, txq_write_ptr; 2325 2326 /* This function is not meant to release cmd queue*/ 2327 if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id)) 2328 return; 2329 2330 if (WARN_ON(!txq)) 2331 return; 2332 2333 tfd_num = iwl_txq_get_cmd_index(txq, ssn); 2334 2335 spin_lock_bh(&txq->reclaim_lock); 2336 2337 spin_lock(&txq->lock); 2338 txq_read_ptr = txq->read_ptr; 2339 txq_write_ptr = 
txq->write_ptr;
2340 spin_unlock(&txq->lock);
2341
2342 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
2343
2344 if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
2345 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
2346 txq_id, ssn);
2347 goto out;
2348 }
2349
2350 if (read_ptr == tfd_num)
2351 goto out;
2352
2353 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2354 txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
2355
2356 /* Since we free until index _not_ inclusive, the one before index is
2357 * the last we will free. This one must be a used entry.
2358 */
2359 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
2360
2361 if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
2362 IWL_ERR(trans,
2363 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
2364 __func__, txq_id, last_to_free,
2365 trans->trans_cfg->base_params->max_tfd_queue_size,
2366 txq_write_ptr, txq_read_ptr);
2367
2368 iwl_op_mode_time_point(trans->op_mode,
2369 IWL_FW_INI_TIME_POINT_FAKE_TX,
2370 NULL);
2371 goto out;
2372 }
2373
2374 if (WARN_ON(!skb_queue_empty(skbs)))
2375 goto out;
2376
2377 for (;
2378 read_ptr != tfd_num;
2379 txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
2380 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
2381 struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
2382 struct sk_buff *skb = txq->entries[read_ptr].skb;
2383
2384 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
2385 read_ptr, txq_read_ptr, txq_id))
2386 continue;
2387
2388 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
2389
2390 __skb_queue_tail(skbs, skb);
2391
2392 txq->entries[read_ptr].skb = NULL;
2393
2394 if (!trans->trans_cfg->gen2)
2395 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
2396 txq_read_ptr);
2397
2398 iwl_txq_free_tfd(trans, txq, txq_read_ptr);
2399 }
2400
2401 spin_lock(&txq->lock);
2402 txq->read_ptr = txq_read_ptr;
2403
2404 iwl_txq_progress(txq);
2405
2406 if (iwl_txq_space(trans, txq) > txq->low_mark &&
2407 test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
2408 struct sk_buff_head overflow_skbs;
2409 struct sk_buff *skb;
2410
2411 __skb_queue_head_init(&overflow_skbs);
2412 skb_queue_splice_init(&txq->overflow_q,
2413 is_flush ? skbs : &overflow_skbs);
2414
2415 /*
2416 * We are going to transmit from the overflow queue.
2417 * Remember this state so that wait_for_txq_empty will know we
2418 * are adding more packets to the TFD queue. It cannot rely on
2419 * the state of &txq->overflow_q, as we just emptied it, but
2420 * haven't TXed the content yet.
2421 */
2422 txq->overflow_tx = true;
2423
2424 /*
2425 * This is tricky: we are in the reclaim path and are holding
2426 * reclaim_lock, so no one will try to access the txq data
2427 * from that path. TX is stopped, so the TX path cannot touch
2428 * this queue either; bottom line, we can unlock and re-lock later.
2429 */
2430 spin_unlock(&txq->lock);
2431
2432 while ((skb = __skb_dequeue(&overflow_skbs))) {
2433 struct iwl_device_tx_cmd *dev_cmd_ptr;
2434
2435 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
2436 trans_pcie->txqs.dev_cmd_offs);
2437
2438 /*
2439 * Note that we can very well be overflowing again.
2440 * In that case, iwl_txq_space will be small again
2441 * and we won't wake mac80211's queue.
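 *
 * (iwl_trans_tx() below re-enters the normal TX path, i.e.
 * iwl_trans_pcie_tx() above for this transport, so if the ring fills
 * up again the frame is simply put back on txq->overflow_q and we
 * will get here again on a later reclaim.)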
2442 */ 2443 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id); 2444 } 2445 2446 if (iwl_txq_space(trans, txq) > txq->low_mark) 2447 iwl_trans_pcie_wake_queue(trans, txq); 2448 2449 spin_lock(&txq->lock); 2450 txq->overflow_tx = false; 2451 } 2452 2453 spin_unlock(&txq->lock); 2454 out: 2455 spin_unlock_bh(&txq->reclaim_lock); 2456 } 2457 2458 /* Set wr_ptr of specific device and txq */ 2459 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr) 2460 { 2461 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2462 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id]; 2463 2464 spin_lock_bh(&txq->lock); 2465 2466 txq->write_ptr = ptr; 2467 txq->read_ptr = txq->write_ptr; 2468 2469 spin_unlock_bh(&txq->lock); 2470 } 2471 2472 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans, 2473 unsigned long txqs, bool freeze) 2474 { 2475 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2476 int queue; 2477 2478 for_each_set_bit(queue, &txqs, BITS_PER_LONG) { 2479 struct iwl_txq *txq = trans_pcie->txqs.txq[queue]; 2480 unsigned long now; 2481 2482 spin_lock_bh(&txq->lock); 2483 2484 now = jiffies; 2485 2486 if (txq->frozen == freeze) 2487 goto next_queue; 2488 2489 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n", 2490 freeze ? "Freezing" : "Waking", queue); 2491 2492 txq->frozen = freeze; 2493 2494 if (txq->read_ptr == txq->write_ptr) 2495 goto next_queue; 2496 2497 if (freeze) { 2498 if (unlikely(time_after(now, 2499 txq->stuck_timer.expires))) { 2500 /* 2501 * The timer should have fired, maybe it is 2502 * spinning right now on the lock. 2503 */ 2504 goto next_queue; 2505 } 2506 /* remember how long until the timer fires */ 2507 txq->frozen_expiry_remainder = 2508 txq->stuck_timer.expires - now; 2509 del_timer(&txq->stuck_timer); 2510 goto next_queue; 2511 } 2512 2513 /* 2514 * Wake a non-empty queue -> arm timer with the 2515 * remainder before it froze 2516 */ 2517 mod_timer(&txq->stuck_timer, 2518 now + txq->frozen_expiry_remainder); 2519 2520 next_queue: 2521 spin_unlock_bh(&txq->lock); 2522 } 2523 } 2524 2525 #define HOST_COMPLETE_TIMEOUT (2 * HZ) 2526 2527 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans, 2528 struct iwl_host_cmd *cmd) 2529 { 2530 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); 2531 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id); 2532 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id]; 2533 int cmd_idx; 2534 int ret; 2535 2536 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str); 2537 2538 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE, 2539 &trans->status), 2540 "Command %s: a command is already active!\n", cmd_str)) 2541 return -EIO; 2542 2543 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str); 2544 2545 if (trans->trans_cfg->gen2) 2546 cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); 2547 else 2548 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd); 2549 2550 if (cmd_idx < 0) { 2551 ret = cmd_idx; 2552 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 2553 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n", 2554 cmd_str, ret); 2555 return ret; 2556 } 2557 2558 ret = wait_event_timeout(trans->wait_command_queue, 2559 !test_bit(STATUS_SYNC_HCMD_ACTIVE, 2560 &trans->status), 2561 HOST_COMPLETE_TIMEOUT); 2562 if (!ret) { 2563 IWL_ERR(trans, "Error sending %s: time out after %dms.\n", 2564 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT)); 2565 2566 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n", 2567 
txq->read_ptr, txq->write_ptr); 2568 2569 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status); 2570 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n", 2571 cmd_str); 2572 ret = -ETIMEDOUT; 2573 2574 iwl_trans_sync_nmi(trans); 2575 goto cancel; 2576 } 2577 2578 if (test_bit(STATUS_FW_ERROR, &trans->status)) { 2579 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE, 2580 &trans->status)) { 2581 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str); 2582 dump_stack(); 2583 } 2584 ret = -EIO; 2585 goto cancel; 2586 } 2587 2588 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 2589 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 2590 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n"); 2591 ret = -ERFKILL; 2592 goto cancel; 2593 } 2594 2595 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) { 2596 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str); 2597 ret = -EIO; 2598 goto cancel; 2599 } 2600 2601 return 0; 2602 2603 cancel: 2604 if (cmd->flags & CMD_WANT_SKB) { 2605 /* 2606 * Cancel the CMD_WANT_SKB flag for the cmd in the 2607 * TX cmd queue. Otherwise in case the cmd comes 2608 * in later, it will possibly set an invalid 2609 * address (cmd->meta.source). 2610 */ 2611 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB; 2612 } 2613 2614 if (cmd->resp_pkt) { 2615 iwl_free_resp(cmd); 2616 cmd->resp_pkt = NULL; 2617 } 2618 2619 return ret; 2620 } 2621 2622 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, 2623 struct iwl_host_cmd *cmd) 2624 { 2625 /* Make sure the NIC is still alive in the bus */ 2626 if (test_bit(STATUS_TRANS_DEAD, &trans->status)) 2627 return -ENODEV; 2628 2629 if (!(cmd->flags & CMD_SEND_IN_RFKILL) && 2630 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) { 2631 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n", 2632 cmd->id); 2633 return -ERFKILL; 2634 } 2635 2636 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 && 2637 !(cmd->flags & CMD_SEND_IN_D3))) { 2638 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id); 2639 return -EHOSTDOWN; 2640 } 2641 2642 if (cmd->flags & CMD_ASYNC) { 2643 int ret; 2644 2645 /* An asynchronous command can not expect an SKB to be set. */ 2646 if (WARN_ON(cmd->flags & CMD_WANT_SKB)) 2647 return -EINVAL; 2648 2649 if (trans->trans_cfg->gen2) 2650 ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd); 2651 else 2652 ret = iwl_pcie_enqueue_hcmd(trans, cmd); 2653 2654 if (ret < 0) { 2655 IWL_ERR(trans, 2656 "Error sending %s: enqueue_hcmd failed: %d\n", 2657 iwl_get_cmd_string(trans, cmd->id), ret); 2658 return ret; 2659 } 2660 return 0; 2661 } 2662 2663 return iwl_trans_pcie_send_hcmd_sync(trans, cmd); 2664 } 2665 IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd); 2666
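
/*
 * Usage sketch (illustrative only, not part of the driver logic): a caller
 * fills a struct iwl_host_cmd and lets iwl_trans_pcie_send_hcmd() choose the
 * sync or async path based on CMD_ASYNC. The command ID below is just a
 * placeholder; only fields actually referenced in this file are shown.
 *
 *	struct iwl_host_cmd cmd = {
 *		.id = ECHO_CMD,			(any host command ID)
 *		.flags = CMD_WANT_SKB,		(synchronous, keep the response)
 *	};
 *	int ret = iwl_trans_pcie_send_hcmd(trans, &cmd);
 *
 *	if (!ret)
 *		iwl_free_resp(&cmd);		(releases cmd.resp_pkt)
 *
 * A CMD_WANT_SKB (i.e. synchronous) send may sleep for up to
 * HOST_COMPLETE_TIMEOUT, so it must not be issued from atomic context.
 */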