Lines Matching +full:queue +full:- +full:pkt +full:- +full:tx
1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
3 * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
24 #include "iwl-fh.h"
25 #include "iwl-debug.h"
26 #include "iwl-csr.h"
27 #include "iwl-prph.h"
28 #include "iwl-io.h"
29 #include "iwl-scd.h"
30 #include "iwl-op-mode.h"
32 #include "fw/api/tx.h"
34 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
39 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
42 * queue via "read" and "write" pointers. The driver keeps a minimum of 2 empty
44 * queue states.
47 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
49 * For Tx queues, there are low mark and high mark limits. If, after queuing
50 * a packet for Tx, free space becomes < low mark, the Tx queue is stopped. When
51 * reclaiming packets (on the 'tx done' IRQ), if free space becomes > high mark,
52 * the Tx queue is resumed.
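/*
 * Illustrative sketch (not the driver's iwl_txq_space()): the usual
 * power-of-two ring arithmetic behind the read/write pointers described
 * above.  The names and the exact reservation policy are assumptions made
 * for this example only.
 */
#include <stdint.h>

/* wrap-safe number of occupied slots; queue_size must be a power of two */
static inline uint32_t ring_used(uint32_t write_ptr, uint32_t read_ptr,
				 uint32_t queue_size)
{
	return (write_ptr - read_ptr) & (queue_size - 1);
}

/* free slots, reserving one slot so "full" never looks like "empty" */
static inline uint32_t ring_free(uint32_t write_ptr, uint32_t read_ptr,
				 uint32_t queue_size)
{
	return (queue_size - 1) - ring_used(write_ptr, read_ptr, queue_size);
}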
60 if (WARN_ON(ptr->addr))
61 return -EINVAL;
63 ptr->addr = dma_alloc_coherent(trans->dev, size,
64 &ptr->dma, GFP_KERNEL);
65 if (!ptr->addr)
66 return -ENOMEM;
67 ptr->size = size;
73 if (unlikely(!ptr->addr))
76 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
81 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
88 int txq_id = txq->id;
90 lockdep_assert_held(&txq->lock);
98 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
99 txq_id != trans_pcie->txqs.cmd.q_id &&
100 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
109 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
113 txq->need_update = true;
119 * if not in power-save mode, uCode will never sleep when we're
120 * trying to tx (during RFKILL, we're not trying to tx).
122 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
123 if (!txq->block)
125 txq->write_ptr | (txq_id << 8));
133 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
134 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
136 if (!test_bit(i, trans_pcie->txqs.queue_used))
139 spin_lock_bh(&txq->lock);
140 if (txq->need_update) {
142 txq->need_update = false;
144 spin_unlock_bh(&txq->lock);
151 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
154 put_unaligned_le32(addr, &tb->lo);
157 tb->hi_n_len = cpu_to_le16(hi_n_len);
159 tfd->num_tbs = idx + 1;
164 return tfd->num_tbs & 0x1f;
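/*
 * Sketch of the gen1 TB encoding used above: 'lo' carries the low 32 address
 * bits, the low nibble of 'hi_n_len' carries address bits 32..35, and its
 * upper 12 bits carry the buffer length.  Standalone illustration only; the
 * struct and helper names are invented for the example.
 */
#include <stdint.h>

struct example_tb {
	uint32_t lo;		/* little-endian in the real descriptor */
	uint16_t hi_n_len;
};

static void example_tb_pack(struct example_tb *tb, uint64_t addr, uint16_t len)
{
	tb->lo = (uint32_t)addr;
	tb->hi_n_len = (uint16_t)(((addr >> 32) & 0xf) | (len << 4));
}

static uint64_t example_tb_addr(const struct example_tb *tb)
{
	/* shift in two steps, mirroring the "shift by 16 twice" note below */
	return tb->lo | (((uint64_t)(tb->hi_n_len & 0xf) << 16) << 16);
}

static uint16_t example_tb_len(const struct example_tb *tb)
{
	return tb->hi_n_len >> 4;
}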
174 tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
177 memset(tfd, 0, trans_pcie->txqs.tfd.size);
181 /* Each TFD can point to a maximum of max_tbs Tx buffers */
182 if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
184 trans_pcie->txqs.tfd.max_tbs);
185 return -EINVAL;
190 return -EINVAL;
201 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
204 spin_lock(&trans_pcie->reg_lock);
206 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
207 spin_unlock(&trans_pcie->reg_lock);
211 trans_pcie->cmd_hold_nic_awake = false;
214 spin_unlock(&trans_pcie->reg_lock);
223 if (refcount_dec_and_test(&info->use_count)) {
224 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
238 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
247 next = info->next;
250 if (!next && cmd_meta->sg_offset) {
254 cmd_meta->sg_offset);
256 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
266 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
270 addr = get_unaligned_le32(&tb->lo);
275 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
278 * shift by 16 twice to avoid warnings on 32-bit
288 tfd->num_tbs = 0;
290 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
291 trans->invalid_tx_cmd.size);
305 if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
312 if (meta->sg_offset)
315 /* first TB is never freed - it's the bidirectional DMA data */
318 if (meta->tbs & BIT(i))
319 dma_unmap_page(trans->dev,
325 dma_unmap_single(trans->dev,
332 meta->tbs = 0;
338 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
340 * @txq: tx queue
355 lockdep_assert_held(&txq->reclaim_lock);
357 if (!txq->entries)
360 /* We have only q->n_window txq->entries, but we use
363 if (trans->trans_cfg->gen2)
364 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
367 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
371 skb = txq->entries[idx].skb;
373 /* Can be called from irqs-disabled context
374 * If skb is not NULL, it means that the whole queue is being
375 * freed and that the queue is not empty - free the skb
378 iwl_op_mode_free_skb(trans->op_mode, skb);
379 txq->entries[idx].skb = NULL;
384 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
389 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
392 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
396 spin_lock_bh(&txq->reclaim_lock);
397 spin_lock(&txq->lock);
398 while (txq->write_ptr != txq->read_ptr) {
400 txq_id, txq->read_ptr);
402 if (txq_id != trans_pcie->txqs.cmd.q_id) {
403 struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
405 &txq->entries[txq->read_ptr].meta;
412 iwl_txq_free_tfd(trans, txq, txq->read_ptr);
413 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
415 if (txq->read_ptr == txq->write_ptr &&
416 txq_id == trans_pcie->txqs.cmd.q_id)
420 while (!skb_queue_empty(&txq->overflow_q)) {
421 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
423 iwl_op_mode_free_skb(trans->op_mode, skb);
426 spin_unlock(&txq->lock);
427 spin_unlock_bh(&txq->reclaim_lock);
429 /* just in case - this queue may have been stopped */
434 * iwl_pcie_txq_free - Deallocate DMA queue.
435 * @txq: Transmit queue to deallocate.
437 * Empty queue by removing and destroying all BD's.
439 * 0-fill, but do not free "txq" descriptor structure.
444 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
445 struct device *dev = trans->dev;
453 /* De-alloc array of command/tx buffers */
454 if (txq_id == trans_pcie->txqs.cmd.q_id)
455 for (i = 0; i < txq->n_window; i++) {
456 kfree_sensitive(txq->entries[i].cmd);
457 kfree_sensitive(txq->entries[i].free_buf);
460 /* De-alloc circular buffer of TFDs */
461 if (txq->tfds) {
463 trans_pcie->txqs.tfd.size *
464 trans->trans_cfg->base_params->max_tfd_queue_size,
465 txq->tfds, txq->dma_addr);
466 txq->dma_addr = 0;
467 txq->tfds = NULL;
470 sizeof(*txq->first_tb_bufs) * txq->n_window,
471 txq->first_tb_bufs, txq->first_tb_dma);
474 kfree(txq->entries);
475 txq->entries = NULL;
477 del_timer_sync(&txq->stuck_timer);
479 /* 0-fill queue descriptor structure */
486 int nq = trans->trans_cfg->base_params->num_of_queues;
489 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
492 /* make sure no queue is stopped or marked used */
493 memset(trans_pcie->txqs.queue_stopped, 0,
494 sizeof(trans_pcie->txqs.queue_stopped));
495 memset(trans_pcie->txqs.queue_used, 0,
496 sizeof(trans_pcie->txqs.queue_used));
498 trans_pcie->scd_base_addr =
502 scd_base_addr != trans_pcie->scd_base_addr);
504 /* reset context data, TX status and translation data */
505 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
510 trans_pcie->txqs.scd_bc_tbls.dma >> 10);
515 if (trans->trans_cfg->base_params->scd_chain_ext_wa)
518 iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
519 trans_pcie->txqs.cmd.fifo,
520 trans_pcie->txqs.cmd.wdg_timeout);
522 /* Activate all Tx DMA/FIFO channels */
536 /* Enable L1-Active */
537 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
551 if (WARN_ON_ONCE(trans->trans_cfg->gen2))
554 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
556 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
557 if (trans->trans_cfg->gen2)
560 txq->dma_addr);
564 txq->dma_addr >> 8);
566 txq->read_ptr = 0;
567 txq->write_ptr = 0;
572 trans_pcie->kw.dma >> 4);
588 spin_lock_bh(&trans_pcie->irq_lock);
593 /* Stop each Tx DMA channel */
609 spin_unlock_bh(&trans_pcie->irq_lock);
613 * iwl_pcie_tx_stop - Stop all Tx DMA channels
620 /* Turn off all Tx DMA fifos */
623 /* Turn off all Tx DMA channels */
629 * Since we stop Tx altogether - mark the queues as stopped.
631 memset(trans_pcie->txqs.queue_stopped, 0,
632 sizeof(trans_pcie->txqs.queue_stopped));
633 memset(trans_pcie->txqs.queue_used, 0,
634 sizeof(trans_pcie->txqs.queue_used));
637 if (!trans_pcie->txq_memory)
641 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
649 * iwl_trans_tx_free - Free TXQ Context
651 * Destroy all TX DMA queues and structures
658 memset(trans_pcie->txqs.queue_used, 0,
659 sizeof(trans_pcie->txqs.queue_used));
661 /* Tx queues */
662 if (trans_pcie->txq_memory) {
664 txq_id < trans->trans_cfg->base_params->num_of_queues;
667 trans_pcie->txqs.txq[txq_id] = NULL;
671 kfree(trans_pcie->txq_memory);
672 trans_pcie->txq_memory = NULL;
674 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
676 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
681 u32 txq_id = txq->id;
686 if (trans->trans_cfg->gen2) {
687 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
688 txq->read_ptr, txq->write_ptr);
698 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
700 jiffies_to_msecs(txq->wd_timeout),
701 txq->read_ptr, txq->write_ptr,
703 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
705 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
712 struct iwl_trans *trans = txq->trans;
714 spin_lock(&txq->lock);
716 if (txq->read_ptr == txq->write_ptr) {
717 spin_unlock(&txq->lock);
720 spin_unlock(&txq->lock);
731 size_t num_entries = trans->trans_cfg->gen2 ?
732 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
738 return -EINVAL;
740 if (WARN_ON(txq->entries || txq->tfds))
741 return -EINVAL;
743 tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
745 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
746 txq->trans = trans;
748 txq->n_window = slots_num;
750 txq->entries = kcalloc(slots_num,
754 if (!txq->entries)
759 txq->entries[i].cmd =
762 if (!txq->entries[i].cmd)
769 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
770 &txq->dma_addr, GFP_KERNEL);
771 if (!txq->tfds)
774 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
776 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
778 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
779 &txq->first_tb_dma,
781 if (!txq->first_tb_bufs)
787 if (trans->trans_cfg->gen2)
795 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
796 txq->tfds = NULL;
798 if (txq->entries && cmd_queue)
800 kfree(txq->entries[i].cmd);
801 kfree(txq->entries);
802 txq->entries = NULL;
804 return -ENOMEM;
808 * iwl_pcie_tx_alloc - allocate TX context
809 * Allocate all Tx DMA structures and initialize them
816 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
818 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
819 return -EINVAL;
825 if (WARN_ON(trans_pcie->txq_memory)) {
826 ret = -EINVAL;
830 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
837 /* Alloc keep-warm buffer */
838 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
844 trans_pcie->txq_memory =
845 kcalloc(trans->trans_cfg->base_params->num_of_queues,
847 if (!trans_pcie->txq_memory) {
849 ret = -ENOMEM;
853 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
854 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
856 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
860 trans->cfg->min_txq_size);
863 trans->cfg->min_ba_txq_size);
864 trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
865 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
868 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
871 trans_pcie->txqs.txq[txq_id]->id = txq_id;
883 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
887 q->n_window = slots_num;
889 /* slots_num must be a power-of-two size, otherwise
893 return -EINVAL;
895 q->low_mark = q->n_window / 4;
896 if (q->low_mark < 4)
897 q->low_mark = 4;
899 q->high_mark = q->n_window / 8;
900 if (q->high_mark < 2)
901 q->high_mark = 2;
903 q->write_ptr = 0;
904 q->read_ptr = 0;
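/*
 * Worked example of the mark calculation above (illustrative values only):
 * for a 64-slot TFD window, low_mark = 64/4 = 16 and high_mark = 64/8 = 8;
 * for a 32-slot command-queue window, low_mark = 8 and high_mark = 4.  The
 * clamps only matter for very small windows (low_mark never drops below 4,
 * high_mark never below 2).
 */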
913 trans->trans_cfg->base_params->max_tfd_queue_size;
916 txq->need_update = false;
918 /* max_tfd_queue_size must be a power-of-two size, otherwise
921 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
922 "Max tfd queue size must be a power of two, but is %d",
924 return -EINVAL;
926 /* Initialize queue's high/low-water marks, and head/tail indexes */
931 spin_lock_init(&txq->lock);
932 spin_lock_init(&txq->reclaim_lock);
938 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
942 __skb_queue_head_init(&txq->overflow_q);
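/*
 * Sketch of why the power-of-two requirement above matters: advancing a
 * queue index then becomes a single mask, with no modulo.  This mirrors what
 * the driver's iwl_txq_inc_wrap() helper does, but the function below is a
 * standalone example, not the driver code.
 */
static inline int example_inc_wrap(int index, int queue_size)
{
	/* queue_size must be a power of two for the mask to be correct */
	return (index + 1) & (queue_size - 1);
}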
954 if (!trans_pcie->txq_memory) {
961 spin_lock_bh(&trans_pcie->irq_lock);
963 /* Turn off all Tx DMA fifos */
968 trans_pcie->kw.dma >> 4);
970 spin_unlock_bh(&trans_pcie->irq_lock);
972 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
973 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
975 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
979 trans->cfg->min_txq_size);
982 trans->cfg->min_ba_txq_size);
983 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
986 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
992 * given Tx queue, and enable the DMA channel used for that
993 * queue.
994 * Circular buffer (TFD queue in DRAM) physical base address
997 trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
1001 if (trans->trans_cfg->base_params->num_of_queues > 20)
1019 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1020 return -ENODEV;
1022 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
1027 * command - we will let the NIC sleep once all the host commands
1032 return -EIO;
1039 trans_pcie->cmd_hold_nic_awake = true;
1040 spin_unlock(&trans_pcie->reg_lock);
1047 lockdep_assert_held(&txq->lock);
1049 if (!txq->wd_timeout)
1053 * station is asleep and we send data - that must
1054 * be uAPSD or PS-Poll. Don't rearm the timer.
1056 if (txq->frozen)
1061 * since we're making progress on this queue
1063 if (txq->read_ptr == txq->write_ptr)
1064 del_timer(&txq->stuck_timer);
1066 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1082 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1091 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1095 lockdep_assert_held(&txq->lock);
1098 r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1100 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
1101 (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
1102 WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
1103 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1105 trans->trans_cfg->base_params->max_tfd_queue_size,
1106 txq->write_ptr, txq->read_ptr);
1112 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1116 idx, txq->write_ptr, r);
1121 if (txq->read_ptr == txq->write_ptr)
1137 tbl_dw_addr = trans_pcie->scd_base_addr +
1153 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
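/*
 * Sketch of the RA/TID value handed to the scheduler above: the station
 * (receiver-address) index and the 4-bit QoS TID are packed into one small
 * integer.  The shift below assumes a BUILD_RAxTID()-style
 * "(sta_id << 4) | tid" layout; treat the exact field widths as an
 * assumption made for illustration.
 */
static inline unsigned int example_build_ra_tid(unsigned int sta_id,
						unsigned int tid)
{
	return (sta_id << 4) | (tid & 0xf);
}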
1161 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1162 int fifo = -1;
1165 if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
1166 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1168 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1171 fifo = cfg->fifo;
1173 /* Disable the scheduler prior to configuring the cmd queue */
1174 if (txq_id == trans_pcie->txqs.cmd.q_id &&
1175 trans_pcie->scd_set_active)
1178 /* Stop this Tx queue before configuring it */
1181 /* Set this queue as a chain-building queue unless it is CMD */
1182 if (txq_id != trans_pcie->txqs.cmd.q_id)
1185 if (cfg->aggregate) {
1186 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1188 /* Map receiver-address / traffic-ID to this queue */
1191 /* enable aggregations for the queue */
1193 txq->ampdu = true;
1196 * disable aggregations for the queue, this will also
1198 * since it is now a non-AGG queue.
1202 ssn = txq->read_ptr;
1216 scd_bug = !trans->trans_cfg->mq_rx_supported &&
1217 !((ssn - txq->write_ptr) & 0x3f) &&
1218 (ssn != txq->write_ptr);
1225 txq->read_ptr = (ssn & 0xff);
1226 txq->write_ptr = (ssn & 0xff);
1231 u8 frame_limit = cfg->frame_limit;
1235 /* Set up Tx window size and frame limit for this queue */
1236 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1239 trans_pcie->scd_base_addr +
1244 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1247 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1251 /* enable the scheduler for this queue (only) */
1252 if (txq_id == trans_pcie->txqs.cmd.q_id &&
1253 trans_pcie->scd_set_active)
1257 "Activate queue %d on FIFO %d WrPtr: %d\n",
1261 "Activate queue %d WrPtr: %d\n",
1272 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1274 txq->ampdu = !shared_mode;
1281 u32 stts_addr = trans_pcie->scd_base_addr +
1285 trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
1286 trans_pcie->txqs.txq[txq_id]->frozen = false;
1289 * Upon HW Rfkill - we stop the device, and then stop the queues
1294 if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
1295 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1296 "queue %d not used", txq_id);
1308 trans_pcie->txqs.txq[txq_id]->ampdu = false;
1310 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1313 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1320 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1321 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
1323 if (i == trans_pcie->txqs.cmd.q_id)
1326 /* we skip the command queue (obviously) so it's OK to nest */
1327 spin_lock_nested(&txq->lock, 1);
1329 if (!block && !(WARN_ON_ONCE(!txq->block))) {
1330 txq->block--;
1331 if (!txq->block) {
1333 txq->write_ptr | (i << 8));
1336 txq->block++;
1339 spin_unlock(&txq->lock);
1344 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1350 * command queue.
1356 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1364 u8 group_id = iwl_cmd_groupid(cmd->id);
1371 if (WARN(!trans->wide_cmd_header &&
1373 "unsupported wide command %#x\n", cmd->id))
1374 return -EINVAL;
1385 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1388 cmddata[i] = cmd->data[i];
1389 cmdlen[i] = cmd->len[i];
1391 if (!cmd->len[i])
1396 int copy = IWL_FIRST_TB_SIZE - copy_size;
1400 cmdlen[i] -= copy;
1405 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1407 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1408 idx = -EINVAL;
1411 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1420 idx = -EINVAL;
1427 return -ENOMEM;
1431 idx = -EINVAL;
1436 cmd_size += cmd->len[i];
1447 iwl_get_cmd_string(trans, cmd->id),
1448 cmd->id, copy_size)) {
1449 idx = -EINVAL;
1453 spin_lock_irqsave(&txq->lock, flags);
1455 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1456 spin_unlock_irqrestore(&txq->lock, flags);
1458 IWL_ERR(trans, "No space in command queue\n");
1459 iwl_op_mode_cmd_queue_full(trans->op_mode);
1460 idx = -ENOSPC;
1464 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1465 out_cmd = txq->entries[idx].cmd;
1466 out_meta = &txq->entries[idx].meta;
1468 /* re-initialize, this also marks the SG list as unused */
1470 if (cmd->flags & CMD_WANT_SKB)
1471 out_meta->source = cmd;
1475 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1476 out_cmd->hdr_wide.group_id = group_id;
1477 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1478 out_cmd->hdr_wide.length =
1479 cpu_to_le16(cmd_size -
1481 out_cmd->hdr_wide.reserved = 0;
1482 out_cmd->hdr_wide.sequence =
1483 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1484 INDEX_TO_SEQ(txq->write_ptr));
1489 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1490 out_cmd->hdr.sequence =
1491 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1492 INDEX_TO_SEQ(txq->write_ptr));
1493 out_cmd->hdr.group_id = 0;
1503 if (!cmd->len[i])
1507 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1509 copy = cmd->len[i];
1511 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1519 * in total (for bi-directional DMA), but copy up to what
1522 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1524 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1529 copy = IWL_FIRST_TB_SIZE - copy_size;
1531 if (copy > cmd->len[i])
1532 copy = cmd->len[i];
1539 iwl_get_cmd_string(trans, cmd->id),
1540 group_id, out_cmd->hdr.cmd,
1541 le16_to_cpu(out_cmd->hdr.sequence),
1542 cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
1546 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1553 phys_addr = dma_map_single(trans->dev,
1554 ((u8 *)&out_cmd->hdr) + tb0_size,
1555 copy_size - tb0_size,
1557 if (dma_mapping_error(trans->dev, phys_addr)) {
1559 txq->write_ptr);
1560 idx = -ENOMEM;
1565 copy_size - tb0_size, false);
1574 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1577 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1579 phys_addr = dma_map_single(trans->dev, data,
1581 if (dma_mapping_error(trans->dev, phys_addr)) {
1583 txq->write_ptr);
1584 idx = -ENOMEM;
1591 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1592 out_meta->flags = cmd->flags;
1593 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1594 kfree_sensitive(txq->entries[idx].free_buf);
1595 txq->entries[idx].free_buf = dup_buf;
1597 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1599 /* start timer if queue currently empty */
1600 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1601 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1609 if (cmd->flags & CMD_BLOCK_TXQS)
1612 /* Increment and update queue's write index */
1613 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1617 spin_unlock_irqrestore(&txq->lock, flags);
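/*
 * Sketch of the command sequence field built above: the low byte carries the
 * write index and the bits above it carry the queue number, so the response
 * handler can recover both from pkt->hdr.sequence.  This follows the
 * QUEUE_TO_SEQ()/INDEX_TO_SEQ() pattern used above, but the helpers and
 * field widths here are illustrative assumptions.
 */
#include <stdint.h>

static inline uint16_t example_build_seq(uint16_t queue, uint16_t index)
{
	return (uint16_t)((queue << 8) | (index & 0xff));
}

static inline uint16_t example_seq_to_queue(uint16_t seq)
{
	return seq >> 8;
}

static inline uint16_t example_seq_to_index(uint16_t seq)
{
	return seq & 0xff;
}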
1625 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1631 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1632 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1641 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1643 /* If a Tx command is being handled and it isn't in the actual
1644 * command queue, then a command routing bug has been introduced
1645 * in the queue management code. */
1646 if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
1647 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1648 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
1649 txq->write_ptr)) {
1650 iwl_print_hex_error(trans, pkt, 32);
1654 spin_lock_bh(&txq->lock);
1657 cmd = txq->entries[cmd_index].cmd;
1658 meta = &txq->entries[cmd_index].meta;
1659 group_id = cmd->hdr.group_id;
1660 cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1662 if (trans->trans_cfg->gen2)
1668 /* Input error checking is done when commands are added to queue. */
1669 if (meta->flags & CMD_WANT_SKB) {
1672 meta->source->resp_pkt = pkt;
1674 meta->source->_rx_page_addr = (unsigned long)page_address(p);
1676 meta->source->_page = p;
1678 meta->source->_rx_page_order = trans_pcie->rx_page_order;
1681 if (meta->flags & CMD_BLOCK_TXQS)
1686 if (!(meta->flags & CMD_ASYNC)) {
1687 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1692 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1695 wake_up(&trans->wait_command_queue);
1698 meta->flags = 0;
1700 spin_unlock_bh(&txq->lock);
1714 head_tb_len = skb_headlen(skb) - hdr_len;
1717 dma_addr_t tb_phys = dma_map_single(trans->dev,
1718 skb->data + hdr_len,
1720 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1721 return -EINVAL;
1722 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1728 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1729 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1736 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1739 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1740 return -EINVAL;
1741 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1748 out_meta->tbs |= BIT(tb_idx);
1759 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
1765 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
1770 if (!p->page)
1777 * page - we need it somewhere, and if it's there then we
1779 * trigger the 32-bit boundary hardware bug.
1781 * (see also get_workaround_page() in tx-gen2.c)
1783 if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
1784 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1789 iwl_pcie_free_and_unmap_tso_page(trans, p->page);
1792 p->page = alloc_page(GFP_ATOMIC);
1793 if (!p->page)
1795 p->pos = page_address(p->page);
1797 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1800 info->next = NULL;
1803 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
1805 if (unlikely(dma_mapping_error(trans->dev, phys))) {
1806 __free_page(p->page);
1807 p->page = NULL;
1813 info->dma_addr = phys;
1814 refcount_set(&info->use_count, 1);
1816 *page_ptr = p->page;
1818 refcount_inc(&info->use_count);
1819 ret = p->pos;
1820 p->pos += len;
1826 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
1850 return sg_dma_address(sg) + offset - sg_offset;
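/*
 * Standalone analogue of the lookup above: walk a table of DMA-mapped
 * chunks, find the one that covers a byte offset into the original buffer,
 * and translate that offset into a bus address.  The struct and names are
 * invented for the example; the real code walks a struct sg_table.
 */
#include <stddef.h>
#include <stdint.h>

struct example_dma_chunk {
	uint64_t dma_addr;
	size_t len;
};

static uint64_t example_offset_to_dma(const struct example_dma_chunk *chunks,
				      size_t nchunks, size_t offset, size_t len)
{
	size_t chunk_start = 0;
	size_t i;

	for (i = 0; i < nchunks; i++) {
		if (offset >= chunk_start &&
		    offset + len <= chunk_start + chunks[i].len)
			return chunks[i].dma_addr + (offset - chunk_start);
		chunk_start += chunks[i].len;
	}

	return 0; /* caller treats 0 as "not found" in this sketch */
}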
1861 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
1886 (skb_shinfo(skb)->nr_frags + 1) *
1893 sgt->sgl = (void *)(sgt + 1);
1895 sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
1898 sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
1899 skb->data_len);
1900 if (WARN_ON_ONCE(sgt->orig_nents <= 0))
1904 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
1907 /* Store non-zero (i.e. valid) offset for unmapping */
1908 cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
1920 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1921 struct ieee80211_hdr *hdr = (void *)skb->data;
1923 unsigned int mss = skb_shinfo(skb)->gso_size;
1933 iv_len = ieee80211_has_protected(hdr->frame_control) ?
1936 trace_iwlwifi_dev_tx(trans->dev, skb,
1937 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1938 trans_pcie->txqs.tfd.size,
1939 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
1943 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
1946 /* total amount of header we may need for this A-MSDU */
1953 return -ENOMEM;
1957 memcpy(pos_hdr, skb->data + hdr_len, iv_len);
1969 * all the different MSDUs inside the A-MSDU.
1971 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
1983 total_len -= data_left;
1987 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
2006 hdr_tb_len = pos_hdr - start_hdr;
2011 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
2014 le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
2028 return -EINVAL;
2032 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
2035 data_left -= size;
2041 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
2044 /* re-add the WiFi header and IV */
2056 /* No A-MSDU without CONFIG_INET */
2059 return -1;
2067 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2075 int write_ptr = txq->write_ptr;
2076 int txq_id = txq->id;
2080 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
2081 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2082 u8 sta_id = tx_cmd->sta_id;
2084 scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2086 sec_ctl = tx_cmd->sec_ctl;
2099 if (trans_pcie->txqs.bc_table_dword)
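/*
 * Sketch of a byte-count table entry like the one set up above: a 12-bit
 * length in the low bits with the station id in the top nibble.  The exact
 * overhead added to the MPDU length and the dword scaling controlled by
 * bc_table_dword are driver details; treat the constants below as
 * assumptions for illustration.
 */
#include <stdint.h>

static inline uint16_t example_bc_entry(uint16_t byte_count, uint8_t sta_id,
					int bc_table_dword)
{
	uint16_t len = byte_count;

	if (bc_table_dword)
		len = (uint16_t)((len + 3) / 4);	/* scale to dwords */

	return (uint16_t)((len & 0xfff) | ((uint16_t)sta_id << 12));
}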
2119 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2132 txq = trans_pcie->txqs.txq[txq_id];
2134 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
2135 "TX on unused queue %d\n", txq_id))
2136 return -EINVAL;
2139 skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
2141 return -ENOMEM;
2146 hdr = (struct ieee80211_hdr *)skb->data;
2147 fc = hdr->frame_control;
2150 spin_lock(&txq->lock);
2152 if (iwl_txq_space(trans, txq) < txq->high_mark) {
2159 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2160 trans_pcie->txqs.dev_cmd_offs);
2163 __skb_queue_tail(&txq->overflow_q, skb);
2165 spin_unlock(&txq->lock);
2175 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2176 WARN_ONCE(txq->ampdu &&
2177 (wifi_seq & 0xff) != txq->write_ptr,
2179 txq_id, wifi_seq, txq->write_ptr);
2182 txq->entries[txq->write_ptr].skb = skb;
2183 txq->entries[txq->write_ptr].cmd = dev_cmd;
2185 dev_cmd->hdr.sequence =
2187 INDEX_TO_SEQ(txq->write_ptr)));
2189 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
2193 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2194 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2196 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2197 out_meta = &txq->entries[txq->write_ptr].meta;
2201 * The second TB (tb1) points to the remainder of the TX command
2202 * and the 802.11 header - dword aligned size
2203 * (This calculation modifies the TX command, so do it before the
2207 hdr_len - IWL_FIRST_TB_SIZE;
2208 /* do not align A-MSDU to dword as the subframe header aligns it */
2214 /* Tell NIC about any 2-byte padding after MAC header */
2216 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
2222 * The first TB points to bi-directional DMA data, we'll
2235 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2236 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2237 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2241 trace_iwlwifi_dev_tx(trans->dev, skb,
2242 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
2243 trans_pcie->txqs.tfd.size,
2244 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2251 * pre-built, and we just need to send the resulting skb.
2253 if (amsdu && skb_shinfo(skb)->gso_size) {
2272 /* building the A-MSDU might have changed this data, so memcpy it now */
2273 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
2275 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
2276 /* Set up entry for this TFD in Tx byte-count array */
2277 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2282 /* start timer if queue currently empty */
2283 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
2290 if (!txq->frozen)
2291 mod_timer(&txq->stuck_timer,
2292 jiffies + txq->wd_timeout);
2294 txq->frozen_expiry_remainder = txq->wd_timeout;
2298 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
2304 * and we will get a TX status notification eventually.
2306 spin_unlock(&txq->lock);
2309 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2310 spin_unlock(&txq->lock);
2311 return -1;
2319 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2320 int txq_id = txq->id;
2323 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
2324 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2328 if (txq_id != trans_pcie->txqs.cmd.q_id)
2329 sta_id = tx_cmd->sta_id;
2345 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2349 /* This function is not meant to release the cmd queue */
2350 if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
2358 spin_lock_bh(&txq->reclaim_lock);
2360 spin_lock(&txq->lock);
2361 txq_read_ptr = txq->read_ptr;
2362 txq_write_ptr = txq->write_ptr;
2363 spin_unlock(&txq->lock);
2367 if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
2368 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
2376 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2386 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
2388 trans->trans_cfg->base_params->max_tfd_queue_size,
2391 iwl_op_mode_time_point(trans->op_mode,
2404 struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
2405 struct sk_buff *skb = txq->entries[read_ptr].skb;
2407 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
2415 txq->entries[read_ptr].skb = NULL;
2417 if (!trans->trans_cfg->gen2)
2424 spin_lock(&txq->lock);
2425 txq->read_ptr = txq_read_ptr;
2429 if (iwl_txq_space(trans, txq) > txq->low_mark &&
2430 test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
2435 skb_queue_splice_init(&txq->overflow_q,
2439 * We are going to transmit from the overflow queue.
2441 * are adding more packets to the TFD queue. It cannot rely on
2442 * the state of &txq->overflow_q, as we just emptied it, but
2445 txq->overflow_tx = true;
2450 * from that path. We stopped tx, so we can't have tx as well.
2451 * Bottom line, we can unlock and re-lock later.
2453 spin_unlock(&txq->lock);
2458 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
2459 trans_pcie->txqs.dev_cmd_offs);
2464 * and we won't wake mac80211's queue.
2469 if (iwl_txq_space(trans, txq) > txq->low_mark)
2472 spin_lock(&txq->lock);
2473 txq->overflow_tx = false;
2476 spin_unlock(&txq->lock);
2478 spin_unlock_bh(&txq->reclaim_lock);
2485 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2487 spin_lock_bh(&txq->lock);
2489 txq->write_ptr = ptr;
2490 txq->read_ptr = txq->write_ptr;
2492 spin_unlock_bh(&txq->lock);
2499 int queue;
2501 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
2502 struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
2505 spin_lock_bh(&txq->lock);
2509 if (txq->frozen == freeze)
2513 freeze ? "Freezing" : "Waking", queue);
2515 txq->frozen = freeze;
2517 if (txq->read_ptr == txq->write_ptr)
2522 txq->stuck_timer.expires))) {
2530 txq->frozen_expiry_remainder =
2531 txq->stuck_timer.expires - now;
2532 del_timer(&txq->stuck_timer);
2537 * Wake a non-empty queue -> arm timer with the
2540 mod_timer(&txq->stuck_timer,
2541 now + txq->frozen_expiry_remainder);
2544 spin_unlock_bh(&txq->lock);
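/*
 * Sketch of the freeze/wake bookkeeping above: pausing the watchdog saves
 * how much time was left, resuming re-arms it with that remainder.  Types
 * and names are simplified for the example (the driver uses jiffies and a
 * struct timer_list).
 */
#include <stdint.h>

struct example_watchdog {
	uint64_t expires;	/* absolute deadline while running */
	uint64_t remainder;	/* time left, saved while frozen */
	int running;
};

static void example_watchdog_freeze(struct example_watchdog *wd, uint64_t now)
{
	if (wd->running && wd->expires > now)
		wd->remainder = wd->expires - now;
	wd->running = 0;
}

static void example_watchdog_wake(struct example_watchdog *wd, uint64_t now)
{
	wd->expires = now + wd->remainder;
	wd->running = 1;
}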
2554 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
2555 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
2562 &trans->status),
2564 return -EIO;
2568 if (trans->trans_cfg->gen2)
2575 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2581 ret = wait_event_timeout(trans->wait_command_queue,
2583 &trans->status),
2589 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
2590 txq->read_ptr, txq->write_ptr);
2592 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2595 ret = -ETIMEDOUT;
2601 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
2603 &trans->status)) {
2607 ret = -EIO;
2611 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2612 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2614 ret = -ERFKILL;
2618 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
2620 ret = -EIO;
2627 if (cmd->flags & CMD_WANT_SKB) {
2630 * TX cmd queue. Otherwise in case the cmd comes
2632 * address (cmd->meta.source).
2634 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
2637 if (cmd->resp_pkt) {
2639 cmd->resp_pkt = NULL;
2649 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2650 return -ENODEV;
2652 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2653 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2655 cmd->id);
2656 return -ERFKILL;
2659 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
2660 !(cmd->flags & CMD_SEND_IN_D3))) {
2661 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
2662 return -EHOSTDOWN;
2665 if (cmd->flags & CMD_ASYNC) {
2669 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
2670 return -EINVAL;
2672 if (trans->trans_cfg->gen2)
2680 iwl_get_cmd_string(trans, cmd->id), ret);