xref: /linux/drivers/net/wireless/intel/iwlwifi/pcie/tx.c (revision a4a35f6cbebbf9466b6c412506ab89299d567f51)
18e99ea8dSJohannes Berg // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
28e99ea8dSJohannes Berg /*
349101078SJohannes Berg  * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
48e99ea8dSJohannes Berg  * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
58e99ea8dSJohannes Berg  * Copyright (C) 2016-2017 Intel Deutschland GmbH
68e99ea8dSJohannes Berg  */
7e705c121SKalle Valo #include <linux/etherdevice.h>
86eb5e529SEmmanuel Grumbach #include <linux/ieee80211.h>
949101078SJohannes Berg #include <linux/dmapool.h>
10e705c121SKalle Valo #include <linux/slab.h>
11e705c121SKalle Valo #include <linux/sched.h>
1249101078SJohannes Berg #include <linux/tcp.h>
136eb5e529SEmmanuel Grumbach #include <net/ip6_checksum.h>
146eb5e529SEmmanuel Grumbach #include <net/tso.h>
15e705c121SKalle Valo 
1649101078SJohannes Berg #include "fw/api/commands.h"
1749101078SJohannes Berg #include "fw/api/datapath.h"
1849101078SJohannes Berg #include "fw/api/debug.h"
1949101078SJohannes Berg #include "iwl-fh.h"
20e705c121SKalle Valo #include "iwl-debug.h"
21e705c121SKalle Valo #include "iwl-csr.h"
22e705c121SKalle Valo #include "iwl-prph.h"
23e705c121SKalle Valo #include "iwl-io.h"
24e705c121SKalle Valo #include "iwl-scd.h"
25e705c121SKalle Valo #include "iwl-op-mode.h"
26e705c121SKalle Valo #include "internal.h"
27d172a5efSJohannes Berg #include "fw/api/tx.h"
28e705c121SKalle Valo 
29e705c121SKalle Valo /*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
30e705c121SKalle Valo  * DMA services
31e705c121SKalle Valo  *
32e705c121SKalle Valo  * Theory of operation
33e705c121SKalle Valo  *
34e705c121SKalle Valo  * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
35e705c121SKalle Valo  * of buffer descriptors, each of which points to one or more data buffers for
36e705c121SKalle Valo  * the device to read from or fill.  Driver and device exchange status of each
37e705c121SKalle Valo  * queue via "read" and "write" pointers.  The driver keeps a minimum of 2
38e705c121SKalle Valo  * empty entries in each circular buffer, to protect against confusing the
39e705c121SKalle Valo  * empty and full queue states.
40e705c121SKalle Valo  *
41e705c121SKalle Valo  * The device reads or writes the data in the queues via the device's several
42e705c121SKalle Valo  * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
43e705c121SKalle Valo  *
44e705c121SKalle Valo  * For Tx queues, there are low mark and high mark limits.  If, after queuing
45e705c121SKalle Valo  * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
46e705c121SKalle Valo  * When packets are reclaimed (on the 'tx done' IRQ) and the free space becomes
47e705c121SKalle Valo  * > high mark, the Tx queue is resumed.
48e705c121SKalle Valo  *
49e705c121SKalle Valo  ***************************************************/
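/*
 * Illustrative sketch (not part of the driver): with a circular buffer of
 * max_tfd_queue_size entries (a power of two) and the "write"/"read" indexes
 * described above, the bookkeeping boils down to roughly:
 *
 *	used = (write_ptr - read_ptr) & (max_tfd_queue_size - 1);
 *	free = max_tfd_queue_size - used - 2;	(keep 2 entries empty, see above)
 *
 * When 'free' drops below the low mark the queue is stopped; once reclaim
 * pushes it back above the high mark the queue is woken again.  The driver's
 * actual accounting lives in its iwl_txq_*() helpers.
 */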
50e22744afSSara Sharon 
51e705c121SKalle Valo 
5213a3a390SSara Sharon int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
53e705c121SKalle Valo 			   struct iwl_dma_ptr *ptr, size_t size)
54e705c121SKalle Valo {
55e705c121SKalle Valo 	if (WARN_ON(ptr->addr))
56e705c121SKalle Valo 		return -EINVAL;
57e705c121SKalle Valo 
58e705c121SKalle Valo 	ptr->addr = dma_alloc_coherent(trans->dev, size,
59e705c121SKalle Valo 				       &ptr->dma, GFP_KERNEL);
60e705c121SKalle Valo 	if (!ptr->addr)
61e705c121SKalle Valo 		return -ENOMEM;
62e705c121SKalle Valo 	ptr->size = size;
63e705c121SKalle Valo 	return 0;
64e705c121SKalle Valo }
65e705c121SKalle Valo 
6613a3a390SSara Sharon void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
67e705c121SKalle Valo {
68e705c121SKalle Valo 	if (unlikely(!ptr->addr))
69e705c121SKalle Valo 		return;
70e705c121SKalle Valo 
71e705c121SKalle Valo 	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
72e705c121SKalle Valo 	memset(ptr, 0, sizeof(*ptr));
73e705c121SKalle Valo }
74e705c121SKalle Valo 
75e705c121SKalle Valo /*
76e705c121SKalle Valo  * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
77e705c121SKalle Valo  */
78e705c121SKalle Valo static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
79e705c121SKalle Valo 				    struct iwl_txq *txq)
80e705c121SKalle Valo {
8149101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
82e705c121SKalle Valo 	u32 reg = 0;
83bb98ecd4SSara Sharon 	int txq_id = txq->id;
84e705c121SKalle Valo 
85e705c121SKalle Valo 	lockdep_assert_held(&txq->lock);
86e705c121SKalle Valo 
87e705c121SKalle Valo 	/*
88e705c121SKalle Valo 	 * explicitly wake up the NIC if:
89e705c121SKalle Valo 	 * 1. shadow registers aren't enabled
90e705c121SKalle Valo 	 * 2. NIC is woken up for CMD regardless of shadow outside this function
91e705c121SKalle Valo 	 * 3. there is a chance that the NIC is asleep
92e705c121SKalle Valo 	 */
93286ca8ebSLuca Coelho 	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
9449101078SJohannes Berg 	    txq_id != trans_pcie->txqs.cmd.q_id &&
95e705c121SKalle Valo 	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
96e705c121SKalle Valo 		/*
97e705c121SKalle Valo 		 * wake up nic if it's powered down ...
98e705c121SKalle Valo 		 * uCode will wake up, and interrupt us again, so next
99e705c121SKalle Valo 		 * time we'll skip this part.
100e705c121SKalle Valo 		 */
101e705c121SKalle Valo 		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
102e705c121SKalle Valo 
103e705c121SKalle Valo 		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
104e705c121SKalle Valo 			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
105e705c121SKalle Valo 				       txq_id, reg);
106e705c121SKalle Valo 			iwl_set_bit(trans, CSR_GP_CNTRL,
1076dece0e9SLuca Coelho 				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
108e705c121SKalle Valo 			txq->need_update = true;
109e705c121SKalle Valo 			return;
110e705c121SKalle Valo 		}
111e705c121SKalle Valo 	}
112e705c121SKalle Valo 
113e705c121SKalle Valo 	/*
114e705c121SKalle Valo 	 * if not in power-save mode, uCode will never sleep when we're
115e705c121SKalle Valo 	 * trying to tx (during RFKILL, we're not trying to tx).
116e705c121SKalle Valo 	 */
117bb98ecd4SSara Sharon 	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
1180cd58eaaSEmmanuel Grumbach 	if (!txq->block)
1190cd58eaaSEmmanuel Grumbach 		iwl_write32(trans, HBUS_TARG_WRPTR,
120bb98ecd4SSara Sharon 			    txq->write_ptr | (txq_id << 8));
121e705c121SKalle Valo }
122e705c121SKalle Valo 
123e705c121SKalle Valo void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
124e705c121SKalle Valo {
12549101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
126e705c121SKalle Valo 	int i;
127e705c121SKalle Valo 
128286ca8ebSLuca Coelho 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
12949101078SJohannes Berg 		struct iwl_txq *txq = trans_pcie->txqs.txq[i];
130e705c121SKalle Valo 
13149101078SJohannes Berg 		if (!test_bit(i, trans_pcie->txqs.queue_used))
132f6eac740SMordechai Goodstein 			continue;
133f6eac740SMordechai Goodstein 
134e705c121SKalle Valo 		spin_lock_bh(&txq->lock);
135b2a3b1c1SSara Sharon 		if (txq->need_update) {
136e705c121SKalle Valo 			iwl_pcie_txq_inc_wr_ptr(trans, txq);
137b2a3b1c1SSara Sharon 			txq->need_update = false;
138e705c121SKalle Valo 		}
139e705c121SKalle Valo 		spin_unlock_bh(&txq->lock);
140e705c121SKalle Valo 	}
141e705c121SKalle Valo }
142e705c121SKalle Valo 
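/*
 * Fill TB (transfer buffer) entry @idx of a gen1 TFD: the low 32 bits of the
 * DMA address go into tb->lo, while tb->hi_n_len packs the length in its
 * upper 12 bits and the high DMA address bits (above bit 31) in its low
 * nibble.  The TFD's TB count is updated to include the new entry.
 */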
14349101078SJohannes Berg static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
14449101078SJohannes Berg 					    u8 idx, dma_addr_t addr, u16 len)
14549101078SJohannes Berg {
14649101078SJohannes Berg 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
14749101078SJohannes Berg 	u16 hi_n_len = len << 4;
14849101078SJohannes Berg 
14949101078SJohannes Berg 	put_unaligned_le32(addr, &tb->lo);
15049101078SJohannes Berg 	hi_n_len |= iwl_get_dma_hi_addr(addr);
15149101078SJohannes Berg 
15249101078SJohannes Berg 	tb->hi_n_len = cpu_to_le16(hi_n_len);
15349101078SJohannes Berg 
15449101078SJohannes Berg 	tfd->num_tbs = idx + 1;
15549101078SJohannes Berg }
15649101078SJohannes Berg 
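/* The number of used TBs is kept in the low 5 bits of the TFD's num_tbs field */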
15749101078SJohannes Berg static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
15849101078SJohannes Berg {
15949101078SJohannes Berg 	return tfd->num_tbs & 0x1f;
16049101078SJohannes Berg }
16149101078SJohannes Berg 
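/*
 * Append one TB (DMA address + length) to the TFD at the queue's current
 * write pointer, optionally zeroing the TFD first.  Returns the index of the
 * TB that was added, or -EINVAL if the TFD already holds the maximum number
 * of TBs or the address is not properly aligned.
 */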
162e705c121SKalle Valo static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
163e705c121SKalle Valo 				  dma_addr_t addr, u16 len, bool reset)
164e705c121SKalle Valo {
16549101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1666983ba69SSara Sharon 	void *tfd;
167e705c121SKalle Valo 	u32 num_tbs;
168e705c121SKalle Valo 
16949101078SJohannes Berg 	tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
170e705c121SKalle Valo 
171e705c121SKalle Valo 	if (reset)
17249101078SJohannes Berg 		memset(tfd, 0, trans_pcie->txqs.tfd.size);
173e705c121SKalle Valo 
17449101078SJohannes Berg 	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
175e705c121SKalle Valo 
1766983ba69SSara Sharon 	/* Each TFD can point to a maximum max_tbs Tx buffers */
17749101078SJohannes Berg 	if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
178e705c121SKalle Valo 		IWL_ERR(trans, "Error can not send more than %d chunks\n",
17949101078SJohannes Berg 			trans_pcie->txqs.tfd.max_tbs);
180e705c121SKalle Valo 		return -EINVAL;
181e705c121SKalle Valo 	}
182e705c121SKalle Valo 
183e705c121SKalle Valo 	if (WARN(addr & ~IWL_TX_DMA_MASK,
184e705c121SKalle Valo 		 "Unaligned address = %llx\n", (unsigned long long)addr))
185e705c121SKalle Valo 		return -EINVAL;
186e705c121SKalle Valo 
18749101078SJohannes Berg 	iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
188e705c121SKalle Valo 
189e705c121SKalle Valo 	return num_tbs;
190e705c121SKalle Valo }
191e705c121SKalle Valo 
19201d11cd1SSara Sharon static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
19301d11cd1SSara Sharon {
19401d11cd1SSara Sharon 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
19501d11cd1SSara Sharon 
196286ca8ebSLuca Coelho 	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
19701d11cd1SSara Sharon 		return;
19872bc934cSJohannes Berg 
19972bc934cSJohannes Berg 	spin_lock(&trans_pcie->reg_lock);
20072bc934cSJohannes Berg 
20172bc934cSJohannes Berg 	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
20272bc934cSJohannes Berg 		spin_unlock(&trans_pcie->reg_lock);
20301d11cd1SSara Sharon 		return;
20472bc934cSJohannes Berg 	}
20501d11cd1SSara Sharon 
20601d11cd1SSara Sharon 	trans_pcie->cmd_hold_nic_awake = false;
20701d11cd1SSara Sharon 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2086dece0e9SLuca Coelho 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
20972bc934cSJohannes Berg 	spin_unlock(&trans_pcie->reg_lock);
21001d11cd1SSara Sharon }
21101d11cd1SSara Sharon 
212adc902ceSBenjamin Berg static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
213adc902ceSBenjamin Berg 					     struct page *page)
214adc902ceSBenjamin Berg {
215adc902ceSBenjamin Berg 	struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
216adc902ceSBenjamin Berg 
217adc902ceSBenjamin Berg 	/* Decrease internal use count and unmap/free page if needed */
218adc902ceSBenjamin Berg 	if (refcount_dec_and_test(&info->use_count)) {
219adc902ceSBenjamin Berg 		dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
220adc902ceSBenjamin Berg 			       DMA_TO_DEVICE);
221adc902ceSBenjamin Berg 
222adc902ceSBenjamin Berg 		__free_page(page);
223adc902ceSBenjamin Berg 	}
224adc902ceSBenjamin Berg }
225adc902ceSBenjamin Berg 
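/*
 * Release the per-skb chain of TSO pages: the head pointer is stashed in
 * skb->cb at the transport's page_offs, and the pages are linked through
 * iwl_tso_page_info::next.  The scatter-gather table stored on the last page
 * (if any) is unmapped before the pages themselves are unreferenced.
 */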
226adc902ceSBenjamin Berg void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
2277f5e3038SBenjamin Berg 			     struct iwl_cmd_meta *cmd_meta)
22849101078SJohannes Berg {
22949101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
23049101078SJohannes Berg 	struct page **page_ptr;
23149101078SJohannes Berg 	struct page *next;
23249101078SJohannes Berg 
23349101078SJohannes Berg 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
23449101078SJohannes Berg 	next = *page_ptr;
23549101078SJohannes Berg 	*page_ptr = NULL;
23649101078SJohannes Berg 
23749101078SJohannes Berg 	while (next) {
238adc902ceSBenjamin Berg 		struct iwl_tso_page_info *info;
23949101078SJohannes Berg 		struct page *tmp = next;
24049101078SJohannes Berg 
241adc902ceSBenjamin Berg 		info = IWL_TSO_PAGE_INFO(page_address(next));
242adc902ceSBenjamin Berg 		next = info->next;
2437f5e3038SBenjamin Berg 
2447f5e3038SBenjamin Berg 		/* Unmap the scatter gather list that is on the last page */
2457f5e3038SBenjamin Berg 		if (!next && cmd_meta->sg_offset) {
2467f5e3038SBenjamin Berg 			struct sg_table *sgt;
2477f5e3038SBenjamin Berg 
2487f5e3038SBenjamin Berg 			sgt = (void *)((u8 *)page_address(tmp) +
2497f5e3038SBenjamin Berg 				       cmd_meta->sg_offset);
2507f5e3038SBenjamin Berg 
2517f5e3038SBenjamin Berg 			dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
2527f5e3038SBenjamin Berg 		}
2537f5e3038SBenjamin Berg 
254adc902ceSBenjamin Berg 		iwl_pcie_free_and_unmap_tso_page(trans, tmp);
25549101078SJohannes Berg 	}
25649101078SJohannes Berg }
25749101078SJohannes Berg 
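/*
 * Read back the DMA address of TB @idx: the low 32 bits come from tb->lo and,
 * on platforms with a 64-bit dma_addr_t, the high bits are recovered from the
 * low nibble of tb->hi_n_len.
 */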
25849101078SJohannes Berg static inline dma_addr_t
25949101078SJohannes Berg iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
26049101078SJohannes Berg {
26149101078SJohannes Berg 	struct iwl_tfd_tb *tb = &tfd->tbs[idx];
26249101078SJohannes Berg 	dma_addr_t addr;
26349101078SJohannes Berg 	dma_addr_t hi_len;
26449101078SJohannes Berg 
26549101078SJohannes Berg 	addr = get_unaligned_le32(&tb->lo);
26649101078SJohannes Berg 
26749101078SJohannes Berg 	if (sizeof(dma_addr_t) <= sizeof(u32))
26849101078SJohannes Berg 		return addr;
26949101078SJohannes Berg 
27049101078SJohannes Berg 	hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
27149101078SJohannes Berg 
27249101078SJohannes Berg 	/*
27349101078SJohannes Berg 	 * shift by 16 twice to avoid warnings on 32-bit
27449101078SJohannes Berg 	 * (where this code never runs anyway due to the
27549101078SJohannes Berg 	 * if statement above)
27649101078SJohannes Berg 	 */
27749101078SJohannes Berg 	return addr | ((hi_len << 16) << 16);
27849101078SJohannes Berg }
27949101078SJohannes Berg 
28049101078SJohannes Berg static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
28149101078SJohannes Berg 					 struct iwl_tfd *tfd)
28249101078SJohannes Berg {
28349101078SJohannes Berg 	tfd->num_tbs = 0;
28449101078SJohannes Berg 
28549101078SJohannes Berg 	iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
28649101078SJohannes Berg 				 trans->invalid_tx_cmd.size);
28749101078SJohannes Berg }
28849101078SJohannes Berg 
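/*
 * Unmap every DMA mapping of the TFD at @index: TB0 (the bidirectional
 * first_tb buffer) is never unmapped, and each remaining TB is released as a
 * page or single mapping depending on the corresponding bit in meta->tbs.
 * The TFD is then pointed at the invalid/dummy TX command again.
 */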
28949101078SJohannes Berg static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
29049101078SJohannes Berg 				   struct iwl_cmd_meta *meta,
29149101078SJohannes Berg 				   struct iwl_txq *txq, int index)
29249101078SJohannes Berg {
29349101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
29449101078SJohannes Berg 	int i, num_tbs;
29549101078SJohannes Berg 	struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
29649101078SJohannes Berg 
29749101078SJohannes Berg 	/* Sanity check on number of chunks */
29849101078SJohannes Berg 	num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
29949101078SJohannes Berg 
30049101078SJohannes Berg 	if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
30149101078SJohannes Berg 		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
30249101078SJohannes Berg 		/* @todo issue fatal error, it is quite a serious situation */
30349101078SJohannes Berg 		return;
30449101078SJohannes Berg 	}
30549101078SJohannes Berg 
30690db5075SBenjamin Berg 	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
30790db5075SBenjamin Berg 	if (meta->sg_offset)
30890db5075SBenjamin Berg 		num_tbs = 2;
30990db5075SBenjamin Berg 
31049101078SJohannes Berg 	/* first TB is never freed - it's the bidirectional DMA data */
31149101078SJohannes Berg 
31249101078SJohannes Berg 	for (i = 1; i < num_tbs; i++) {
31349101078SJohannes Berg 		if (meta->tbs & BIT(i))
31449101078SJohannes Berg 			dma_unmap_page(trans->dev,
31549101078SJohannes Berg 				       iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
31649101078SJohannes Berg 				       iwl_txq_gen1_tfd_tb_get_len(trans,
31749101078SJohannes Berg 								   tfd, i),
31849101078SJohannes Berg 				       DMA_TO_DEVICE);
31949101078SJohannes Berg 		else
32049101078SJohannes Berg 			dma_unmap_single(trans->dev,
32149101078SJohannes Berg 					 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
32249101078SJohannes Berg 					 iwl_txq_gen1_tfd_tb_get_len(trans,
32349101078SJohannes Berg 								     tfd, i),
32449101078SJohannes Berg 					 DMA_TO_DEVICE);
32549101078SJohannes Berg 	}
32649101078SJohannes Berg 
32749101078SJohannes Berg 	meta->tbs = 0;
32849101078SJohannes Berg 
32949101078SJohannes Berg 	iwl_txq_set_tfd_invalid_gen1(trans, tfd);
33049101078SJohannes Berg }
33149101078SJohannes Berg 
33249101078SJohannes Berg /**
33349101078SJohannes Berg  * iwl_txq_free_tfd - Free all chunks referenced by the TFD at @read_ptr
33449101078SJohannes Berg  * @trans: transport private data
33549101078SJohannes Berg  * @txq: tx queue
336a2ed933dSBenjamin Berg  * @read_ptr: the TXQ read_ptr to free
33749101078SJohannes Berg  *
33849101078SJohannes Berg  * Does NOT advance any TFD circular buffer read/write indexes
33949101078SJohannes Berg  * Does NOT free the TFD itself (which is within circular buffer)
34049101078SJohannes Berg  */
341a2ed933dSBenjamin Berg static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
342a2ed933dSBenjamin Berg 			     int read_ptr)
34349101078SJohannes Berg {
34449101078SJohannes Berg 	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
34549101078SJohannes Berg 	 * idx is bounded by n_window
34649101078SJohannes Berg 	 */
347a2ed933dSBenjamin Berg 	int idx = iwl_txq_get_cmd_index(txq, read_ptr);
34849101078SJohannes Berg 	struct sk_buff *skb;
34949101078SJohannes Berg 
350a2ed933dSBenjamin Berg 	lockdep_assert_held(&txq->reclaim_lock);
35149101078SJohannes Berg 
35249101078SJohannes Berg 	if (!txq->entries)
35349101078SJohannes Berg 		return;
35449101078SJohannes Berg 
35549101078SJohannes Berg 	/* We have only q->n_window txq->entries, but we use
35649101078SJohannes Berg 	 * TFD_QUEUE_SIZE_MAX tfds
35749101078SJohannes Berg 	 */
35849101078SJohannes Berg 	if (trans->trans_cfg->gen2)
35949101078SJohannes Berg 		iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
360a2ed933dSBenjamin Berg 				       iwl_txq_get_tfd(trans, txq, read_ptr));
36149101078SJohannes Berg 	else
36249101078SJohannes Berg 		iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
363a2ed933dSBenjamin Berg 				       txq, read_ptr);
36449101078SJohannes Berg 
36549101078SJohannes Berg 	/* free SKB */
36649101078SJohannes Berg 	skb = txq->entries[idx].skb;
36749101078SJohannes Berg 
36849101078SJohannes Berg 	/* Can be called from irqs-disabled context
36949101078SJohannes Berg 	 * If skb is not NULL, it means that the whole queue is being
37049101078SJohannes Berg 	 * freed and that the queue is not empty - free the skb
37149101078SJohannes Berg 	 */
37249101078SJohannes Berg 	if (skb) {
37349101078SJohannes Berg 		iwl_op_mode_free_skb(trans->op_mode, skb);
37449101078SJohannes Berg 		txq->entries[idx].skb = NULL;
37549101078SJohannes Berg 	}
37649101078SJohannes Berg }
37749101078SJohannes Berg 
378e705c121SKalle Valo /*
379e705c121SKalle Valo  * iwl_pcie_txq_unmap -  Unmap any remaining DMA mappings and free skb's
380e705c121SKalle Valo  */
381e705c121SKalle Valo static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
382e705c121SKalle Valo {
38349101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
38449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
385e705c121SKalle Valo 
38698c7d21fSEmmanuel Grumbach 	if (!txq) {
38798c7d21fSEmmanuel Grumbach 		IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
38898c7d21fSEmmanuel Grumbach 		return;
38998c7d21fSEmmanuel Grumbach 	}
39098c7d21fSEmmanuel Grumbach 
391a2ed933dSBenjamin Berg 	spin_lock_bh(&txq->reclaim_lock);
392a2ed933dSBenjamin Berg 	spin_lock(&txq->lock);
393bb98ecd4SSara Sharon 	while (txq->write_ptr != txq->read_ptr) {
394e705c121SKalle Valo 		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
395bb98ecd4SSara Sharon 				   txq_id, txq->read_ptr);
3966eb5e529SEmmanuel Grumbach 
39749101078SJohannes Berg 		if (txq_id != trans_pcie->txqs.cmd.q_id) {
398bb98ecd4SSara Sharon 			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
3997f5e3038SBenjamin Berg 			struct iwl_cmd_meta *cmd_meta =
4007f5e3038SBenjamin Berg 				&txq->entries[txq->read_ptr].meta;
4016eb5e529SEmmanuel Grumbach 
4026eb5e529SEmmanuel Grumbach 			if (WARN_ON_ONCE(!skb))
4036eb5e529SEmmanuel Grumbach 				continue;
4046eb5e529SEmmanuel Grumbach 
405adc902ceSBenjamin Berg 			iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
4066eb5e529SEmmanuel Grumbach 		}
407a2ed933dSBenjamin Berg 		iwl_txq_free_tfd(trans, txq, txq->read_ptr);
4080cd1ad2dSMordechay Goodstein 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
40901d11cd1SSara Sharon 
41072bc934cSJohannes Berg 		if (txq->read_ptr == txq->write_ptr &&
41149101078SJohannes Berg 		    txq_id == trans_pcie->txqs.cmd.q_id)
41201d11cd1SSara Sharon 			iwl_pcie_clear_cmd_in_flight(trans);
413e705c121SKalle Valo 	}
4143955525dSEmmanuel Grumbach 
4153955525dSEmmanuel Grumbach 	while (!skb_queue_empty(&txq->overflow_q)) {
4163955525dSEmmanuel Grumbach 		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
4173955525dSEmmanuel Grumbach 
4183955525dSEmmanuel Grumbach 		iwl_op_mode_free_skb(trans->op_mode, skb);
4193955525dSEmmanuel Grumbach 	}
4203955525dSEmmanuel Grumbach 
421a2ed933dSBenjamin Berg 	spin_unlock(&txq->lock);
422a2ed933dSBenjamin Berg 	spin_unlock_bh(&txq->reclaim_lock);
423e705c121SKalle Valo 
424e705c121SKalle Valo 	/* just in case - this queue may have been stopped */
42549101078SJohannes Berg 	iwl_trans_pcie_wake_queue(trans, txq);
426e705c121SKalle Valo }
427e705c121SKalle Valo 
428e705c121SKalle Valo /*
429e705c121SKalle Valo  * iwl_pcie_txq_free - Deallocate DMA queue.
430e705c121SKalle Valo  * @txq: Transmit queue to deallocate.
431e705c121SKalle Valo  *
432e705c121SKalle Valo  * Empty queue by removing and destroying all BD's.
433e705c121SKalle Valo  * Free all buffers.
434e705c121SKalle Valo  * 0-fill, but do not free "txq" descriptor structure.
435e705c121SKalle Valo  */
436e705c121SKalle Valo static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
437e705c121SKalle Valo {
43849101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
43949101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
440e705c121SKalle Valo 	struct device *dev = trans->dev;
441e705c121SKalle Valo 	int i;
442e705c121SKalle Valo 
443e705c121SKalle Valo 	if (WARN_ON(!txq))
444e705c121SKalle Valo 		return;
445e705c121SKalle Valo 
446e705c121SKalle Valo 	iwl_pcie_txq_unmap(trans, txq_id);
447e705c121SKalle Valo 
448e705c121SKalle Valo 	/* De-alloc array of command/tx buffers */
44949101078SJohannes Berg 	if (txq_id == trans_pcie->txqs.cmd.q_id)
450bb98ecd4SSara Sharon 		for (i = 0; i < txq->n_window; i++) {
451453431a5SWaiman Long 			kfree_sensitive(txq->entries[i].cmd);
452453431a5SWaiman Long 			kfree_sensitive(txq->entries[i].free_buf);
453e705c121SKalle Valo 		}
454e705c121SKalle Valo 
455e705c121SKalle Valo 	/* De-alloc circular buffer of TFDs */
456e705c121SKalle Valo 	if (txq->tfds) {
457e705c121SKalle Valo 		dma_free_coherent(dev,
45849101078SJohannes Berg 				  trans_pcie->txqs.tfd.size *
459286ca8ebSLuca Coelho 				  trans->trans_cfg->base_params->max_tfd_queue_size,
460bb98ecd4SSara Sharon 				  txq->tfds, txq->dma_addr);
461bb98ecd4SSara Sharon 		txq->dma_addr = 0;
462e705c121SKalle Valo 		txq->tfds = NULL;
463e705c121SKalle Valo 
464e705c121SKalle Valo 		dma_free_coherent(dev,
465bb98ecd4SSara Sharon 				  sizeof(*txq->first_tb_bufs) * txq->n_window,
4668de437c7SSara Sharon 				  txq->first_tb_bufs, txq->first_tb_dma);
467e705c121SKalle Valo 	}
468e705c121SKalle Valo 
469e705c121SKalle Valo 	kfree(txq->entries);
470e705c121SKalle Valo 	txq->entries = NULL;
471e705c121SKalle Valo 
472e705c121SKalle Valo 	del_timer_sync(&txq->stuck_timer);
473e705c121SKalle Valo 
474e705c121SKalle Valo 	/* 0-fill queue descriptor structure */
475e705c121SKalle Valo 	memset(txq, 0, sizeof(*txq));
476e705c121SKalle Valo }
477e705c121SKalle Valo 
478e705c121SKalle Valo void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
479e705c121SKalle Valo {
480e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481286ca8ebSLuca Coelho 	int nq = trans->trans_cfg->base_params->num_of_queues;
482e705c121SKalle Valo 	int chan;
483e705c121SKalle Valo 	u32 reg_val;
484e705c121SKalle Valo 	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
485e705c121SKalle Valo 				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
486e705c121SKalle Valo 
487e705c121SKalle Valo 	/* make sure all queues are not stopped/used */
48849101078SJohannes Berg 	memset(trans_pcie->txqs.queue_stopped, 0,
48949101078SJohannes Berg 	       sizeof(trans_pcie->txqs.queue_stopped));
49049101078SJohannes Berg 	memset(trans_pcie->txqs.queue_used, 0,
49149101078SJohannes Berg 	       sizeof(trans_pcie->txqs.queue_used));
492e705c121SKalle Valo 
493e705c121SKalle Valo 	trans_pcie->scd_base_addr =
494e705c121SKalle Valo 		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
495e705c121SKalle Valo 
496e705c121SKalle Valo 	WARN_ON(scd_base_addr != 0 &&
497e705c121SKalle Valo 		scd_base_addr != trans_pcie->scd_base_addr);
498e705c121SKalle Valo 
499e705c121SKalle Valo 	/* reset context data, TX status and translation data */
500e705c121SKalle Valo 	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
501e705c121SKalle Valo 				   SCD_CONTEXT_MEM_LOWER_BOUND,
502e705c121SKalle Valo 			    NULL, clear_dwords);
503e705c121SKalle Valo 
504e705c121SKalle Valo 	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
50549101078SJohannes Berg 		       trans_pcie->txqs.scd_bc_tbls.dma >> 10);
506e705c121SKalle Valo 
507e705c121SKalle Valo 	/* The chain extension of the SCD doesn't work well. This feature is
508e705c121SKalle Valo 	 * enabled by default by the HW, so we need to disable it manually.
509e705c121SKalle Valo 	 */
510286ca8ebSLuca Coelho 	if (trans->trans_cfg->base_params->scd_chain_ext_wa)
511e705c121SKalle Valo 		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
512e705c121SKalle Valo 
51349101078SJohannes Berg 	iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
51449101078SJohannes Berg 				trans_pcie->txqs.cmd.fifo,
51549101078SJohannes Berg 				trans_pcie->txqs.cmd.wdg_timeout);
516e705c121SKalle Valo 
517e705c121SKalle Valo 	/* Activate all Tx DMA/FIFO channels */
518e705c121SKalle Valo 	iwl_scd_activate_fifos(trans);
519e705c121SKalle Valo 
520e705c121SKalle Valo 	/* Enable DMA channel */
521e705c121SKalle Valo 	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
522e705c121SKalle Valo 		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
523e705c121SKalle Valo 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
524e705c121SKalle Valo 				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
525e705c121SKalle Valo 
526e705c121SKalle Valo 	/* Update FH chicken bits */
527e705c121SKalle Valo 	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
528e705c121SKalle Valo 	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
529e705c121SKalle Valo 			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
530e705c121SKalle Valo 
531e705c121SKalle Valo 	/* Enable L1-Active */
532286ca8ebSLuca Coelho 	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
533e705c121SKalle Valo 		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
534e705c121SKalle Valo 				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
535e705c121SKalle Valo }
536e705c121SKalle Valo 
537e705c121SKalle Valo void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
538e705c121SKalle Valo {
539e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
540e705c121SKalle Valo 	int txq_id;
541e705c121SKalle Valo 
54213a3a390SSara Sharon 	/*
54313a3a390SSara Sharon 	 * we should never get here in gen2 trans mode; return early to avoid
54413a3a390SSara Sharon 	 * having invalid accesses
54513a3a390SSara Sharon 	 */
546286ca8ebSLuca Coelho 	if (WARN_ON_ONCE(trans->trans_cfg->gen2))
54713a3a390SSara Sharon 		return;
54813a3a390SSara Sharon 
549286ca8ebSLuca Coelho 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
550e705c121SKalle Valo 	     txq_id++) {
55149101078SJohannes Berg 		struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
55212a89f01SJohannes Berg 		if (trans->trans_cfg->gen2)
553e22744afSSara Sharon 			iwl_write_direct64(trans,
554e22744afSSara Sharon 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
555bb98ecd4SSara Sharon 					   txq->dma_addr);
556e22744afSSara Sharon 		else
557e22744afSSara Sharon 			iwl_write_direct32(trans,
558e22744afSSara Sharon 					   FH_MEM_CBBC_QUEUE(trans, txq_id),
559bb98ecd4SSara Sharon 					   txq->dma_addr >> 8);
560e705c121SKalle Valo 		iwl_pcie_txq_unmap(trans, txq_id);
561bb98ecd4SSara Sharon 		txq->read_ptr = 0;
562bb98ecd4SSara Sharon 		txq->write_ptr = 0;
563e705c121SKalle Valo 	}
564e705c121SKalle Valo 
565e705c121SKalle Valo 	/* Tell NIC where to find the "keep warm" buffer */
566e705c121SKalle Valo 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
567e705c121SKalle Valo 			   trans_pcie->kw.dma >> 4);
568e705c121SKalle Valo 
569e705c121SKalle Valo 	/*
570e705c121SKalle Valo 	 * Send 0 as the scd_base_addr since the device may have been reset
571e705c121SKalle Valo 	 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
572e705c121SKalle Valo 	 * contain garbage.
573e705c121SKalle Valo 	 */
574e705c121SKalle Valo 	iwl_pcie_tx_start(trans, 0);
575e705c121SKalle Valo }
576e705c121SKalle Valo 
577e705c121SKalle Valo static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
578e705c121SKalle Valo {
579e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
580e705c121SKalle Valo 	int ch, ret;
581e705c121SKalle Valo 	u32 mask = 0;
582e705c121SKalle Valo 
58325edc8f2SJohannes Berg 	spin_lock_bh(&trans_pcie->irq_lock);
584e705c121SKalle Valo 
5851ed08f6fSJohannes Berg 	if (!iwl_trans_grab_nic_access(trans))
586e705c121SKalle Valo 		goto out;
587e705c121SKalle Valo 
588e705c121SKalle Valo 	/* Stop each Tx DMA channel */
589e705c121SKalle Valo 	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
590e705c121SKalle Valo 		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
591e705c121SKalle Valo 		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
592e705c121SKalle Valo 	}
593e705c121SKalle Valo 
594e705c121SKalle Valo 	/* Wait for DMA channels to be idle */
595e705c121SKalle Valo 	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
596e705c121SKalle Valo 	if (ret < 0)
597e705c121SKalle Valo 		IWL_ERR(trans,
598e705c121SKalle Valo 			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
599e705c121SKalle Valo 			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
600e705c121SKalle Valo 
6011ed08f6fSJohannes Berg 	iwl_trans_release_nic_access(trans);
602e705c121SKalle Valo 
603e705c121SKalle Valo out:
60425edc8f2SJohannes Berg 	spin_unlock_bh(&trans_pcie->irq_lock);
605e705c121SKalle Valo }
606e705c121SKalle Valo 
607e705c121SKalle Valo /*
608e705c121SKalle Valo  * iwl_pcie_tx_stop - Stop all Tx DMA channels
609e705c121SKalle Valo  */
610e705c121SKalle Valo int iwl_pcie_tx_stop(struct iwl_trans *trans)
611e705c121SKalle Valo {
612e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
613e705c121SKalle Valo 	int txq_id;
614e705c121SKalle Valo 
615e705c121SKalle Valo 	/* Turn off all Tx DMA fifos */
616e705c121SKalle Valo 	iwl_scd_deactivate_fifos(trans);
617e705c121SKalle Valo 
618e705c121SKalle Valo 	/* Turn off all Tx DMA channels */
619e705c121SKalle Valo 	iwl_pcie_tx_stop_fh(trans);
620e705c121SKalle Valo 
621e705c121SKalle Valo 	/*
622e705c121SKalle Valo 	 * This function can be called before the op_mode disabled the
623e705c121SKalle Valo 	 * queues. This happens when we have an rfkill interrupt.
624e705c121SKalle Valo 	 * Since we stop Tx altogether - mark the queues as stopped.
625e705c121SKalle Valo 	 */
62649101078SJohannes Berg 	memset(trans_pcie->txqs.queue_stopped, 0,
62749101078SJohannes Berg 	       sizeof(trans_pcie->txqs.queue_stopped));
62849101078SJohannes Berg 	memset(trans_pcie->txqs.queue_used, 0,
62949101078SJohannes Berg 	       sizeof(trans_pcie->txqs.queue_used));
630e705c121SKalle Valo 
631e705c121SKalle Valo 	/* This can happen: start_hw, stop_device */
632b2a3b1c1SSara Sharon 	if (!trans_pcie->txq_memory)
633e705c121SKalle Valo 		return 0;
634e705c121SKalle Valo 
635e705c121SKalle Valo 	/* Unmap DMA from host system and free skb's */
636286ca8ebSLuca Coelho 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
637e705c121SKalle Valo 	     txq_id++)
638e705c121SKalle Valo 		iwl_pcie_txq_unmap(trans, txq_id);
639e705c121SKalle Valo 
640e705c121SKalle Valo 	return 0;
641e705c121SKalle Valo }
642e705c121SKalle Valo 
643e705c121SKalle Valo /*
644e705c121SKalle Valo  * iwl_pcie_tx_free - Free TXQ Context
645e705c121SKalle Valo  *
646e705c121SKalle Valo  * Destroy all TX DMA queues and structures
647e705c121SKalle Valo  */
648e705c121SKalle Valo void iwl_pcie_tx_free(struct iwl_trans *trans)
649e705c121SKalle Valo {
650e705c121SKalle Valo 	int txq_id;
651e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
652e705c121SKalle Valo 
65349101078SJohannes Berg 	memset(trans_pcie->txqs.queue_used, 0,
65449101078SJohannes Berg 	       sizeof(trans_pcie->txqs.queue_used));
655de74c455SSara Sharon 
656e705c121SKalle Valo 	/* Tx queues */
657b2a3b1c1SSara Sharon 	if (trans_pcie->txq_memory) {
658e705c121SKalle Valo 		for (txq_id = 0;
659286ca8ebSLuca Coelho 		     txq_id < trans->trans_cfg->base_params->num_of_queues;
660b2a3b1c1SSara Sharon 		     txq_id++) {
661e705c121SKalle Valo 			iwl_pcie_txq_free(trans, txq_id);
66249101078SJohannes Berg 			trans_pcie->txqs.txq[txq_id] = NULL;
663b2a3b1c1SSara Sharon 		}
664e705c121SKalle Valo 	}
665e705c121SKalle Valo 
666b2a3b1c1SSara Sharon 	kfree(trans_pcie->txq_memory);
667b2a3b1c1SSara Sharon 	trans_pcie->txq_memory = NULL;
668e705c121SKalle Valo 
669e705c121SKalle Valo 	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
670e705c121SKalle Valo 
67149101078SJohannes Berg 	iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
67249101078SJohannes Berg }
67349101078SJohannes Berg 
67449101078SJohannes Berg void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
67549101078SJohannes Berg {
67649101078SJohannes Berg 	u32 txq_id = txq->id;
67749101078SJohannes Berg 	u32 status;
67849101078SJohannes Berg 	bool active;
67949101078SJohannes Berg 	u8 fifo;
68049101078SJohannes Berg 
68149101078SJohannes Berg 	if (trans->trans_cfg->gen2) {
68249101078SJohannes Berg 		IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
68349101078SJohannes Berg 			txq->read_ptr, txq->write_ptr);
68449101078SJohannes Berg 		/* TODO: access new SCD registers and dump them */
68549101078SJohannes Berg 		return;
68649101078SJohannes Berg 	}
68749101078SJohannes Berg 
68849101078SJohannes Berg 	status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
68949101078SJohannes Berg 	fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
69049101078SJohannes Berg 	active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
69149101078SJohannes Berg 
69249101078SJohannes Berg 	IWL_ERR(trans,
69349101078SJohannes Berg 		"Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
69449101078SJohannes Berg 		txq_id, active ? "" : "in", fifo,
69549101078SJohannes Berg 		jiffies_to_msecs(txq->wd_timeout),
69649101078SJohannes Berg 		txq->read_ptr, txq->write_ptr,
69749101078SJohannes Berg 		iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
69849101078SJohannes Berg 			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
69949101078SJohannes Berg 			iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
70049101078SJohannes Berg 			(trans->trans_cfg->base_params->max_tfd_queue_size - 1),
70149101078SJohannes Berg 			iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
70249101078SJohannes Berg }
70349101078SJohannes Berg 
70449101078SJohannes Berg static void iwl_txq_stuck_timer(struct timer_list *t)
70549101078SJohannes Berg {
70649101078SJohannes Berg 	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
70749101078SJohannes Berg 	struct iwl_trans *trans = txq->trans;
70849101078SJohannes Berg 
70949101078SJohannes Berg 	spin_lock(&txq->lock);
71049101078SJohannes Berg 	/* check if triggered erroneously */
71149101078SJohannes Berg 	if (txq->read_ptr == txq->write_ptr) {
71249101078SJohannes Berg 		spin_unlock(&txq->lock);
71349101078SJohannes Berg 		return;
71449101078SJohannes Berg 	}
71549101078SJohannes Berg 	spin_unlock(&txq->lock);
71649101078SJohannes Berg 
71749101078SJohannes Berg 	iwl_txq_log_scd_error(trans, txq);
71849101078SJohannes Berg 
71949101078SJohannes Berg 	iwl_force_nmi(trans);
72049101078SJohannes Berg }
72149101078SJohannes Berg 
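/*
 * Allocate the DMA-coherent TFD ring, the first-TB buffers and the per-entry
 * bookkeeping for one TX queue (plus the host command buffers if this is the
 * command queue), and point every TFD at the invalid/dummy TX command.
 */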
72249101078SJohannes Berg int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
72349101078SJohannes Berg 		       int slots_num, bool cmd_queue)
72449101078SJohannes Berg {
72549101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
72649101078SJohannes Berg 	size_t num_entries = trans->trans_cfg->gen2 ?
72749101078SJohannes Berg 		slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
72849101078SJohannes Berg 	size_t tfd_sz;
72949101078SJohannes Berg 	size_t tb0_buf_sz;
73049101078SJohannes Berg 	int i;
73149101078SJohannes Berg 
73249101078SJohannes Berg 	if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
73349101078SJohannes Berg 		return -EINVAL;
73449101078SJohannes Berg 
73549101078SJohannes Berg 	if (WARN_ON(txq->entries || txq->tfds))
73649101078SJohannes Berg 		return -EINVAL;
73749101078SJohannes Berg 
73849101078SJohannes Berg 	tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
73949101078SJohannes Berg 
74049101078SJohannes Berg 	timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
74149101078SJohannes Berg 	txq->trans = trans;
74249101078SJohannes Berg 
74349101078SJohannes Berg 	txq->n_window = slots_num;
74449101078SJohannes Berg 
74549101078SJohannes Berg 	txq->entries = kcalloc(slots_num,
74649101078SJohannes Berg 			       sizeof(struct iwl_pcie_txq_entry),
74749101078SJohannes Berg 			       GFP_KERNEL);
74849101078SJohannes Berg 
74949101078SJohannes Berg 	if (!txq->entries)
75049101078SJohannes Berg 		goto error;
75149101078SJohannes Berg 
75249101078SJohannes Berg 	if (cmd_queue)
75349101078SJohannes Berg 		for (i = 0; i < slots_num; i++) {
75449101078SJohannes Berg 			txq->entries[i].cmd =
75549101078SJohannes Berg 				kmalloc(sizeof(struct iwl_device_cmd),
75649101078SJohannes Berg 					GFP_KERNEL);
75749101078SJohannes Berg 			if (!txq->entries[i].cmd)
75849101078SJohannes Berg 				goto error;
75949101078SJohannes Berg 		}
76049101078SJohannes Berg 
76149101078SJohannes Berg 	/* Circular buffer of transmit frame descriptors (TFDs),
76249101078SJohannes Berg 	 * shared with device
76349101078SJohannes Berg 	 */
76449101078SJohannes Berg 	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
76549101078SJohannes Berg 				       &txq->dma_addr, GFP_KERNEL);
76649101078SJohannes Berg 	if (!txq->tfds)
76749101078SJohannes Berg 		goto error;
76849101078SJohannes Berg 
76949101078SJohannes Berg 	BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
77049101078SJohannes Berg 
77149101078SJohannes Berg 	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
77249101078SJohannes Berg 
77349101078SJohannes Berg 	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
77449101078SJohannes Berg 						&txq->first_tb_dma,
77549101078SJohannes Berg 						GFP_KERNEL);
77649101078SJohannes Berg 	if (!txq->first_tb_bufs)
77749101078SJohannes Berg 		goto err_free_tfds;
77849101078SJohannes Berg 
77949101078SJohannes Berg 	for (i = 0; i < num_entries; i++) {
78049101078SJohannes Berg 		void *tfd = iwl_txq_get_tfd(trans, txq, i);
78149101078SJohannes Berg 
78249101078SJohannes Berg 		if (trans->trans_cfg->gen2)
78349101078SJohannes Berg 			iwl_txq_set_tfd_invalid_gen2(trans, tfd);
78449101078SJohannes Berg 		else
78549101078SJohannes Berg 			iwl_txq_set_tfd_invalid_gen1(trans, tfd);
78649101078SJohannes Berg 	}
78749101078SJohannes Berg 
78849101078SJohannes Berg 	return 0;
78949101078SJohannes Berg err_free_tfds:
79049101078SJohannes Berg 	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
79149101078SJohannes Berg 	txq->tfds = NULL;
79249101078SJohannes Berg error:
79349101078SJohannes Berg 	if (txq->entries && cmd_queue)
79449101078SJohannes Berg 		for (i = 0; i < slots_num; i++)
79549101078SJohannes Berg 			kfree(txq->entries[i].cmd);
79649101078SJohannes Berg 	kfree(txq->entries);
79749101078SJohannes Berg 	txq->entries = NULL;
79849101078SJohannes Berg 
79949101078SJohannes Berg 	return -ENOMEM;
800e705c121SKalle Valo }
801e705c121SKalle Valo 
802e705c121SKalle Valo /*
803e705c121SKalle Valo  * iwl_pcie_tx_alloc - allocate TX context
804e705c121SKalle Valo  * Allocate all Tx DMA structures and initialize them
805e705c121SKalle Valo  */
806e705c121SKalle Valo static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
807e705c121SKalle Valo {
808e705c121SKalle Valo 	int ret;
809e705c121SKalle Valo 	int txq_id, slots_num;
810e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
811286ca8ebSLuca Coelho 	u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
812e705c121SKalle Valo 
813a8e82c36SJohannes Berg 	if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
814a8e82c36SJohannes Berg 		return -EINVAL;
815a8e82c36SJohannes Berg 
816a8e82c36SJohannes Berg 	bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
817e705c121SKalle Valo 
818e705c121SKalle Valo 	/* It is not allowed to alloc twice, so warn when this happens.
819e705c121SKalle Valo 	 * We cannot rely on the previous allocation, so free and fail */
820b2a3b1c1SSara Sharon 	if (WARN_ON(trans_pcie->txq_memory)) {
821e705c121SKalle Valo 		ret = -EINVAL;
822e705c121SKalle Valo 		goto error;
823e705c121SKalle Valo 	}
824e705c121SKalle Valo 
82549101078SJohannes Berg 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
8267b3e42eaSGolan Ben Ami 				     bc_tbls_size);
827e705c121SKalle Valo 	if (ret) {
828e705c121SKalle Valo 		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
829e705c121SKalle Valo 		goto error;
830e705c121SKalle Valo 	}
831e705c121SKalle Valo 
832e705c121SKalle Valo 	/* Alloc keep-warm buffer */
833e705c121SKalle Valo 	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
834e705c121SKalle Valo 	if (ret) {
835e705c121SKalle Valo 		IWL_ERR(trans, "Keep Warm allocation failed\n");
836e705c121SKalle Valo 		goto error;
837e705c121SKalle Valo 	}
838e705c121SKalle Valo 
83979b6c8feSLuca Coelho 	trans_pcie->txq_memory =
840286ca8ebSLuca Coelho 		kcalloc(trans->trans_cfg->base_params->num_of_queues,
841e705c121SKalle Valo 			sizeof(struct iwl_txq), GFP_KERNEL);
842b2a3b1c1SSara Sharon 	if (!trans_pcie->txq_memory) {
843e705c121SKalle Valo 		IWL_ERR(trans, "Not enough memory for txq\n");
844e705c121SKalle Valo 		ret = -ENOMEM;
845e705c121SKalle Valo 		goto error;
846e705c121SKalle Valo 	}
847e705c121SKalle Valo 
848e705c121SKalle Valo 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
849286ca8ebSLuca Coelho 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
850e705c121SKalle Valo 	     txq_id++) {
85149101078SJohannes Berg 		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
852b8e8d7ceSSara Sharon 
853ff911dcaSShaul Triebitz 		if (cmd_queue)
854718a8b23SShaul Triebitz 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
855ff911dcaSShaul Triebitz 					  trans->cfg->min_txq_size);
856ff911dcaSShaul Triebitz 		else
857718a8b23SShaul Triebitz 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
858d5399f11SMordechay Goodstein 					  trans->cfg->min_ba_txq_size);
85949101078SJohannes Berg 		trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
86049101078SJohannes Berg 		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
86149101078SJohannes Berg 					 slots_num, cmd_queue);
862e705c121SKalle Valo 		if (ret) {
863e705c121SKalle Valo 			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
864e705c121SKalle Valo 			goto error;
865e705c121SKalle Valo 		}
86649101078SJohannes Berg 		trans_pcie->txqs.txq[txq_id]->id = txq_id;
867e705c121SKalle Valo 	}
868e705c121SKalle Valo 
869e705c121SKalle Valo 	return 0;
870e705c121SKalle Valo 
871e705c121SKalle Valo error:
872e705c121SKalle Valo 	iwl_pcie_tx_free(trans);
873e705c121SKalle Valo 
874e705c121SKalle Valo 	return ret;
875e705c121SKalle Valo }
876eda50cdeSSara Sharon 
87749101078SJohannes Berg /*
87849101078SJohannes Berg  * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
87949101078SJohannes Berg  */
88049101078SJohannes Berg static int iwl_queue_init(struct iwl_txq *q, int slots_num)
88149101078SJohannes Berg {
88249101078SJohannes Berg 	q->n_window = slots_num;
88349101078SJohannes Berg 
88449101078SJohannes Berg 	/* slots_num must be power-of-two size, otherwise
88549101078SJohannes Berg 	 * iwl_txq_get_cmd_index is broken.
88649101078SJohannes Berg 	 */
88749101078SJohannes Berg 	if (WARN_ON(!is_power_of_2(slots_num)))
88849101078SJohannes Berg 		return -EINVAL;
88949101078SJohannes Berg 
89049101078SJohannes Berg 	q->low_mark = q->n_window / 4;
89149101078SJohannes Berg 	if (q->low_mark < 4)
89249101078SJohannes Berg 		q->low_mark = 4;
89349101078SJohannes Berg 
89449101078SJohannes Berg 	q->high_mark = q->n_window / 8;
89549101078SJohannes Berg 	if (q->high_mark < 2)
89649101078SJohannes Berg 		q->high_mark = 2;
89749101078SJohannes Berg 
89849101078SJohannes Berg 	q->write_ptr = 0;
89949101078SJohannes Berg 	q->read_ptr = 0;
90049101078SJohannes Berg 
90149101078SJohannes Berg 	return 0;
90249101078SJohannes Berg }
90349101078SJohannes Berg 
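/*
 * Initialize an already-allocated TX queue: set up the watermarks and
 * read/write indexes, the queue locks and the overflow queue.  @slots_num and
 * the maximum TFD queue size must both be powers of two.
 */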
90449101078SJohannes Berg int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
90549101078SJohannes Berg 		 int slots_num, bool cmd_queue)
90649101078SJohannes Berg {
90749101078SJohannes Berg 	u32 tfd_queue_max_size =
90849101078SJohannes Berg 		trans->trans_cfg->base_params->max_tfd_queue_size;
90949101078SJohannes Berg 	int ret;
91049101078SJohannes Berg 
91149101078SJohannes Berg 	txq->need_update = false;
91249101078SJohannes Berg 
91349101078SJohannes Berg 	/* max_tfd_queue_size must be power-of-two size, otherwise
91449101078SJohannes Berg 	 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
91549101078SJohannes Berg 	 */
91649101078SJohannes Berg 	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
91749101078SJohannes Berg 		      "Max tfd queue size must be a power of two, but is %d",
91849101078SJohannes Berg 		      tfd_queue_max_size))
91949101078SJohannes Berg 		return -EINVAL;
92049101078SJohannes Berg 
92149101078SJohannes Berg 	/* Initialize queue's high/low-water marks, and head/tail indexes */
92249101078SJohannes Berg 	ret = iwl_queue_init(txq, slots_num);
92349101078SJohannes Berg 	if (ret)
92449101078SJohannes Berg 		return ret;
92549101078SJohannes Berg 
92649101078SJohannes Berg 	spin_lock_init(&txq->lock);
927a2ed933dSBenjamin Berg 	spin_lock_init(&txq->reclaim_lock);
92849101078SJohannes Berg 
92949101078SJohannes Berg 	if (cmd_queue) {
93049101078SJohannes Berg 		static struct lock_class_key iwl_txq_cmd_queue_lock_class;
93149101078SJohannes Berg 
93249101078SJohannes Berg 		lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
93349101078SJohannes Berg 	}
93449101078SJohannes Berg 
93549101078SJohannes Berg 	__skb_queue_head_init(&txq->overflow_q);
93649101078SJohannes Berg 
93749101078SJohannes Berg 	return 0;
93849101078SJohannes Berg }
93949101078SJohannes Berg 
940e705c121SKalle Valo int iwl_pcie_tx_init(struct iwl_trans *trans)
941e705c121SKalle Valo {
942e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
943e705c121SKalle Valo 	int ret;
944e705c121SKalle Valo 	int txq_id, slots_num;
945e705c121SKalle Valo 	bool alloc = false;
946e705c121SKalle Valo 
947b2a3b1c1SSara Sharon 	if (!trans_pcie->txq_memory) {
948e705c121SKalle Valo 		ret = iwl_pcie_tx_alloc(trans);
949e705c121SKalle Valo 		if (ret)
950e705c121SKalle Valo 			goto error;
951e705c121SKalle Valo 		alloc = true;
952e705c121SKalle Valo 	}
953e705c121SKalle Valo 
95425edc8f2SJohannes Berg 	spin_lock_bh(&trans_pcie->irq_lock);
955e705c121SKalle Valo 
956e705c121SKalle Valo 	/* Turn off all Tx DMA fifos */
957e705c121SKalle Valo 	iwl_scd_deactivate_fifos(trans);
958e705c121SKalle Valo 
959e705c121SKalle Valo 	/* Tell NIC where to find the "keep warm" buffer */
960e705c121SKalle Valo 	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
961e705c121SKalle Valo 			   trans_pcie->kw.dma >> 4);
962e705c121SKalle Valo 
96325edc8f2SJohannes Berg 	spin_unlock_bh(&trans_pcie->irq_lock);
964e705c121SKalle Valo 
965e705c121SKalle Valo 	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
966286ca8ebSLuca Coelho 	for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
967e705c121SKalle Valo 	     txq_id++) {
96849101078SJohannes Berg 		bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
969b8e8d7ceSSara Sharon 
970ff911dcaSShaul Triebitz 		if (cmd_queue)
971718a8b23SShaul Triebitz 			slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
972ff911dcaSShaul Triebitz 					  trans->cfg->min_txq_size);
973ff911dcaSShaul Triebitz 		else
974718a8b23SShaul Triebitz 			slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
975d5399f11SMordechay Goodstein 					  trans->cfg->min_ba_txq_size);
97649101078SJohannes Berg 		ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
9770cd1ad2dSMordechay Goodstein 				   cmd_queue);
978e705c121SKalle Valo 		if (ret) {
979e705c121SKalle Valo 			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
980e705c121SKalle Valo 			goto error;
981e705c121SKalle Valo 		}
982e705c121SKalle Valo 
983eda50cdeSSara Sharon 		/*
984eda50cdeSSara Sharon 		 * Tell nic where to find circular buffer of TFDs for a
985eda50cdeSSara Sharon 		 * given Tx queue, and enable the DMA channel used for that
986eda50cdeSSara Sharon 		 * queue.
987eda50cdeSSara Sharon 		 * Circular buffer (TFD queue in DRAM) physical base address
988eda50cdeSSara Sharon 		 */
989eda50cdeSSara Sharon 		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
99049101078SJohannes Berg 				   trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
991ae79785fSSara Sharon 	}
992e22744afSSara Sharon 
993e705c121SKalle Valo 	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
994286ca8ebSLuca Coelho 	if (trans->trans_cfg->base_params->num_of_queues > 20)
995e705c121SKalle Valo 		iwl_set_bits_prph(trans, SCD_GP_CTRL,
996e705c121SKalle Valo 				  SCD_GP_CTRL_ENABLE_31_QUEUES);
997e705c121SKalle Valo 
998e705c121SKalle Valo 	return 0;
999e705c121SKalle Valo error:
1000e705c121SKalle Valo 	/* Upon error, free only if we allocated something */
1001e705c121SKalle Valo 	if (alloc)
1002e705c121SKalle Valo 		iwl_pcie_tx_free(trans);
1003e705c121SKalle Valo 	return ret;
1004e705c121SKalle Valo }
1005e705c121SKalle Valo 
1006e705c121SKalle Valo static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1007e705c121SKalle Valo 				      const struct iwl_host_cmd *cmd)
1008e705c121SKalle Valo {
1009e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1010e705c121SKalle Valo 
10112b3fae66SMatt Chen 	/* Make sure the NIC is still alive in the bus */
1012f60c9e59SEmmanuel Grumbach 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1013f60c9e59SEmmanuel Grumbach 		return -ENODEV;
10142b3fae66SMatt Chen 
101572bc934cSJohannes Berg 	if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
101672bc934cSJohannes Berg 		return 0;
101772bc934cSJohannes Berg 
1018e705c121SKalle Valo 	/*
1019e705c121SKalle Valo 	 * wake up the NIC to make sure that the firmware will see the host
1020e705c121SKalle Valo 	 * command - we will let the NIC sleep once all the host commands
1021e705c121SKalle Valo 	 * returned. This needs to be done only on NICs that have
102272bc934cSJohannes Berg 	 * apmg_wake_up_wa set (see above.)
1023e705c121SKalle Valo 	 */
1024c544d89bSJohannes Berg 	if (!_iwl_trans_pcie_grab_nic_access(trans))
1025416dde0fSJohannes Berg 		return -EIO;
1026e705c121SKalle Valo 
1027416dde0fSJohannes Berg 	/*
1028416dde0fSJohannes Berg 	 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
1029416dde0fSJohannes Berg 	 * There, we also returned immediately if cmd_hold_nic_awake is
1030416dde0fSJohannes Berg 	 * already true, so it's OK to unconditionally set it to true.
1031416dde0fSJohannes Berg 	 */
1032e705c121SKalle Valo 	trans_pcie->cmd_hold_nic_awake = true;
1033c544d89bSJohannes Berg 	spin_unlock(&trans_pcie->reg_lock);
1034e705c121SKalle Valo 
1035416dde0fSJohannes Berg 	return 0;
1036e705c121SKalle Valo }
1037e705c121SKalle Valo 
103849101078SJohannes Berg static void iwl_txq_progress(struct iwl_txq *txq)
103949101078SJohannes Berg {
104049101078SJohannes Berg 	lockdep_assert_held(&txq->lock);
104149101078SJohannes Berg 
104249101078SJohannes Berg 	if (!txq->wd_timeout)
104349101078SJohannes Berg 		return;
104449101078SJohannes Berg 
104549101078SJohannes Berg 	/*
104649101078SJohannes Berg 	 * station is asleep and we send data - that must
104749101078SJohannes Berg 	 * be uAPSD or PS-Poll. Don't rearm the timer.
104849101078SJohannes Berg 	 */
104949101078SJohannes Berg 	if (txq->frozen)
105049101078SJohannes Berg 		return;
105149101078SJohannes Berg 
105249101078SJohannes Berg 	/*
105349101078SJohannes Berg 	 * if empty delete timer, otherwise move timer forward
105449101078SJohannes Berg 	 * since we're making progress on this queue
105549101078SJohannes Berg 	 */
105649101078SJohannes Berg 	if (txq->read_ptr == txq->write_ptr)
105749101078SJohannes Berg 		del_timer(&txq->stuck_timer);
105849101078SJohannes Berg 	else
105949101078SJohannes Berg 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
106049101078SJohannes Berg }
106149101078SJohannes Berg 
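/*
 * Return true if entry @i lies in the currently used region of the queue,
 * i.e. between the (wrapped) @read_ptr and @write_ptr indexes.
 */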
1062a2ed933dSBenjamin Berg static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
1063a2ed933dSBenjamin Berg 				int read_ptr, int write_ptr)
106449101078SJohannes Berg {
106549101078SJohannes Berg 	int index = iwl_txq_get_cmd_index(q, i);
1066a2ed933dSBenjamin Berg 	int r = iwl_txq_get_cmd_index(q, read_ptr);
1067a2ed933dSBenjamin Berg 	int w = iwl_txq_get_cmd_index(q, write_ptr);
106849101078SJohannes Berg 
106949101078SJohannes Berg 	return w >= r ?
107049101078SJohannes Berg 		(index >= r && index < w) :
107149101078SJohannes Berg 		!(index < r && index >= w);
107249101078SJohannes Berg }
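/*
 * Worked example for the wrap-around test above (hypothetical values, shown
 * only to make the logic concrete): with a 256-entry queue, read_ptr = 250
 * and write_ptr = 4, the "used" region wraps, so the second branch applies:
 *
 *	i = 252: !(252 < 250 && 252 >= 4) -> true  (still owned by the HW)
 *	i =   2: !(  2 < 250 &&   2 >= 4) -> true  (still owned by the HW)
 *	i =  10: !( 10 < 250 &&  10 >= 4) -> false (free slot)
 *
 * In the non-wrapped case (w >= r) the first branch is the usual half-open
 * interval check, index >= r && index < w.
 */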
107349101078SJohannes Berg 
1074e705c121SKalle Valo /*
1075e705c121SKalle Valo  * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1076e705c121SKalle Valo  *
1077e705c121SKalle Valo  * When FW advances 'R' index, all entries between old and new 'R' index
1078e705c121SKalle Valo  * need to be reclaimed. As a result, some free space forms.  If there is
1079e705c121SKalle Valo  * enough free space (> low mark), wake the stack that feeds us.
1080e705c121SKalle Valo  */
iwl_pcie_cmdq_reclaim(struct iwl_trans * trans,int txq_id,int idx)10817216dc99SJohannes Berg static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1082e705c121SKalle Valo {
108349101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
108449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1085e705c121SKalle Valo 	int nfreed = 0;
1086f5955a6cSGolan Ben Ami 	u16 r;
1087e705c121SKalle Valo 
1088e705c121SKalle Valo 	lockdep_assert_held(&txq->lock);
1089e705c121SKalle Valo 
10900cd1ad2dSMordechay Goodstein 	idx = iwl_txq_get_cmd_index(txq, idx);
10910cd1ad2dSMordechay Goodstein 	r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1092f5955a6cSGolan Ben Ami 
1093286ca8ebSLuca Coelho 	if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
1094a2ed933dSBenjamin Berg 	    (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
109549101078SJohannes Berg 		WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
1096e705c121SKalle Valo 			  "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
10977b3e42eaSGolan Ben Ami 			  __func__, txq_id, idx,
1098286ca8ebSLuca Coelho 			  trans->trans_cfg->base_params->max_tfd_queue_size,
1099bb98ecd4SSara Sharon 			  txq->write_ptr, txq->read_ptr);
1100e705c121SKalle Valo 		return;
1101e705c121SKalle Valo 	}
1102e705c121SKalle Valo 
11030cd1ad2dSMordechay Goodstein 	for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
11040cd1ad2dSMordechay Goodstein 	     r = iwl_txq_inc_wrap(trans, r)) {
11050cd1ad2dSMordechay Goodstein 		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1106e705c121SKalle Valo 
1107e705c121SKalle Valo 		if (nfreed++ > 0) {
1108e705c121SKalle Valo 			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1109f5955a6cSGolan Ben Ami 				idx, txq->write_ptr, r);
1110e705c121SKalle Valo 			iwl_force_nmi(trans);
1111e705c121SKalle Valo 		}
1112e705c121SKalle Valo 	}
1113e705c121SKalle Valo 
111472bc934cSJohannes Berg 	if (txq->read_ptr == txq->write_ptr)
1115e705c121SKalle Valo 		iwl_pcie_clear_cmd_in_flight(trans);
1116e705c121SKalle Valo 
1117a4450980SMordechay Goodstein 	iwl_txq_progress(txq);
1118e705c121SKalle Valo }
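/*
 * Illustrative trace of the reclaim loop above (hypothetical values): in the
 * normal case the completed command sits exactly at read_ptr, e.g.
 * read_ptr = 11 and idx = 11, so the loop frees that single slot and stops.
 * If read_ptr were still 10 (a completion was lost), the same call would free
 * slots 10 and 11, and the second pass logs "HCMD skipped" and fires an NMI,
 * since host commands are expected to complete one at a time.
 */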
1119e705c121SKalle Valo 
iwl_pcie_txq_set_ratid_map(struct iwl_trans * trans,u16 ra_tid,u16 txq_id)1120e705c121SKalle Valo static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1121e705c121SKalle Valo 				 u16 txq_id)
1122e705c121SKalle Valo {
1123e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1124e705c121SKalle Valo 	u32 tbl_dw_addr;
1125e705c121SKalle Valo 	u32 tbl_dw;
1126e705c121SKalle Valo 	u16 scd_q2ratid;
1127e705c121SKalle Valo 
1128e705c121SKalle Valo 	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1129e705c121SKalle Valo 
1130e705c121SKalle Valo 	tbl_dw_addr = trans_pcie->scd_base_addr +
1131e705c121SKalle Valo 			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1132e705c121SKalle Valo 
1133e705c121SKalle Valo 	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1134e705c121SKalle Valo 
1135e705c121SKalle Valo 	if (txq_id & 0x1)
1136e705c121SKalle Valo 		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1137e705c121SKalle Valo 	else
1138e705c121SKalle Valo 		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1139e705c121SKalle Valo 
1140e705c121SKalle Valo 	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1141e705c121SKalle Valo 
1142e705c121SKalle Valo 	return 0;
1143e705c121SKalle Valo }
1144e705c121SKalle Valo 
1145e705c121SKalle Valo /* Receiver address (actually, Rx station's index into station table),
1146e705c121SKalle Valo  * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1147e705c121SKalle Valo #define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
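/*
 * For example, BUILD_RAxTID(3, 5) packs station index 3 and TID 5 into 0x35
 * (hypothetical values, shown only to make the bit layout explicit);
 * iwl_pcie_txq_set_ratid_map() then stores that value in either the upper or
 * the lower half of the per-queue translation dword, depending on whether the
 * queue number is odd or even.
 */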
1148e705c121SKalle Valo 
iwl_trans_pcie_txq_enable(struct iwl_trans * trans,int txq_id,u16 ssn,const struct iwl_trans_txq_scd_cfg * cfg,unsigned int wdg_timeout)1149dcfbd67bSEmmanuel Grumbach bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1150e705c121SKalle Valo 			       const struct iwl_trans_txq_scd_cfg *cfg,
1151e705c121SKalle Valo 			       unsigned int wdg_timeout)
1152e705c121SKalle Valo {
1153e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
115449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1155e705c121SKalle Valo 	int fifo = -1;
1156dcfbd67bSEmmanuel Grumbach 	bool scd_bug = false;
1157e705c121SKalle Valo 
115849101078SJohannes Berg 	if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
1159e705c121SKalle Valo 		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1160e705c121SKalle Valo 
1161e705c121SKalle Valo 	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1162e705c121SKalle Valo 
1163e705c121SKalle Valo 	if (cfg) {
1164e705c121SKalle Valo 		fifo = cfg->fifo;
1165e705c121SKalle Valo 
1166e705c121SKalle Valo 		/* Disable the scheduler prior to configuring the cmd queue */
116749101078SJohannes Berg 		if (txq_id == trans_pcie->txqs.cmd.q_id &&
1168e705c121SKalle Valo 		    trans_pcie->scd_set_active)
1169e705c121SKalle Valo 			iwl_scd_enable_set_active(trans, 0);
1170e705c121SKalle Valo 
1171e705c121SKalle Valo 		/* Stop this Tx queue before configuring it */
1172e705c121SKalle Valo 		iwl_scd_txq_set_inactive(trans, txq_id);
1173e705c121SKalle Valo 
1174e705c121SKalle Valo 		/* Set this queue as a chain-building queue unless it is CMD */
117549101078SJohannes Berg 		if (txq_id != trans_pcie->txqs.cmd.q_id)
1176e705c121SKalle Valo 			iwl_scd_txq_set_chain(trans, txq_id);
1177e705c121SKalle Valo 
1178e705c121SKalle Valo 		if (cfg->aggregate) {
1179e705c121SKalle Valo 			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1180e705c121SKalle Valo 
1181e705c121SKalle Valo 			/* Map receiver-address / traffic-ID to this queue */
1182e705c121SKalle Valo 			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1183e705c121SKalle Valo 
1184e705c121SKalle Valo 			/* enable aggregations for the queue */
1185e705c121SKalle Valo 			iwl_scd_txq_enable_agg(trans, txq_id);
1186e705c121SKalle Valo 			txq->ampdu = true;
1187e705c121SKalle Valo 		} else {
1188e705c121SKalle Valo 			/*
1189e705c121SKalle Valo 			 * disable aggregations for the queue; this will also
1190e705c121SKalle Valo 			 * make the ra_tid mapping configuration irrelevant
1191e705c121SKalle Valo 			 * since it is now a non-AGG queue.
1192e705c121SKalle Valo 			 */
1193e705c121SKalle Valo 			iwl_scd_txq_disable_agg(trans, txq_id);
1194e705c121SKalle Valo 
1195bb98ecd4SSara Sharon 			ssn = txq->read_ptr;
1196e705c121SKalle Valo 		}
1197dcfbd67bSEmmanuel Grumbach 	} else {
1198dcfbd67bSEmmanuel Grumbach 		/*
1199dcfbd67bSEmmanuel Grumbach 		 * If we need to move the SCD write pointer by steps of
1200dcfbd67bSEmmanuel Grumbach 		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
1201dcfbd67bSEmmanuel Grumbach 		 * the op_mode know by returning true later.
1202dcfbd67bSEmmanuel Grumbach 		 * Do this only in case cfg is NULL since this trick can
1203dcfbd67bSEmmanuel Grumbach 		 * be done only if we have DQA enabled which is true for mvm
1204dcfbd67bSEmmanuel Grumbach 		 * only. And mvm never sets a cfg pointer.
1205dcfbd67bSEmmanuel Grumbach 		 * This is really ugly, but this is the easiest way out for
1206dcfbd67bSEmmanuel Grumbach 		 * this sad hardware issue.
1207dcfbd67bSEmmanuel Grumbach 		 * This bug has been fixed on devices 9000 and up.
1208dcfbd67bSEmmanuel Grumbach 		 */
1209286ca8ebSLuca Coelho 		scd_bug = !trans->trans_cfg->mq_rx_supported &&
1210dcfbd67bSEmmanuel Grumbach 			!((ssn - txq->write_ptr) & 0x3f) &&
1211dcfbd67bSEmmanuel Grumbach 			(ssn != txq->write_ptr);
1212dcfbd67bSEmmanuel Grumbach 		if (scd_bug)
1213dcfbd67bSEmmanuel Grumbach 			ssn++;
1214e705c121SKalle Valo 	}
1215e705c121SKalle Valo 
1216e705c121SKalle Valo 	/* Place first TFD at index corresponding to start sequence number.
1217e705c121SKalle Valo 	 * Assumes that ssn_idx is valid (!= 0xFFF) */
1218bb98ecd4SSara Sharon 	txq->read_ptr = (ssn & 0xff);
1219bb98ecd4SSara Sharon 	txq->write_ptr = (ssn & 0xff);
1220e705c121SKalle Valo 	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1221e705c121SKalle Valo 			   (ssn & 0xff) | (txq_id << 8));
1222e705c121SKalle Valo 
1223e705c121SKalle Valo 	if (cfg) {
1224e705c121SKalle Valo 		u8 frame_limit = cfg->frame_limit;
1225e705c121SKalle Valo 
1226e705c121SKalle Valo 		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1227e705c121SKalle Valo 
1228e705c121SKalle Valo 		/* Set up Tx window size and frame limit for this queue */
1229e705c121SKalle Valo 		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1230e705c121SKalle Valo 				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1231e705c121SKalle Valo 		iwl_trans_write_mem32(trans,
1232e705c121SKalle Valo 			trans_pcie->scd_base_addr +
1233e705c121SKalle Valo 			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1234f3779f47SJohannes Berg 			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
1235f3779f47SJohannes Berg 			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
1236e705c121SKalle Valo 
1237e705c121SKalle Valo 		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1238e705c121SKalle Valo 		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1239e705c121SKalle Valo 			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1240e705c121SKalle Valo 			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1241e705c121SKalle Valo 			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1242e705c121SKalle Valo 			       SCD_QUEUE_STTS_REG_MSK);
1243e705c121SKalle Valo 
1244e705c121SKalle Valo 		/* enable the scheduler for this queue (only) */
124549101078SJohannes Berg 		if (txq_id == trans_pcie->txqs.cmd.q_id &&
1246e705c121SKalle Valo 		    trans_pcie->scd_set_active)
1247e705c121SKalle Valo 			iwl_scd_enable_set_active(trans, BIT(txq_id));
1248e705c121SKalle Valo 
1249e705c121SKalle Valo 		IWL_DEBUG_TX_QUEUES(trans,
1250e705c121SKalle Valo 				    "Activate queue %d on FIFO %d WrPtr: %d\n",
1251e705c121SKalle Valo 				    txq_id, fifo, ssn & 0xff);
1252e705c121SKalle Valo 	} else {
1253e705c121SKalle Valo 		IWL_DEBUG_TX_QUEUES(trans,
1254e705c121SKalle Valo 				    "Activate queue %d WrPtr: %d\n",
1255e705c121SKalle Valo 				    txq_id, ssn & 0xff);
1256e705c121SKalle Valo 	}
1257dcfbd67bSEmmanuel Grumbach 
1258dcfbd67bSEmmanuel Grumbach 	return scd_bug;
1259e705c121SKalle Valo }
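/*
 * Worked example of the scd_bug workaround above (hypothetical values): on a
 * non-MQ device with write_ptr = 0x10, enabling the queue with ssn = 0x50
 * would move the SCD write pointer by exactly 0x40, so
 * !((0x50 - 0x10) & 0x3f) is true and ssn is bumped to 0x51; the function
 * then returns true so the op_mode can account for the skipped entry.
 */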
1260e705c121SKalle Valo 
iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans * trans,u32 txq_id,bool shared_mode)126142db09c1SLiad Kaufman void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
126242db09c1SLiad Kaufman 					bool shared_mode)
126342db09c1SLiad Kaufman {
126449101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
126549101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
126642db09c1SLiad Kaufman 
126742db09c1SLiad Kaufman 	txq->ampdu = !shared_mode;
126842db09c1SLiad Kaufman }
126942db09c1SLiad Kaufman 
iwl_trans_pcie_txq_disable(struct iwl_trans * trans,int txq_id,bool configure_scd)1270e705c121SKalle Valo void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1271e705c121SKalle Valo 				bool configure_scd)
1272e705c121SKalle Valo {
1273e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1274e705c121SKalle Valo 	u32 stts_addr = trans_pcie->scd_base_addr +
1275e705c121SKalle Valo 			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1276e705c121SKalle Valo 	static const u32 zero_val[4] = {};
1277e705c121SKalle Valo 
127849101078SJohannes Berg 	trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
127949101078SJohannes Berg 	trans_pcie->txqs.txq[txq_id]->frozen = false;
1280e705c121SKalle Valo 
1281e705c121SKalle Valo 	/*
1282e705c121SKalle Valo 	 * Upon HW Rfkill - we stop the device, and then stop the queues
1283e705c121SKalle Valo 	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1284e705c121SKalle Valo 	 * allow the op_mode to call txq_disable after it already called
1285e705c121SKalle Valo 	 * stop_device.
1286e705c121SKalle Valo 	 */
128749101078SJohannes Berg 	if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
1288e705c121SKalle Valo 		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1289e705c121SKalle Valo 			  "queue %d not used", txq_id);
1290e705c121SKalle Valo 		return;
1291e705c121SKalle Valo 	}
1292e705c121SKalle Valo 
1293e705c121SKalle Valo 	if (configure_scd) {
1294e705c121SKalle Valo 		iwl_scd_txq_set_inactive(trans, txq_id);
1295e705c121SKalle Valo 
129673c289baSBjoern A. Zeeb 		iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
1297e705c121SKalle Valo 				    ARRAY_SIZE(zero_val));
1298e705c121SKalle Valo 	}
1299e705c121SKalle Valo 
1300e705c121SKalle Valo 	iwl_pcie_txq_unmap(trans, txq_id);
130149101078SJohannes Berg 	trans_pcie->txqs.txq[txq_id]->ampdu = false;
1302e705c121SKalle Valo 
1303e705c121SKalle Valo 	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1304e705c121SKalle Valo }
1305e705c121SKalle Valo 
1306e705c121SKalle Valo /*************** HOST COMMAND QUEUE FUNCTIONS   *****/
1307e705c121SKalle Valo 
iwl_trans_pcie_block_txq_ptrs(struct iwl_trans * trans,bool block)130814c1b6f4SJohannes Berg static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
130914c1b6f4SJohannes Berg {
131049101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
131114c1b6f4SJohannes Berg 	int i;
131214c1b6f4SJohannes Berg 
131314c1b6f4SJohannes Berg 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
131449101078SJohannes Berg 		struct iwl_txq *txq = trans_pcie->txqs.txq[i];
131514c1b6f4SJohannes Berg 
131649101078SJohannes Berg 		if (i == trans_pcie->txqs.cmd.q_id)
131714c1b6f4SJohannes Berg 			continue;
131814c1b6f4SJohannes Berg 
131914c1b6f4SJohannes Berg 		/* we skip the command queue (obviously) so it's OK to nest */
132014c1b6f4SJohannes Berg 		spin_lock_nested(&txq->lock, 1);
132114c1b6f4SJohannes Berg 
132214c1b6f4SJohannes Berg 		if (!block && !(WARN_ON_ONCE(!txq->block))) {
132314c1b6f4SJohannes Berg 			txq->block--;
132414c1b6f4SJohannes Berg 			if (!txq->block) {
132514c1b6f4SJohannes Berg 				iwl_write32(trans, HBUS_TARG_WRPTR,
132614c1b6f4SJohannes Berg 					    txq->write_ptr | (i << 8));
132714c1b6f4SJohannes Berg 			}
132814c1b6f4SJohannes Berg 		} else if (block) {
132914c1b6f4SJohannes Berg 			txq->block++;
133014c1b6f4SJohannes Berg 		}
133114c1b6f4SJohannes Berg 
133214c1b6f4SJohannes Berg 		spin_unlock(&txq->lock);
133314c1b6f4SJohannes Berg 	}
133414c1b6f4SJohannes Berg }
133514c1b6f4SJohannes Berg 
1336e705c121SKalle Valo /*
1337e705c121SKalle Valo  * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1338e705c121SKalle Valo  * @trans: the transport
1339e705c121SKalle Valo  * @cmd: a pointer to the ucode command structure
1340e705c121SKalle Valo  *
1341e705c121SKalle Valo  * The function returns < 0 values to indicate the operation
1342e705c121SKalle Valo  * failed. On success, it returns the index (>= 0) of command in the
1343e705c121SKalle Valo  * command queue.
1344e705c121SKalle Valo  */
iwl_pcie_enqueue_hcmd(struct iwl_trans * trans,struct iwl_host_cmd * cmd)134513f028b4SMordechay Goodstein int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1346e705c121SKalle Valo 			  struct iwl_host_cmd *cmd)
1347e705c121SKalle Valo {
134849101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
134949101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1350e705c121SKalle Valo 	struct iwl_device_cmd *out_cmd;
1351e705c121SKalle Valo 	struct iwl_cmd_meta *out_meta;
1352e705c121SKalle Valo 	void *dup_buf = NULL;
1353e705c121SKalle Valo 	dma_addr_t phys_addr;
1354e705c121SKalle Valo 	int idx;
13558de437c7SSara Sharon 	u16 copy_size, cmd_size, tb0_size;
1356e705c121SKalle Valo 	bool had_nocopy = false;
1357e705c121SKalle Valo 	u8 group_id = iwl_cmd_groupid(cmd->id);
1358e705c121SKalle Valo 	int i, ret;
1359e705c121SKalle Valo 	u32 cmd_pos;
1360e705c121SKalle Valo 	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1361e705c121SKalle Valo 	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
13622800aadcSJiri Kosina 	unsigned long flags;
1363e705c121SKalle Valo 
1364b7d96bcaSLuca Coelho 	if (WARN(!trans->wide_cmd_header &&
1365b7d96bcaSLuca Coelho 		 group_id > IWL_ALWAYS_LONG_GROUP,
1366b7d96bcaSLuca Coelho 		 "unsupported wide command %#x\n", cmd->id))
1367b7d96bcaSLuca Coelho 		return -EINVAL;
1368b7d96bcaSLuca Coelho 
1369e705c121SKalle Valo 	if (group_id != 0) {
1370e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header_wide);
1371e705c121SKalle Valo 		cmd_size = sizeof(struct iwl_cmd_header_wide);
1372e705c121SKalle Valo 	} else {
1373e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header);
1374e705c121SKalle Valo 		cmd_size = sizeof(struct iwl_cmd_header);
1375e705c121SKalle Valo 	}
1376e705c121SKalle Valo 
1377e705c121SKalle Valo 	/* need one for the header if the first is NOCOPY */
1378e705c121SKalle Valo 	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1379e705c121SKalle Valo 
1380e705c121SKalle Valo 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1381e705c121SKalle Valo 		cmddata[i] = cmd->data[i];
1382e705c121SKalle Valo 		cmdlen[i] = cmd->len[i];
1383e705c121SKalle Valo 
1384e705c121SKalle Valo 		if (!cmd->len[i])
1385e705c121SKalle Valo 			continue;
1386e705c121SKalle Valo 
13878de437c7SSara Sharon 		/* need at least IWL_FIRST_TB_SIZE copied */
13888de437c7SSara Sharon 		if (copy_size < IWL_FIRST_TB_SIZE) {
13898de437c7SSara Sharon 			int copy = IWL_FIRST_TB_SIZE - copy_size;
1390e705c121SKalle Valo 
1391e705c121SKalle Valo 			if (copy > cmdlen[i])
1392e705c121SKalle Valo 				copy = cmdlen[i];
1393e705c121SKalle Valo 			cmdlen[i] -= copy;
1394e705c121SKalle Valo 			cmddata[i] += copy;
1395e705c121SKalle Valo 			copy_size += copy;
1396e705c121SKalle Valo 		}
1397e705c121SKalle Valo 
1398e705c121SKalle Valo 		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1399e705c121SKalle Valo 			had_nocopy = true;
1400e705c121SKalle Valo 			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1401e705c121SKalle Valo 				idx = -EINVAL;
1402e705c121SKalle Valo 				goto free_dup_buf;
1403e705c121SKalle Valo 			}
1404e705c121SKalle Valo 		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1405e705c121SKalle Valo 			/*
1406e705c121SKalle Valo 			 * This is also a chunk that isn't copied
1407e705c121SKalle Valo 			 * to the static buffer so set had_nocopy.
1408e705c121SKalle Valo 			 */
1409e705c121SKalle Valo 			had_nocopy = true;
1410e705c121SKalle Valo 
1411e705c121SKalle Valo 			/* only allowed once */
1412e705c121SKalle Valo 			if (WARN_ON(dup_buf)) {
1413e705c121SKalle Valo 				idx = -EINVAL;
1414e705c121SKalle Valo 				goto free_dup_buf;
1415e705c121SKalle Valo 			}
1416e705c121SKalle Valo 
1417e705c121SKalle Valo 			dup_buf = kmemdup(cmddata[i], cmdlen[i],
1418e705c121SKalle Valo 					  GFP_ATOMIC);
1419e705c121SKalle Valo 			if (!dup_buf)
1420e705c121SKalle Valo 				return -ENOMEM;
1421e705c121SKalle Valo 		} else {
1422e705c121SKalle Valo 			/* NOCOPY must not be followed by normal! */
1423e705c121SKalle Valo 			if (WARN_ON(had_nocopy)) {
1424e705c121SKalle Valo 				idx = -EINVAL;
1425e705c121SKalle Valo 				goto free_dup_buf;
1426e705c121SKalle Valo 			}
1427e705c121SKalle Valo 			copy_size += cmdlen[i];
1428e705c121SKalle Valo 		}
1429e705c121SKalle Valo 		cmd_size += cmd->len[i];
1430e705c121SKalle Valo 	}
1431e705c121SKalle Valo 
1432e705c121SKalle Valo 	/*
1433e705c121SKalle Valo 	 * If any of the command structures end up being larger than
1434e705c121SKalle Valo 	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
1435e705c121SKalle Valo 	 * allocated into separate TFDs, then we will need to
1436e705c121SKalle Valo 	 * increase the size of the buffers.
1437e705c121SKalle Valo 	 */
1438e705c121SKalle Valo 	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1439e705c121SKalle Valo 		 "Command %s (%#x) is too large (%d bytes)\n",
144039bdb17eSSharon Dvir 		 iwl_get_cmd_string(trans, cmd->id),
144139bdb17eSSharon Dvir 		 cmd->id, copy_size)) {
1442e705c121SKalle Valo 		idx = -EINVAL;
1443e705c121SKalle Valo 		goto free_dup_buf;
1444e705c121SKalle Valo 	}
1445e705c121SKalle Valo 
14462800aadcSJiri Kosina 	spin_lock_irqsave(&txq->lock, flags);
1447e705c121SKalle Valo 
14480cd1ad2dSMordechay Goodstein 	if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
14492800aadcSJiri Kosina 		spin_unlock_irqrestore(&txq->lock, flags);
1450e705c121SKalle Valo 
1451e705c121SKalle Valo 		IWL_ERR(trans, "No space in command queue\n");
1452e705c121SKalle Valo 		iwl_op_mode_cmd_queue_full(trans->op_mode);
1453e705c121SKalle Valo 		idx = -ENOSPC;
1454e705c121SKalle Valo 		goto free_dup_buf;
1455e705c121SKalle Valo 	}
1456e705c121SKalle Valo 
14570cd1ad2dSMordechay Goodstein 	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1458e705c121SKalle Valo 	out_cmd = txq->entries[idx].cmd;
1459e705c121SKalle Valo 	out_meta = &txq->entries[idx].meta;
1460e705c121SKalle Valo 
14617f5e3038SBenjamin Berg 	/* re-initialize, this also marks the SG list as unused */
14627f5e3038SBenjamin Berg 	memset(out_meta, 0, sizeof(*out_meta));
1463e705c121SKalle Valo 	if (cmd->flags & CMD_WANT_SKB)
1464e705c121SKalle Valo 		out_meta->source = cmd;
1465e705c121SKalle Valo 
1466e705c121SKalle Valo 	/* set up the header */
1467e705c121SKalle Valo 	if (group_id != 0) {
1468e705c121SKalle Valo 		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1469e705c121SKalle Valo 		out_cmd->hdr_wide.group_id = group_id;
1470e705c121SKalle Valo 		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1471e705c121SKalle Valo 		out_cmd->hdr_wide.length =
1472e705c121SKalle Valo 			cpu_to_le16(cmd_size -
1473e705c121SKalle Valo 				    sizeof(struct iwl_cmd_header_wide));
1474e705c121SKalle Valo 		out_cmd->hdr_wide.reserved = 0;
1475e705c121SKalle Valo 		out_cmd->hdr_wide.sequence =
147649101078SJohannes Berg 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1477bb98ecd4SSara Sharon 						 INDEX_TO_SEQ(txq->write_ptr));
1478e705c121SKalle Valo 
1479e705c121SKalle Valo 		cmd_pos = sizeof(struct iwl_cmd_header_wide);
1480e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header_wide);
1481e705c121SKalle Valo 	} else {
1482e705c121SKalle Valo 		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1483e705c121SKalle Valo 		out_cmd->hdr.sequence =
148449101078SJohannes Berg 			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1485bb98ecd4SSara Sharon 						 INDEX_TO_SEQ(txq->write_ptr));
1486e705c121SKalle Valo 		out_cmd->hdr.group_id = 0;
1487e705c121SKalle Valo 
1488e705c121SKalle Valo 		cmd_pos = sizeof(struct iwl_cmd_header);
1489e705c121SKalle Valo 		copy_size = sizeof(struct iwl_cmd_header);
1490e705c121SKalle Valo 	}
1491e705c121SKalle Valo 
1492e705c121SKalle Valo 	/* and copy the data that needs to be copied */
1493e705c121SKalle Valo 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1494e705c121SKalle Valo 		int copy;
1495e705c121SKalle Valo 
1496e705c121SKalle Valo 		if (!cmd->len[i])
1497e705c121SKalle Valo 			continue;
1498e705c121SKalle Valo 
1499e705c121SKalle Valo 		/* copy everything if not nocopy/dup */
1500e705c121SKalle Valo 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1501e705c121SKalle Valo 					   IWL_HCMD_DFL_DUP))) {
1502e705c121SKalle Valo 			copy = cmd->len[i];
1503e705c121SKalle Valo 
1504e705c121SKalle Valo 			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1505e705c121SKalle Valo 			cmd_pos += copy;
1506e705c121SKalle Valo 			copy_size += copy;
1507e705c121SKalle Valo 			continue;
1508e705c121SKalle Valo 		}
1509e705c121SKalle Valo 
1510e705c121SKalle Valo 		/*
15118de437c7SSara Sharon 		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
15128de437c7SSara Sharon 		 * in total (for bi-directional DMA), but copy up to what
1513e705c121SKalle Valo 		 * we can fit into the payload for debug dump purposes.
1514e705c121SKalle Valo 		 */
1515e705c121SKalle Valo 		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1516e705c121SKalle Valo 
1517e705c121SKalle Valo 		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1518e705c121SKalle Valo 		cmd_pos += copy;
1519e705c121SKalle Valo 
1520e705c121SKalle Valo 		/* However, treat copy_size the proper way, we need it below */
15218de437c7SSara Sharon 		if (copy_size < IWL_FIRST_TB_SIZE) {
15228de437c7SSara Sharon 			copy = IWL_FIRST_TB_SIZE - copy_size;
1523e705c121SKalle Valo 
1524e705c121SKalle Valo 			if (copy > cmd->len[i])
1525e705c121SKalle Valo 				copy = cmd->len[i];
1526e705c121SKalle Valo 			copy_size += copy;
1527e705c121SKalle Valo 		}
1528e705c121SKalle Valo 	}
1529e705c121SKalle Valo 
1530e705c121SKalle Valo 	IWL_DEBUG_HC(trans,
1531e705c121SKalle Valo 		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
153239bdb17eSSharon Dvir 		     iwl_get_cmd_string(trans, cmd->id),
1533e705c121SKalle Valo 		     group_id, out_cmd->hdr.cmd,
1534e705c121SKalle Valo 		     le16_to_cpu(out_cmd->hdr.sequence),
153549101078SJohannes Berg 		     cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
1536e705c121SKalle Valo 
15378de437c7SSara Sharon 	/* start the TFD with the minimum copy bytes */
15388de437c7SSara Sharon 	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
15398de437c7SSara Sharon 	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1540e705c121SKalle Valo 	iwl_pcie_txq_build_tfd(trans, txq,
15410cd1ad2dSMordechay Goodstein 			       iwl_txq_get_first_tb_dma(txq, idx),
15428de437c7SSara Sharon 			       tb0_size, true);
1543e705c121SKalle Valo 
1544e705c121SKalle Valo 	/* map first command fragment, if any remains */
15458de437c7SSara Sharon 	if (copy_size > tb0_size) {
1546e705c121SKalle Valo 		phys_addr = dma_map_single(trans->dev,
15478de437c7SSara Sharon 					   ((u8 *)&out_cmd->hdr) + tb0_size,
15488de437c7SSara Sharon 					   copy_size - tb0_size,
1549e705c121SKalle Valo 					   DMA_TO_DEVICE);
1550e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, phys_addr)) {
15510179bfffSMordechay Goodstein 			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1552bb98ecd4SSara Sharon 					       txq->write_ptr);
1553e705c121SKalle Valo 			idx = -ENOMEM;
1554e705c121SKalle Valo 			goto out;
1555e705c121SKalle Valo 		}
1556e705c121SKalle Valo 
1557e705c121SKalle Valo 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
15588de437c7SSara Sharon 				       copy_size - tb0_size, false);
1559e705c121SKalle Valo 	}
1560e705c121SKalle Valo 
1561e705c121SKalle Valo 	/* map the remaining (adjusted) nocopy/dup fragments */
1562e705c121SKalle Valo 	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
15630301bcd5SBjoern A. Zeeb 		void *data = (void *)(uintptr_t)cmddata[i];
1564e705c121SKalle Valo 
1565e705c121SKalle Valo 		if (!cmdlen[i])
1566e705c121SKalle Valo 			continue;
1567e705c121SKalle Valo 		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1568e705c121SKalle Valo 					   IWL_HCMD_DFL_DUP)))
1569e705c121SKalle Valo 			continue;
1570e705c121SKalle Valo 		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1571e705c121SKalle Valo 			data = dup_buf;
15720301bcd5SBjoern A. Zeeb 		phys_addr = dma_map_single(trans->dev, data,
1573e705c121SKalle Valo 					   cmdlen[i], DMA_TO_DEVICE);
1574e705c121SKalle Valo 		if (dma_mapping_error(trans->dev, phys_addr)) {
15750179bfffSMordechay Goodstein 			iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1576bb98ecd4SSara Sharon 					       txq->write_ptr);
1577e705c121SKalle Valo 			idx = -ENOMEM;
1578e705c121SKalle Valo 			goto out;
1579e705c121SKalle Valo 		}
1580e705c121SKalle Valo 
1581e705c121SKalle Valo 		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1582e705c121SKalle Valo 	}
1583e705c121SKalle Valo 
15843cd1980bSSara Sharon 	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1585e705c121SKalle Valo 	out_meta->flags = cmd->flags;
1586e705c121SKalle Valo 	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1587453431a5SWaiman Long 		kfree_sensitive(txq->entries[idx].free_buf);
1588e705c121SKalle Valo 	txq->entries[idx].free_buf = dup_buf;
1589e705c121SKalle Valo 
1590e705c121SKalle Valo 	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1591e705c121SKalle Valo 
1592e705c121SKalle Valo 	/* start timer if queue currently empty */
1593bb98ecd4SSara Sharon 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1594e705c121SKalle Valo 		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1595e705c121SKalle Valo 
1596e705c121SKalle Valo 	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1597e705c121SKalle Valo 	if (ret < 0) {
1598e705c121SKalle Valo 		idx = ret;
159972bc934cSJohannes Berg 		goto out;
1600e705c121SKalle Valo 	}
1601e705c121SKalle Valo 
160214c1b6f4SJohannes Berg 	if (cmd->flags & CMD_BLOCK_TXQS)
160314c1b6f4SJohannes Berg 		iwl_trans_pcie_block_txq_ptrs(trans, true);
160414c1b6f4SJohannes Berg 
1605e705c121SKalle Valo 	/* Increment and update queue's write index */
16060cd1ad2dSMordechay Goodstein 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1607e705c121SKalle Valo 	iwl_pcie_txq_inc_wr_ptr(trans, txq);
1608e705c121SKalle Valo 
1609e705c121SKalle Valo  out:
16102800aadcSJiri Kosina 	spin_unlock_irqrestore(&txq->lock, flags);
1611e705c121SKalle Valo  free_dup_buf:
1612e705c121SKalle Valo 	if (idx < 0)
1613e705c121SKalle Valo 		kfree(dup_buf);
1614e705c121SKalle Valo 	return idx;
1615e705c121SKalle Valo }
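/*
 * Usage sketch (hypothetical caller, not part of this file): an op_mode
 * typically reaches this function through iwl_trans_send_cmd() with a host
 * command whose first chunk is small enough to be copied into the command
 * buffer, while a large trailing chunk is marked NOCOPY so it is only
 * DMA-mapped.  The names group, opcode, cmd_hdr, big_payload and payload_len
 * below are placeholders:
 *
 *	struct iwl_host_cmd hcmd = {
 *		.id = WIDE_ID(group, opcode),
 *		.data = { &cmd_hdr, big_payload },
 *		.len = { sizeof(cmd_hdr), payload_len },
 *		.dataflags = { 0, IWL_HCMD_DFL_NOCOPY },
 *	};
 *
 *	ret = iwl_trans_send_cmd(trans, &hcmd);
 */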
1616e705c121SKalle Valo 
1617e705c121SKalle Valo /*
1618e705c121SKalle Valo  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1619e705c121SKalle Valo  * @rxb: Rx buffer to reclaim
1620e705c121SKalle Valo  */
iwl_pcie_hcmd_complete(struct iwl_trans * trans,struct iwl_rx_cmd_buffer * rxb)1621e705c121SKalle Valo void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1622e705c121SKalle Valo 			    struct iwl_rx_cmd_buffer *rxb)
1623e705c121SKalle Valo {
1624e705c121SKalle Valo 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1625e705c121SKalle Valo 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1626d490e097SJohannes Berg 	u8 group_id;
162739bdb17eSSharon Dvir 	u32 cmd_id;
1628e705c121SKalle Valo 	int txq_id = SEQ_TO_QUEUE(sequence);
1629e705c121SKalle Valo 	int index = SEQ_TO_INDEX(sequence);
1630e705c121SKalle Valo 	int cmd_index;
1631e705c121SKalle Valo 	struct iwl_device_cmd *cmd;
1632e705c121SKalle Valo 	struct iwl_cmd_meta *meta;
1633e705c121SKalle Valo 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
163449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1635e705c121SKalle Valo 
1636e705c121SKalle Valo 	/* If a Tx command is being handled and it isn't in the actual
1637e705c121SKalle Valo 	 * command queue then a command routing bug has been introduced
1638e705c121SKalle Valo 	 * in the queue management code. */
163949101078SJohannes Berg 	if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
1640e705c121SKalle Valo 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
164149101078SJohannes Berg 		 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
1642b2a3b1c1SSara Sharon 		 txq->write_ptr)) {
1643e705c121SKalle Valo 		iwl_print_hex_error(trans, pkt, 32);
1644e705c121SKalle Valo 		return;
1645e705c121SKalle Valo 	}
1646e705c121SKalle Valo 
1647e705c121SKalle Valo 	spin_lock_bh(&txq->lock);
1648e705c121SKalle Valo 
16490cd1ad2dSMordechay Goodstein 	cmd_index = iwl_txq_get_cmd_index(txq, index);
1650e705c121SKalle Valo 	cmd = txq->entries[cmd_index].cmd;
1651e705c121SKalle Valo 	meta = &txq->entries[cmd_index].meta;
1652d490e097SJohannes Berg 	group_id = cmd->hdr.group_id;
1653f0c86427SJohannes Berg 	cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1654e705c121SKalle Valo 
1655a0632004SJohannes Berg 	if (trans->trans_cfg->gen2)
1656a0632004SJohannes Berg 		iwl_txq_gen2_tfd_unmap(trans, meta,
1657a0632004SJohannes Berg 				       iwl_txq_get_tfd(trans, txq, index));
1658a0632004SJohannes Berg 	else
16590179bfffSMordechay Goodstein 		iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1660e705c121SKalle Valo 
1661e705c121SKalle Valo 	/* Input error checking is done when commands are added to queue. */
1662e705c121SKalle Valo 	if (meta->flags & CMD_WANT_SKB) {
1663e705c121SKalle Valo 		struct page *p = rxb_steal_page(rxb);
1664e705c121SKalle Valo 
1665e705c121SKalle Valo 		meta->source->resp_pkt = pkt;
1666e705c121SKalle Valo 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1667e705c121SKalle Valo 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1668e705c121SKalle Valo 	}
1669e705c121SKalle Valo 
167014c1b6f4SJohannes Berg 	if (meta->flags & CMD_BLOCK_TXQS)
167114c1b6f4SJohannes Berg 		iwl_trans_pcie_block_txq_ptrs(trans, false);
1672dcbb4746SEmmanuel Grumbach 
1673e705c121SKalle Valo 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1674e705c121SKalle Valo 
1675e705c121SKalle Valo 	if (!(meta->flags & CMD_ASYNC)) {
1676e705c121SKalle Valo 		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1677e705c121SKalle Valo 			IWL_WARN(trans,
1678e705c121SKalle Valo 				 "HCMD_ACTIVE already clear for command %s\n",
167939bdb17eSSharon Dvir 				 iwl_get_cmd_string(trans, cmd_id));
1680e705c121SKalle Valo 		}
1681e705c121SKalle Valo 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1682e705c121SKalle Valo 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
168339bdb17eSSharon Dvir 			       iwl_get_cmd_string(trans, cmd_id));
168413f028b4SMordechay Goodstein 		wake_up(&trans->wait_command_queue);
1685e705c121SKalle Valo 	}
1686e705c121SKalle Valo 
1687e705c121SKalle Valo 	meta->flags = 0;
1688e705c121SKalle Valo 
1689e705c121SKalle Valo 	spin_unlock_bh(&txq->lock);
1690e705c121SKalle Valo }
1691e705c121SKalle Valo 
iwl_fill_data_tbs(struct iwl_trans * trans,struct sk_buff * skb,struct iwl_txq * txq,u8 hdr_len,struct iwl_cmd_meta * out_meta)16923a0b2a42SEmmanuel Grumbach static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
16933a0b2a42SEmmanuel Grumbach 			     struct iwl_txq *txq, u8 hdr_len,
1694bb03927eSJohannes Berg 			     struct iwl_cmd_meta *out_meta)
16953a0b2a42SEmmanuel Grumbach {
1696bb03927eSJohannes Berg 	u16 head_tb_len;
16973a0b2a42SEmmanuel Grumbach 	int i;
16983a0b2a42SEmmanuel Grumbach 
16993a0b2a42SEmmanuel Grumbach 	/*
17003a0b2a42SEmmanuel Grumbach 	 * Set up TFD's third entry to point directly to remainder
17013a0b2a42SEmmanuel Grumbach 	 * of skb's head, if any
17023a0b2a42SEmmanuel Grumbach 	 */
1703bb03927eSJohannes Berg 	head_tb_len = skb_headlen(skb) - hdr_len;
17043a0b2a42SEmmanuel Grumbach 
1705bb03927eSJohannes Berg 	if (head_tb_len > 0) {
1706bb03927eSJohannes Berg 		dma_addr_t tb_phys = dma_map_single(trans->dev,
17073a0b2a42SEmmanuel Grumbach 						    skb->data + hdr_len,
1708bb03927eSJohannes Berg 						    head_tb_len, DMA_TO_DEVICE);
1709bb03927eSJohannes Berg 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
17103a0b2a42SEmmanuel Grumbach 			return -EINVAL;
17119b08ae22SJohannes Berg 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
17129b08ae22SJohannes Berg 					tb_phys, head_tb_len);
1713bb03927eSJohannes Berg 		iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
17143a0b2a42SEmmanuel Grumbach 	}
17153a0b2a42SEmmanuel Grumbach 
17163a0b2a42SEmmanuel Grumbach 	/* set up the remaining entries to point to the data */
17173a0b2a42SEmmanuel Grumbach 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
17183a0b2a42SEmmanuel Grumbach 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
17193a0b2a42SEmmanuel Grumbach 		dma_addr_t tb_phys;
17203a0b2a42SEmmanuel Grumbach 		int tb_idx;
17213a0b2a42SEmmanuel Grumbach 
17223a0b2a42SEmmanuel Grumbach 		if (!skb_frag_size(frag))
17233a0b2a42SEmmanuel Grumbach 			continue;
17243a0b2a42SEmmanuel Grumbach 
17253a0b2a42SEmmanuel Grumbach 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
17263a0b2a42SEmmanuel Grumbach 					   skb_frag_size(frag), DMA_TO_DEVICE);
17273a0b2a42SEmmanuel Grumbach 
17287d50d76eSJohannes Berg 		if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
17293a0b2a42SEmmanuel Grumbach 			return -EINVAL;
17309b08ae22SJohannes Berg 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
17319b08ae22SJohannes Berg 					tb_phys, skb_frag_size(frag));
17323a0b2a42SEmmanuel Grumbach 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
17333a0b2a42SEmmanuel Grumbach 						skb_frag_size(frag), false);
17346e00a237SJohannes Berg 		if (tb_idx < 0)
17356e00a237SJohannes Berg 			return tb_idx;
17363a0b2a42SEmmanuel Grumbach 
17373cd1980bSSara Sharon 		out_meta->tbs |= BIT(tb_idx);
17383a0b2a42SEmmanuel Grumbach 	}
17393a0b2a42SEmmanuel Grumbach 
17403a0b2a42SEmmanuel Grumbach 	return 0;
17413a0b2a42SEmmanuel Grumbach }
17423a0b2a42SEmmanuel Grumbach 
17436eb5e529SEmmanuel Grumbach #ifdef CONFIG_INET
iwl_pcie_get_page_hdr(struct iwl_trans * trans,size_t len,struct sk_buff * skb)17447f5e3038SBenjamin Berg static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
174549101078SJohannes Berg 				   size_t len, struct sk_buff *skb)
174649101078SJohannes Berg {
174749101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
174849101078SJohannes Berg 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
1749adc902ceSBenjamin Berg 	struct iwl_tso_page_info *info;
175049101078SJohannes Berg 	struct page **page_ptr;
1751adc902ceSBenjamin Berg 	dma_addr_t phys;
1752d023a228SBenjamin Berg 	void *ret;
175349101078SJohannes Berg 
175449101078SJohannes Berg 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
175549101078SJohannes Berg 
175649101078SJohannes Berg 	if (WARN_ON(*page_ptr))
175749101078SJohannes Berg 		return NULL;
175849101078SJohannes Berg 
175949101078SJohannes Berg 	if (!p->page)
176049101078SJohannes Berg 		goto alloc;
176149101078SJohannes Berg 
176249101078SJohannes Berg 	/*
176349101078SJohannes Berg 	 * Check if there's enough room on this page
176449101078SJohannes Berg 	 *
176549101078SJohannes Berg 	 * Note that we put a page chaining pointer *last* in the
176649101078SJohannes Berg 	 * page - we need it somewhere, and if it's there then we
176749101078SJohannes Berg 	 * avoid DMA mapping the last bits of the page which may
176849101078SJohannes Berg 	 * trigger the 32-bit boundary hardware bug.
176949101078SJohannes Berg 	 *
177049101078SJohannes Berg 	 * (see also get_workaround_page() in tx-gen2.c)
177149101078SJohannes Berg 	 */
1772adc902ceSBenjamin Berg 	if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
1773003eae5aSBenjamin Berg 		info = IWL_TSO_PAGE_INFO(page_address(p->page));
177449101078SJohannes Berg 		goto out;
1775adc902ceSBenjamin Berg 	}
177649101078SJohannes Berg 
177749101078SJohannes Berg 	/* We don't have enough room on this page, get a new one. */
1778adc902ceSBenjamin Berg 	iwl_pcie_free_and_unmap_tso_page(trans, p->page);
177949101078SJohannes Berg 
178049101078SJohannes Berg alloc:
178149101078SJohannes Berg 	p->page = alloc_page(GFP_ATOMIC);
178249101078SJohannes Berg 	if (!p->page)
178349101078SJohannes Berg 		return NULL;
178449101078SJohannes Berg 	p->pos = page_address(p->page);
1785adc902ceSBenjamin Berg 
1786003eae5aSBenjamin Berg 	info = IWL_TSO_PAGE_INFO(page_address(p->page));
1787adc902ceSBenjamin Berg 
178849101078SJohannes Berg 	/* set the chaining pointer to NULL */
1789adc902ceSBenjamin Berg 	info->next = NULL;
1790adc902ceSBenjamin Berg 
1791adc902ceSBenjamin Berg 	/* Create a DMA mapping for the page */
1792adc902ceSBenjamin Berg 	phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
1793adc902ceSBenjamin Berg 				  DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1794adc902ceSBenjamin Berg 	if (unlikely(dma_mapping_error(trans->dev, phys))) {
1795adc902ceSBenjamin Berg 		__free_page(p->page);
1796adc902ceSBenjamin Berg 		p->page = NULL;
1797adc902ceSBenjamin Berg 
1798adc902ceSBenjamin Berg 		return NULL;
1799adc902ceSBenjamin Berg 	}
1800adc902ceSBenjamin Berg 
1801adc902ceSBenjamin Berg 	/* Store physical address and set use count */
1802adc902ceSBenjamin Berg 	info->dma_addr = phys;
1803adc902ceSBenjamin Berg 	refcount_set(&info->use_count, 1);
180449101078SJohannes Berg out:
180549101078SJohannes Berg 	*page_ptr = p->page;
1806adc902ceSBenjamin Berg 	/* Return an internal reference for the caller */
1807adc902ceSBenjamin Berg 	refcount_inc(&info->use_count);
1808d023a228SBenjamin Berg 	ret = p->pos;
1809d023a228SBenjamin Berg 	p->pos += len;
1810d023a228SBenjamin Berg 
1811d023a228SBenjamin Berg 	return ret;
181249101078SJohannes Berg }
181349101078SJohannes Berg 
18147f5e3038SBenjamin Berg /**
18157f5e3038SBenjamin Berg  * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
18167f5e3038SBenjamin Berg  * @sgt: scatter gather table
1817*38c8d025SBenjamin Berg  * @offset: Offset into the mapped memory (i.e. SKB payload data)
1818*38c8d025SBenjamin Berg  * @len: Length of the area
18197f5e3038SBenjamin Berg  *
1820*38c8d025SBenjamin Berg  * Find the DMA address that corresponds to the SKB payload data at the
1821*38c8d025SBenjamin Berg  * position given by @offset.
18227f5e3038SBenjamin Berg  *
18237f5e3038SBenjamin Berg  * Returns: Address for TB entry
18247f5e3038SBenjamin Berg  */
iwl_pcie_get_sgt_tb_phys(struct sg_table * sgt,unsigned int offset,unsigned int len)1825*38c8d025SBenjamin Berg dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
1826*38c8d025SBenjamin Berg 				    unsigned int len)
18277f5e3038SBenjamin Berg {
18287f5e3038SBenjamin Berg 	struct scatterlist *sg;
1829*38c8d025SBenjamin Berg 	unsigned int sg_offset = 0;
18307f5e3038SBenjamin Berg 	int i;
18317f5e3038SBenjamin Berg 
1832*38c8d025SBenjamin Berg 	/*
1833*38c8d025SBenjamin Berg 	 * Search the mapped DMA areas in the SG for the area that contains the
1834*38c8d025SBenjamin Berg 	 * data at offset with the given length.
1835*38c8d025SBenjamin Berg 	 */
18367f5e3038SBenjamin Berg 	for_each_sgtable_dma_sg(sgt, sg, i) {
1837*38c8d025SBenjamin Berg 		if (offset >= sg_offset &&
1838*38c8d025SBenjamin Berg 		    offset + len <= sg_offset + sg_dma_len(sg))
1839*38c8d025SBenjamin Berg 			return sg_dma_address(sg) + offset - sg_offset;
1840*38c8d025SBenjamin Berg 
1841*38c8d025SBenjamin Berg 		sg_offset += sg_dma_len(sg);
18427f5e3038SBenjamin Berg 	}
18437f5e3038SBenjamin Berg 
18447f5e3038SBenjamin Berg 	WARN_ON_ONCE(1);
18457f5e3038SBenjamin Berg 
18467f5e3038SBenjamin Berg 	return DMA_MAPPING_ERROR;
18477f5e3038SBenjamin Berg }
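/*
 * Worked example for the lookup above (hypothetical mapping): with two DMA
 * segments of 1500 bytes each, a request for offset 1600 and length 200
 * skips the first segment (sg_offset becomes 1500), matches the second and
 * returns sg_dma_address(sg) + 100.  A request that straddles a segment
 * boundary matches neither entry and falls through to the
 * WARN/DMA_MAPPING_ERROR path.
 */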
18487f5e3038SBenjamin Berg 
18497f5e3038SBenjamin Berg /**
18507f5e3038SBenjamin Berg  * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
18517f5e3038SBenjamin Berg  * @trans: transport private data
18527f5e3038SBenjamin Berg  * @skb: the SKB to map
18537f5e3038SBenjamin Berg  * @cmd_meta: command meta to store the scatter list information for unmapping
18547f5e3038SBenjamin Berg  * @hdr: output argument for TSO headers
18557f5e3038SBenjamin Berg  * @hdr_room: requested length for TSO headers
18567f5e3038SBenjamin Berg  *
18577f5e3038SBenjamin Berg  * Allocate space for a scatter gather list and TSO headers and map the SKB
18587f5e3038SBenjamin Berg  * using the scatter gather list. The SKB is unmapped again when the page is
18597f5e3038SBenjamin Berg  * freed at the end of the operation.
18607f5e3038SBenjamin Berg  *
18617f5e3038SBenjamin Berg  * Returns: newly allocated and mapped scatter gather table with list
18627f5e3038SBenjamin Berg  */
iwl_pcie_prep_tso(struct iwl_trans * trans,struct sk_buff * skb,struct iwl_cmd_meta * cmd_meta,u8 ** hdr,unsigned int hdr_room)18637f5e3038SBenjamin Berg struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
18647f5e3038SBenjamin Berg 				   struct iwl_cmd_meta *cmd_meta,
18657f5e3038SBenjamin Berg 				   u8 **hdr, unsigned int hdr_room)
18667f5e3038SBenjamin Berg {
18677f5e3038SBenjamin Berg 	struct sg_table *sgt;
18687f5e3038SBenjamin Berg 
18697f5e3038SBenjamin Berg 	if (WARN_ON_ONCE(skb_has_frag_list(skb)))
18707f5e3038SBenjamin Berg 		return NULL;
18717f5e3038SBenjamin Berg 
18727f5e3038SBenjamin Berg 	*hdr = iwl_pcie_get_page_hdr(trans,
18737f5e3038SBenjamin Berg 				     hdr_room + __alignof__(struct sg_table) +
18747f5e3038SBenjamin Berg 				     sizeof(struct sg_table) +
18757f5e3038SBenjamin Berg 				     (skb_shinfo(skb)->nr_frags + 1) *
18767f5e3038SBenjamin Berg 				     sizeof(struct scatterlist),
18777f5e3038SBenjamin Berg 				     skb);
18787f5e3038SBenjamin Berg 	if (!*hdr)
18797f5e3038SBenjamin Berg 		return NULL;
18807f5e3038SBenjamin Berg 
18817f5e3038SBenjamin Berg 	sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
18827f5e3038SBenjamin Berg 	sgt->sgl = (void *)(sgt + 1);
18837f5e3038SBenjamin Berg 
18847f5e3038SBenjamin Berg 	sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
18857f5e3038SBenjamin Berg 
1886*38c8d025SBenjamin Berg 	/* Only map the data, not the header (it is copied to the TSO page) */
1887*38c8d025SBenjamin Berg 	sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
1888*38c8d025SBenjamin Berg 				       skb->data_len);
18897f5e3038SBenjamin Berg 	if (WARN_ON_ONCE(sgt->orig_nents <= 0))
18907f5e3038SBenjamin Berg 		return NULL;
18917f5e3038SBenjamin Berg 
18927f5e3038SBenjamin Berg 	/* And map the entire SKB */
18937f5e3038SBenjamin Berg 	if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
18947f5e3038SBenjamin Berg 		return NULL;
18957f5e3038SBenjamin Berg 
18967f5e3038SBenjamin Berg 	/* Store non-zero (i.e. valid) offset for unmapping */
18977f5e3038SBenjamin Berg 	cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
18987f5e3038SBenjamin Berg 
18997f5e3038SBenjamin Berg 	return sgt;
19007f5e3038SBenjamin Berg }
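/*
 * Layout sketch of the allocation made above (sizes are illustrative): the
 * single TSO page chunk returned by iwl_pcie_get_page_hdr() holds, in order,
 * the hdr_room bytes reserved for the subframe headers, alignment padding,
 * the struct sg_table itself and nr_frags + 1 struct scatterlist entries:
 *
 *	| TSO headers (hdr_room) | pad | sg_table | scatterlist[nr_frags + 1] |
 *
 * sgt->sgl points just past the sg_table, which is why the code sets
 * sgt->sgl = (void *)(sgt + 1).
 */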
19017f5e3038SBenjamin Berg 
iwl_fill_data_tbs_amsdu(struct iwl_trans * trans,struct sk_buff * skb,struct iwl_txq * txq,u8 hdr_len,struct iwl_cmd_meta * out_meta,struct iwl_device_tx_cmd * dev_cmd,u16 tb1_len)1902066fd29aSSara Sharon static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
19036eb5e529SEmmanuel Grumbach 				   struct iwl_txq *txq, u8 hdr_len,
19046eb5e529SEmmanuel Grumbach 				   struct iwl_cmd_meta *out_meta,
1905a89c72ffSJohannes Berg 				   struct iwl_device_tx_cmd *dev_cmd,
1906a89c72ffSJohannes Berg 				   u16 tb1_len)
19076eb5e529SEmmanuel Grumbach {
190849101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
190905e5a7e5SJohannes Berg 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
19106eb5e529SEmmanuel Grumbach 	struct ieee80211_hdr *hdr = (void *)skb->data;
19116eb5e529SEmmanuel Grumbach 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
19126eb5e529SEmmanuel Grumbach 	unsigned int mss = skb_shinfo(skb)->gso_size;
1913*38c8d025SBenjamin Berg 	unsigned int data_offset = 0;
19146eb5e529SEmmanuel Grumbach 	u16 length, iv_len, amsdu_pad;
191590db5075SBenjamin Berg 	dma_addr_t start_hdr_phys;
1916d023a228SBenjamin Berg 	u8 *start_hdr, *pos_hdr;
19177f5e3038SBenjamin Berg 	struct sg_table *sgt;
19186eb5e529SEmmanuel Grumbach 	struct tso_t tso;
19196eb5e529SEmmanuel Grumbach 
19206eb5e529SEmmanuel Grumbach 	/* if the packet is protected, then it must be CCMP or GCMP */
19216eb5e529SEmmanuel Grumbach 	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
19226eb5e529SEmmanuel Grumbach 	iv_len = ieee80211_has_protected(hdr->frame_control) ?
19236eb5e529SEmmanuel Grumbach 		IEEE80211_CCMP_HDR_LEN : 0;
19246eb5e529SEmmanuel Grumbach 
19256eb5e529SEmmanuel Grumbach 	trace_iwlwifi_dev_tx(trans->dev, skb,
19260cd1ad2dSMordechay Goodstein 			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
192749101078SJohannes Berg 			     trans_pcie->txqs.tfd.size,
19288790fce4SJohannes Berg 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
19296eb5e529SEmmanuel Grumbach 
1930cc15bd10SEric Dumazet 	ip_hdrlen = skb_network_header_len(skb);
19316eb5e529SEmmanuel Grumbach 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
19326eb5e529SEmmanuel Grumbach 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
19336eb5e529SEmmanuel Grumbach 	amsdu_pad = 0;
19346eb5e529SEmmanuel Grumbach 
19356eb5e529SEmmanuel Grumbach 	/* total amount of header we may need for this A-MSDU */
19366eb5e529SEmmanuel Grumbach 	hdr_room = DIV_ROUND_UP(total_len, mss) *
19376eb5e529SEmmanuel Grumbach 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
19386eb5e529SEmmanuel Grumbach 
19396eb5e529SEmmanuel Grumbach 	/* Our device supports 9 segments at most, it will fit in 1 page */
19407f5e3038SBenjamin Berg 	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
19417f5e3038SBenjamin Berg 	if (!sgt)
19426eb5e529SEmmanuel Grumbach 		return -ENOMEM;
19436eb5e529SEmmanuel Grumbach 
194490db5075SBenjamin Berg 	start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
19457f5e3038SBenjamin Berg 	pos_hdr = start_hdr;
1946d023a228SBenjamin Berg 	memcpy(pos_hdr, skb->data + hdr_len, iv_len);
1947d023a228SBenjamin Berg 	pos_hdr += iv_len;
19486eb5e529SEmmanuel Grumbach 
19496eb5e529SEmmanuel Grumbach 	/*
19506eb5e529SEmmanuel Grumbach 	 * Pull the ieee80211 header + IV to be able to use TSO core,
19516eb5e529SEmmanuel Grumbach 	 * we will restore it for the tx_status flow.
19526eb5e529SEmmanuel Grumbach 	 */
19536eb5e529SEmmanuel Grumbach 	skb_pull(skb, hdr_len + iv_len);
19546eb5e529SEmmanuel Grumbach 
195505e5a7e5SJohannes Berg 	/*
195605e5a7e5SJohannes Berg 	 * Remove the length of all the headers that we don't actually
195705e5a7e5SJohannes Berg 	 * have in the MPDU by themselves, but that we duplicate into
195805e5a7e5SJohannes Berg 	 * all the different MSDUs inside the A-MSDU.
195905e5a7e5SJohannes Berg 	 */
196005e5a7e5SJohannes Berg 	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
196105e5a7e5SJohannes Berg 
19626eb5e529SEmmanuel Grumbach 	tso_start(skb, &tso);
19636eb5e529SEmmanuel Grumbach 
19646eb5e529SEmmanuel Grumbach 	while (total_len) {
19656eb5e529SEmmanuel Grumbach 		/* this is the data left for this subframe */
19666eb5e529SEmmanuel Grumbach 		unsigned int data_left =
19676eb5e529SEmmanuel Grumbach 			min_t(unsigned int, mss, total_len);
19686eb5e529SEmmanuel Grumbach 		unsigned int hdr_tb_len;
19696eb5e529SEmmanuel Grumbach 		dma_addr_t hdr_tb_phys;
1970d023a228SBenjamin Berg 		u8 *subf_hdrs_start = pos_hdr;
19716eb5e529SEmmanuel Grumbach 
19726eb5e529SEmmanuel Grumbach 		total_len -= data_left;
19736eb5e529SEmmanuel Grumbach 
1974d023a228SBenjamin Berg 		memset(pos_hdr, 0, amsdu_pad);
1975d023a228SBenjamin Berg 		pos_hdr += amsdu_pad;
19766eb5e529SEmmanuel Grumbach 		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
19776eb5e529SEmmanuel Grumbach 				  data_left)) & 0x3;
1978d023a228SBenjamin Berg 		ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
1979d023a228SBenjamin Berg 		pos_hdr += ETH_ALEN;
1980d023a228SBenjamin Berg 		ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
1981d023a228SBenjamin Berg 		pos_hdr += ETH_ALEN;
19826eb5e529SEmmanuel Grumbach 
19836eb5e529SEmmanuel Grumbach 		length = snap_ip_tcp_hdrlen + data_left;
1984d023a228SBenjamin Berg 		*((__be16 *)pos_hdr) = cpu_to_be16(length);
1985d023a228SBenjamin Berg 		pos_hdr += sizeof(length);
19866eb5e529SEmmanuel Grumbach 
19876eb5e529SEmmanuel Grumbach 		/*
19886eb5e529SEmmanuel Grumbach 		 * This will copy the SNAP as well which will be considered
19896eb5e529SEmmanuel Grumbach 		 * as MAC header.
19906eb5e529SEmmanuel Grumbach 		 */
1991d023a228SBenjamin Berg 		tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
19926eb5e529SEmmanuel Grumbach 
1993d023a228SBenjamin Berg 		pos_hdr += snap_ip_tcp_hdrlen;
19946eb5e529SEmmanuel Grumbach 
1995d023a228SBenjamin Berg 		hdr_tb_len = pos_hdr - start_hdr;
199690db5075SBenjamin Berg 		hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
199790db5075SBenjamin Berg 
19986eb5e529SEmmanuel Grumbach 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
19996eb5e529SEmmanuel Grumbach 				       hdr_tb_len, false);
2000bf77ee2eSSara Sharon 		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
20019b08ae22SJohannes Berg 					hdr_tb_phys, hdr_tb_len);
200205e5a7e5SJohannes Berg 		/* add this subframe's headers' length to the tx_cmd */
2003d023a228SBenjamin Berg 		le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
20046eb5e529SEmmanuel Grumbach 
20056eb5e529SEmmanuel Grumbach 		/* prepare the start_hdr for the next subframe */
2006d023a228SBenjamin Berg 		start_hdr = pos_hdr;
20076eb5e529SEmmanuel Grumbach 
20086eb5e529SEmmanuel Grumbach 		/* put the payload */
20096eb5e529SEmmanuel Grumbach 		while (data_left) {
20106eb5e529SEmmanuel Grumbach 			unsigned int size = min_t(unsigned int, tso.size,
20116eb5e529SEmmanuel Grumbach 						  data_left);
20126eb5e529SEmmanuel Grumbach 			dma_addr_t tb_phys;
20136eb5e529SEmmanuel Grumbach 
2014*38c8d025SBenjamin Berg 			tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
201590db5075SBenjamin Berg 			/* Not a real mapping error, use direct comparison */
201690db5075SBenjamin Berg 			if (unlikely(tb_phys == DMA_MAPPING_ERROR))
20177d50d76eSJohannes Berg 				return -EINVAL;
20186eb5e529SEmmanuel Grumbach 
20196eb5e529SEmmanuel Grumbach 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
20206eb5e529SEmmanuel Grumbach 					       size, false);
2021bf77ee2eSSara Sharon 			trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
20229b08ae22SJohannes Berg 						tb_phys, size);
20236eb5e529SEmmanuel Grumbach 
20246eb5e529SEmmanuel Grumbach 			data_left -= size;
2025*38c8d025SBenjamin Berg 			data_offset += size;
20266eb5e529SEmmanuel Grumbach 			tso_build_data(skb, &tso, size);
20276eb5e529SEmmanuel Grumbach 		}
20286eb5e529SEmmanuel Grumbach 	}
20296eb5e529SEmmanuel Grumbach 
203090db5075SBenjamin Berg 	dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
203190db5075SBenjamin Berg 				   DMA_TO_DEVICE);
203290db5075SBenjamin Berg 
20336eb5e529SEmmanuel Grumbach 	/* re-add the WiFi header and IV */
20346eb5e529SEmmanuel Grumbach 	skb_push(skb, hdr_len + iv_len);
20356eb5e529SEmmanuel Grumbach 
20366eb5e529SEmmanuel Grumbach 	return 0;
20376eb5e529SEmmanuel Grumbach }
20386eb5e529SEmmanuel Grumbach #else /* CONFIG_INET */
iwl_fill_data_tbs_amsdu(struct iwl_trans * trans,struct sk_buff * skb,struct iwl_txq * txq,u8 hdr_len,struct iwl_cmd_meta * out_meta,struct iwl_device_tx_cmd * dev_cmd,u16 tb1_len)20396eb5e529SEmmanuel Grumbach static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
20406eb5e529SEmmanuel Grumbach 				   struct iwl_txq *txq, u8 hdr_len,
20416eb5e529SEmmanuel Grumbach 				   struct iwl_cmd_meta *out_meta,
2042a89c72ffSJohannes Berg 				   struct iwl_device_tx_cmd *dev_cmd,
2043a89c72ffSJohannes Berg 				   u16 tb1_len)
20446eb5e529SEmmanuel Grumbach {
20456eb5e529SEmmanuel Grumbach 	/* No A-MSDU without CONFIG_INET */
20466eb5e529SEmmanuel Grumbach 	WARN_ON(1);
20476eb5e529SEmmanuel Grumbach 
20486eb5e529SEmmanuel Grumbach 	return -1;
20496eb5e529SEmmanuel Grumbach }
20506eb5e529SEmmanuel Grumbach #endif /* CONFIG_INET */
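/*
 * Worked example for the A-MSDU header budget computed above (hypothetical
 * values): for a 4500-byte TSO payload with mss = 1500 and
 * snap_ip_tcp_hdrlen = 8 + 20 + 20 = 48, DIV_ROUND_UP(4500, 1500) = 3
 * subframes are built, so hdr_room = 3 * (3 + 48 + 14) + iv_len bytes are
 * reserved on the TSO page: per subframe, up to 3 bytes of A-MSDU padding,
 * the SNAP/IP/TCP headers and a 14-byte ethernet-style subframe header.
 */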
20516eb5e529SEmmanuel Grumbach 
205249101078SJohannes Berg #define IWL_TX_CRC_SIZE 4
205349101078SJohannes Berg #define IWL_TX_DELIMITER_SIZE 4
205449101078SJohannes Berg 
205549101078SJohannes Berg /*
205649101078SJohannes Berg  * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
205749101078SJohannes Berg  */
iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans * trans,struct iwl_txq * txq,u16 byte_cnt,int num_tbs)205849101078SJohannes Berg static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
205949101078SJohannes Berg 					     struct iwl_txq *txq, u16 byte_cnt,
206049101078SJohannes Berg 					     int num_tbs)
206149101078SJohannes Berg {
206249101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
206349101078SJohannes Berg 	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
206449101078SJohannes Berg 	int write_ptr = txq->write_ptr;
206549101078SJohannes Berg 	int txq_id = txq->id;
206649101078SJohannes Berg 	u8 sec_ctl = 0;
206749101078SJohannes Berg 	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
206849101078SJohannes Berg 	__le16 bc_ent;
206949101078SJohannes Berg 	struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
207049101078SJohannes Berg 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
207149101078SJohannes Berg 	u8 sta_id = tx_cmd->sta_id;
207249101078SJohannes Berg 
207349101078SJohannes Berg 	scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
207449101078SJohannes Berg 
207549101078SJohannes Berg 	sec_ctl = tx_cmd->sec_ctl;
207649101078SJohannes Berg 
207749101078SJohannes Berg 	switch (sec_ctl & TX_CMD_SEC_MSK) {
207849101078SJohannes Berg 	case TX_CMD_SEC_CCM:
207949101078SJohannes Berg 		len += IEEE80211_CCMP_MIC_LEN;
208049101078SJohannes Berg 		break;
208149101078SJohannes Berg 	case TX_CMD_SEC_TKIP:
208249101078SJohannes Berg 		len += IEEE80211_TKIP_ICV_LEN;
208349101078SJohannes Berg 		break;
208449101078SJohannes Berg 	case TX_CMD_SEC_WEP:
208549101078SJohannes Berg 		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
208649101078SJohannes Berg 		break;
208749101078SJohannes Berg 	}
208849101078SJohannes Berg 	if (trans_pcie->txqs.bc_table_dword)
208949101078SJohannes Berg 		len = DIV_ROUND_UP(len, 4);
209049101078SJohannes Berg 
209149101078SJohannes Berg 	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
209249101078SJohannes Berg 		return;
209349101078SJohannes Berg 
209449101078SJohannes Berg 	bc_ent = cpu_to_le16(len | (sta_id << 12));
209549101078SJohannes Berg 
209649101078SJohannes Berg 	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
209749101078SJohannes Berg 
209849101078SJohannes Berg 	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
209949101078SJohannes Berg 		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
210049101078SJohannes Berg 			bc_ent;
210149101078SJohannes Berg }
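/*
 * Editorial sketch, not part of the driver: the byte-count entry written
 * above packs the (possibly dword-rounded) frame length into the low 12
 * bits and the station id into the top 4 bits, and mirrors the first
 * TFD_QUEUE_SIZE_BC_DUP entries past the end of the table so the hardware
 * can read across the wrap. A minimal, standalone illustration of the
 * packing only (host-endian, placeholder name):
 */
static inline u16 bc_entry_sketch(u16 len_bytes, u8 sta_id, bool dword_units)
{
	u16 len = dword_units ? DIV_ROUND_UP(len_bytes, 4) : len_bytes;

	/* len must fit in 12 bits, sta_id in 4 bits */
	return len | ((u16)sta_id << 12);
}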
210249101078SJohannes Berg 
iwl_trans_pcie_tx(struct iwl_trans * trans,struct sk_buff * skb,struct iwl_device_tx_cmd * dev_cmd,int txq_id)2103e705c121SKalle Valo int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2104a89c72ffSJohannes Berg 		      struct iwl_device_tx_cmd *dev_cmd, int txq_id)
2105e705c121SKalle Valo {
210649101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2107e705c121SKalle Valo 	struct ieee80211_hdr *hdr;
2108e705c121SKalle Valo 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2109e705c121SKalle Valo 	struct iwl_cmd_meta *out_meta;
2110e705c121SKalle Valo 	struct iwl_txq *txq;
2111e705c121SKalle Valo 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2112e705c121SKalle Valo 	void *tb1_addr;
21134fe10bc6SSara Sharon 	void *tfd;
21143a0b2a42SEmmanuel Grumbach 	u16 len, tb1_len;
2115e705c121SKalle Valo 	bool wait_write_ptr;
2116e705c121SKalle Valo 	__le16 fc;
2117e705c121SKalle Valo 	u8 hdr_len;
2118e705c121SKalle Valo 	u16 wifi_seq;
2119c772a3d3SSara Sharon 	bool amsdu;
2120e705c121SKalle Valo 
212149101078SJohannes Berg 	txq = trans_pcie->txqs.txq[txq_id];
2122e705c121SKalle Valo 
212349101078SJohannes Berg 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
2124e705c121SKalle Valo 		      "TX on unused queue %d\n", txq_id))
2125e705c121SKalle Valo 		return -EINVAL;
2126e705c121SKalle Valo 
2127e705c121SKalle Valo 	if (skb_is_nonlinear(skb) &&
212849101078SJohannes Berg 	    skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
2129e705c121SKalle Valo 	    __skb_linearize(skb))
2130e705c121SKalle Valo 		return -ENOMEM;
2131e705c121SKalle Valo 
2132e705c121SKalle Valo 	/* mac80211 always puts the full header into the SKB's head,
2133e705c121SKalle Valo 	 * so there's no need to check if it's readable there
2134e705c121SKalle Valo 	 */
2135e705c121SKalle Valo 	hdr = (struct ieee80211_hdr *)skb->data;
2136e705c121SKalle Valo 	fc = hdr->frame_control;
2137e705c121SKalle Valo 	hdr_len = ieee80211_hdrlen(fc);
2138e705c121SKalle Valo 
2139e705c121SKalle Valo 	spin_lock(&txq->lock);
2140e705c121SKalle Valo 
21410cd1ad2dSMordechay Goodstein 	if (iwl_txq_space(trans, txq) < txq->high_mark) {
21420cd1ad2dSMordechay Goodstein 		iwl_txq_stop(trans, txq);
21433955525dSEmmanuel Grumbach 
21443955525dSEmmanuel Grumbach 		/* don't put the packet on the ring if there is no room */
21450cd1ad2dSMordechay Goodstein 		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
2146a89c72ffSJohannes Berg 			struct iwl_device_tx_cmd **dev_cmd_ptr;
21473955525dSEmmanuel Grumbach 
214821cb3222SJohannes Berg 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
214949101078SJohannes Berg 					       trans_pcie->txqs.dev_cmd_offs);
215021cb3222SJohannes Berg 
215121cb3222SJohannes Berg 			*dev_cmd_ptr = dev_cmd;
21523955525dSEmmanuel Grumbach 			__skb_queue_tail(&txq->overflow_q, skb);
21533955525dSEmmanuel Grumbach 
21543955525dSEmmanuel Grumbach 			spin_unlock(&txq->lock);
21553955525dSEmmanuel Grumbach 			return 0;
21563955525dSEmmanuel Grumbach 		}
21573955525dSEmmanuel Grumbach 	}
21583955525dSEmmanuel Grumbach 
2159e705c121SKalle Valo 	/* In AGG mode, the index in the ring must correspond to the WiFi
2160e705c121SKalle Valo 	 * sequence number. This is a HW requirement to help the SCD parse
2161e705c121SKalle Valo 	 * the BA.
2162e705c121SKalle Valo 	 * Check here that the packets are in the right place on the ring.
2163e705c121SKalle Valo 	 */
2164e705c121SKalle Valo 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2165e705c121SKalle Valo 	WARN_ONCE(txq->ampdu &&
2166bb98ecd4SSara Sharon 		  (wifi_seq & 0xff) != txq->write_ptr,
2167e705c121SKalle Valo 		  "Q: %d WiFi Seq %d tfdNum %d",
2168bb98ecd4SSara Sharon 		  txq_id, wifi_seq, txq->write_ptr);
2169e705c121SKalle Valo 
2170e705c121SKalle Valo 	/* Set up driver data for this TFD */
2171bb98ecd4SSara Sharon 	txq->entries[txq->write_ptr].skb = skb;
2172bb98ecd4SSara Sharon 	txq->entries[txq->write_ptr].cmd = dev_cmd;
2173e705c121SKalle Valo 
2174e705c121SKalle Valo 	dev_cmd->hdr.sequence =
2175e705c121SKalle Valo 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2176bb98ecd4SSara Sharon 			    INDEX_TO_SEQ(txq->write_ptr)));
2177e705c121SKalle Valo 
21780cd1ad2dSMordechay Goodstein 	tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
2179e705c121SKalle Valo 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2180e705c121SKalle Valo 		       offsetof(struct iwl_tx_cmd, scratch);
2181e705c121SKalle Valo 
2182e705c121SKalle Valo 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2183e705c121SKalle Valo 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2184e705c121SKalle Valo 
2185e705c121SKalle Valo 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
2186bb98ecd4SSara Sharon 	out_meta = &txq->entries[txq->write_ptr].meta;
21877f5e3038SBenjamin Berg 	memset(out_meta, 0, sizeof(*out_meta));
2188e705c121SKalle Valo 
2189e705c121SKalle Valo 	/*
2190e705c121SKalle Valo 	 * The second TB (tb1) points to the remainder of the TX command
2191e705c121SKalle Valo 	 * and the 802.11 header - dword-aligned size
2192e705c121SKalle Valo 	 * (This calculation modifies the TX command, so do it before the
2193e705c121SKalle Valo 	 * setup of the first TB)
2194e705c121SKalle Valo 	 */
2195e705c121SKalle Valo 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
21968de437c7SSara Sharon 	      hdr_len - IWL_FIRST_TB_SIZE;
2197c772a3d3SSara Sharon 	/* do not align A-MSDU to dword as the subframe header aligns it */
2198c772a3d3SSara Sharon 	amsdu = ieee80211_is_data_qos(fc) &&
2199c772a3d3SSara Sharon 		(*ieee80211_get_qos_ctl(hdr) &
2200c772a3d3SSara Sharon 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
220159fa61f3SEmmanuel Grumbach 	if (!amsdu) {
2202e705c121SKalle Valo 		tb1_len = ALIGN(len, 4);
2203e705c121SKalle Valo 		/* Tell NIC about any 2-byte padding after MAC header */
2204e705c121SKalle Valo 		if (tb1_len != len)
2205d172a5efSJohannes Berg 			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
2206c772a3d3SSara Sharon 	} else {
2207c772a3d3SSara Sharon 		tb1_len = len;
2208c772a3d3SSara Sharon 	}
2209e705c121SKalle Valo 
221005e5a7e5SJohannes Berg 	/*
221105e5a7e5SJohannes Berg 	 * The first TB points to bi-directional DMA data; we'll
221205e5a7e5SJohannes Berg 	 * memcpy the data into it later.
221305e5a7e5SJohannes Berg 	 */
2214e705c121SKalle Valo 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
22158de437c7SSara Sharon 			       IWL_FIRST_TB_SIZE, true);
2216e705c121SKalle Valo 
2217e705c121SKalle Valo 	/* there must be data left over for TB1 or this code must be changed */
22188de437c7SSara Sharon 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
22191caa3a5eSJohannes Berg 	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
22201caa3a5eSJohannes Berg 		     offsetofend(struct iwl_tx_cmd, scratch) >
22211caa3a5eSJohannes Berg 		     IWL_FIRST_TB_SIZE);
2222e705c121SKalle Valo 
2223e705c121SKalle Valo 	/* map the data for TB1 */
22248de437c7SSara Sharon 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2225e705c121SKalle Valo 	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2226e705c121SKalle Valo 	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2227e705c121SKalle Valo 		goto out_err;
2228e705c121SKalle Valo 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2229e705c121SKalle Valo 
2230bf77ee2eSSara Sharon 	trace_iwlwifi_dev_tx(trans->dev, skb,
22310cd1ad2dSMordechay Goodstein 			     iwl_txq_get_tfd(trans, txq, txq->write_ptr),
223249101078SJohannes Berg 			     trans_pcie->txqs.tfd.size,
2233bf77ee2eSSara Sharon 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2234bf77ee2eSSara Sharon 			     hdr_len);
2235bf77ee2eSSara Sharon 
2236bf1ad897SEliad Peller 	/*
2237bf1ad897SEliad Peller 	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
2238bf1ad897SEliad Peller 	 * (adding subframes, etc.).
2239bf1ad897SEliad Peller 	 * This can happen in some testing flows when the amsdu was already
2240bf1ad897SEliad Peller 	 * pre-built, and we just need to send the resulting skb.
2241bf1ad897SEliad Peller 	 */
2242bf1ad897SEliad Peller 	if (amsdu && skb_shinfo(skb)->gso_size) {
22436eb5e529SEmmanuel Grumbach 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
22446eb5e529SEmmanuel Grumbach 						     out_meta, dev_cmd,
22456eb5e529SEmmanuel Grumbach 						     tb1_len)))
2246e705c121SKalle Valo 			goto out_err;
2247bb03927eSJohannes Berg 	} else {
22480044f171SJohannes Berg 		struct sk_buff *frag;
22490044f171SJohannes Berg 
2250bb03927eSJohannes Berg 		if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2251bb03927eSJohannes Berg 					       out_meta)))
22526eb5e529SEmmanuel Grumbach 			goto out_err;
2253bb03927eSJohannes Berg 
22540044f171SJohannes Berg 		skb_walk_frags(skb, frag) {
22550044f171SJohannes Berg 			if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
22560044f171SJohannes Berg 						       out_meta)))
22570044f171SJohannes Berg 				goto out_err;
22580044f171SJohannes Berg 		}
22596eb5e529SEmmanuel Grumbach 	}
2260e705c121SKalle Valo 
226105e5a7e5SJohannes Berg 	/* building the A-MSDU might have changed this data, so memcpy it now */
2262c1f33442SLiad Kaufman 	memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
226305e5a7e5SJohannes Berg 
22640cd1ad2dSMordechay Goodstein 	tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
2265e705c121SKalle Valo 	/* Set up entry for this TFD in Tx byte-count array */
22660179bfffSMordechay Goodstein 	iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
226749101078SJohannes Berg 					 iwl_txq_gen1_tfd_get_num_tbs(tfd));
2268e705c121SKalle Valo 
2269e705c121SKalle Valo 	wait_write_ptr = ieee80211_has_morefrags(fc);
2270e705c121SKalle Valo 
2271e705c121SKalle Valo 	/* start timer if queue currently empty */
22720d52497aSEmmanuel Grumbach 	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
2273e705c121SKalle Valo 		/*
2274e705c121SKalle Valo 		 * If the TXQ is active, set the timer; if not, store
2275e705c121SKalle Valo 		 * the timeout in the remainder so that the timer will
2276e705c121SKalle Valo 		 * be armed with the right value when the station
2277e705c121SKalle Valo 		 * wakes up.
2278e705c121SKalle Valo 		 */
2279e705c121SKalle Valo 		if (!txq->frozen)
2280e705c121SKalle Valo 			mod_timer(&txq->stuck_timer,
2281e705c121SKalle Valo 				  jiffies + txq->wd_timeout);
2282e705c121SKalle Valo 		else
2283e705c121SKalle Valo 			txq->frozen_expiry_remainder = txq->wd_timeout;
2284e705c121SKalle Valo 	}
2285e705c121SKalle Valo 
2286e705c121SKalle Valo 	/* Tell device the write index *just past* this latest filled TFD */
22870cd1ad2dSMordechay Goodstein 	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
2288e705c121SKalle Valo 	if (!wait_write_ptr)
2289e705c121SKalle Valo 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
2290e705c121SKalle Valo 
2291e705c121SKalle Valo 	/*
2292e705c121SKalle Valo 	 * At this point the frame is "transmitted" successfully
2293e705c121SKalle Valo 	 * and we will get a TX status notification eventually.
2294e705c121SKalle Valo 	 */
2295e705c121SKalle Valo 	spin_unlock(&txq->lock);
2296e705c121SKalle Valo 	return 0;
2297e705c121SKalle Valo out_err:
22980179bfffSMordechay Goodstein 	iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2299e705c121SKalle Valo 	spin_unlock(&txq->lock);
2300e705c121SKalle Valo 	return -1;
2301e705c121SKalle Valo }
230249101078SJohannes Berg 
iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans * trans,struct iwl_txq * txq,int read_ptr)230349101078SJohannes Berg static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
2304a2ed933dSBenjamin Berg 					    struct iwl_txq *txq,
2305a2ed933dSBenjamin Berg 					    int read_ptr)
230649101078SJohannes Berg {
230749101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
230849101078SJohannes Berg 	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
230949101078SJohannes Berg 	int txq_id = txq->id;
231049101078SJohannes Berg 	u8 sta_id = 0;
231149101078SJohannes Berg 	__le16 bc_ent;
231249101078SJohannes Berg 	struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
231349101078SJohannes Berg 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
231449101078SJohannes Berg 
231549101078SJohannes Berg 	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
231649101078SJohannes Berg 
231749101078SJohannes Berg 	if (txq_id != trans_pcie->txqs.cmd.q_id)
231849101078SJohannes Berg 		sta_id = tx_cmd->sta_id;
231949101078SJohannes Berg 
232049101078SJohannes Berg 	bc_ent = cpu_to_le16(1 | (sta_id << 12));
232149101078SJohannes Berg 
232249101078SJohannes Berg 	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
232349101078SJohannes Berg 
232449101078SJohannes Berg 	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
232549101078SJohannes Berg 		scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
232649101078SJohannes Berg 			bc_ent;
232749101078SJohannes Berg }
232849101078SJohannes Berg 
232949101078SJohannes Berg /* Frees buffers until index _not_ inclusive */
iwl_pcie_reclaim(struct iwl_trans * trans,int txq_id,int ssn,struct sk_buff_head * skbs,bool is_flush)233049101078SJohannes Berg void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
233149101078SJohannes Berg 		      struct sk_buff_head *skbs, bool is_flush)
233249101078SJohannes Berg {
233349101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
233449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
233549101078SJohannes Berg 	int tfd_num, read_ptr, last_to_free;
2336a2ed933dSBenjamin Berg 	int txq_read_ptr, txq_write_ptr;
233749101078SJohannes Berg 
233849101078SJohannes Berg 	/* This function is not meant to release the cmd queue */
233949101078SJohannes Berg 	if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
234049101078SJohannes Berg 		return;
234149101078SJohannes Berg 
234249101078SJohannes Berg 	if (WARN_ON(!txq))
234349101078SJohannes Berg 		return;
234449101078SJohannes Berg 
234549101078SJohannes Berg 	tfd_num = iwl_txq_get_cmd_index(txq, ssn);
234649101078SJohannes Berg 
2347a2ed933dSBenjamin Berg 	spin_lock_bh(&txq->reclaim_lock);
2348a2ed933dSBenjamin Berg 
2349a2ed933dSBenjamin Berg 	spin_lock(&txq->lock);
2350a2ed933dSBenjamin Berg 	txq_read_ptr = txq->read_ptr;
2351a2ed933dSBenjamin Berg 	txq_write_ptr = txq->write_ptr;
2352a2ed933dSBenjamin Berg 	spin_unlock(&txq->lock);
2353a2ed933dSBenjamin Berg 
2354a2ed933dSBenjamin Berg 	read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
235549101078SJohannes Berg 
235649101078SJohannes Berg 	if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
235749101078SJohannes Berg 		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
235849101078SJohannes Berg 				    txq_id, ssn);
235949101078SJohannes Berg 		goto out;
236049101078SJohannes Berg 	}
236149101078SJohannes Berg 
236249101078SJohannes Berg 	if (read_ptr == tfd_num)
236349101078SJohannes Berg 		goto out;
236449101078SJohannes Berg 
236549101078SJohannes Berg 	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2366a2ed933dSBenjamin Berg 			   txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
236749101078SJohannes Berg 
236849101078SJohannes Berg 	/* Since we free until index _not_ inclusive, the one before index is
236949101078SJohannes Berg 	 * the last we will free. That entry must be in use.
237049101078SJohannes Berg 	 */
237149101078SJohannes Berg 	last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
237249101078SJohannes Berg 
2373a2ed933dSBenjamin Berg 	if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
237449101078SJohannes Berg 		IWL_ERR(trans,
237549101078SJohannes Berg 			"%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
237649101078SJohannes Berg 			__func__, txq_id, last_to_free,
237749101078SJohannes Berg 			trans->trans_cfg->base_params->max_tfd_queue_size,
2378a2ed933dSBenjamin Berg 			txq_write_ptr, txq_read_ptr);
237949101078SJohannes Berg 
238049101078SJohannes Berg 		iwl_op_mode_time_point(trans->op_mode,
238149101078SJohannes Berg 				       IWL_FW_INI_TIME_POINT_FAKE_TX,
238249101078SJohannes Berg 				       NULL);
238349101078SJohannes Berg 		goto out;
238449101078SJohannes Berg 	}
238549101078SJohannes Berg 
238649101078SJohannes Berg 	if (WARN_ON(!skb_queue_empty(skbs)))
238749101078SJohannes Berg 		goto out;
238849101078SJohannes Berg 
238949101078SJohannes Berg 	for (;
239049101078SJohannes Berg 	     read_ptr != tfd_num;
2391a2ed933dSBenjamin Berg 	     txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
2392a2ed933dSBenjamin Berg 	     read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
23937f5e3038SBenjamin Berg 		struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
239449101078SJohannes Berg 		struct sk_buff *skb = txq->entries[read_ptr].skb;
239549101078SJohannes Berg 
239649101078SJohannes Berg 		if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
2397a2ed933dSBenjamin Berg 			      read_ptr, txq_read_ptr, txq_id))
239849101078SJohannes Berg 			continue;
239949101078SJohannes Berg 
2400adc902ceSBenjamin Berg 		iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
240149101078SJohannes Berg 
240249101078SJohannes Berg 		__skb_queue_tail(skbs, skb);
240349101078SJohannes Berg 
240449101078SJohannes Berg 		txq->entries[read_ptr].skb = NULL;
240549101078SJohannes Berg 
240649101078SJohannes Berg 		if (!trans->trans_cfg->gen2)
2407a2ed933dSBenjamin Berg 			iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
2408a2ed933dSBenjamin Berg 							txq_read_ptr);
240949101078SJohannes Berg 
2410a2ed933dSBenjamin Berg 		iwl_txq_free_tfd(trans, txq, txq_read_ptr);
241149101078SJohannes Berg 	}
241249101078SJohannes Berg 
2413a2ed933dSBenjamin Berg 	spin_lock(&txq->lock);
2414a2ed933dSBenjamin Berg 	txq->read_ptr = txq_read_ptr;
2415a2ed933dSBenjamin Berg 
241649101078SJohannes Berg 	iwl_txq_progress(txq);
241749101078SJohannes Berg 
241849101078SJohannes Berg 	if (iwl_txq_space(trans, txq) > txq->low_mark &&
241949101078SJohannes Berg 	    test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
242049101078SJohannes Berg 		struct sk_buff_head overflow_skbs;
242149101078SJohannes Berg 		struct sk_buff *skb;
242249101078SJohannes Berg 
242349101078SJohannes Berg 		__skb_queue_head_init(&overflow_skbs);
242449101078SJohannes Berg 		skb_queue_splice_init(&txq->overflow_q,
242549101078SJohannes Berg 				      is_flush ? skbs : &overflow_skbs);
242649101078SJohannes Berg 
242749101078SJohannes Berg 		/*
242849101078SJohannes Berg 		 * We are going to transmit from the overflow queue.
242949101078SJohannes Berg 		 * Remember this state so that wait_for_txq_empty will know we
243049101078SJohannes Berg 		 * are adding more packets to the TFD queue. It cannot rely on
243149101078SJohannes Berg 		 * the state of &txq->overflow_q, as we just emptied it, but
243249101078SJohannes Berg 		 * haven't TXed the content yet.
243349101078SJohannes Berg 		 */
243449101078SJohannes Berg 		txq->overflow_tx = true;
243549101078SJohannes Berg 
243649101078SJohannes Berg 		/*
2437a2ed933dSBenjamin Berg 		 * This is tricky: we are in the reclaim path and are holding
2438a2ed933dSBenjamin Berg 		 * reclaim_lock, so no one else will try to access the txq data
2439a2ed933dSBenjamin Berg 		 * from that path. TX is stopped, so no TX can race with us
2440a2ed933dSBenjamin Berg 		 * either. Bottom line: we can unlock and re-lock later.
244149101078SJohannes Berg 		 */
24421a3364e9SBenjamin Berg 		spin_unlock(&txq->lock);
244349101078SJohannes Berg 
244449101078SJohannes Berg 		while ((skb = __skb_dequeue(&overflow_skbs))) {
244549101078SJohannes Berg 			struct iwl_device_tx_cmd *dev_cmd_ptr;
244649101078SJohannes Berg 
244749101078SJohannes Berg 			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
244849101078SJohannes Berg 						 trans_pcie->txqs.dev_cmd_offs);
244949101078SJohannes Berg 
245049101078SJohannes Berg 			/*
245149101078SJohannes Berg 			 * Note that we can very well be overflowing again.
245249101078SJohannes Berg 			 * In that case, iwl_txq_space will be small again
245349101078SJohannes Berg 			 * and we won't wake mac80211's queue.
245449101078SJohannes Berg 			 */
245549101078SJohannes Berg 			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
245649101078SJohannes Berg 		}
245749101078SJohannes Berg 
245849101078SJohannes Berg 		if (iwl_txq_space(trans, txq) > txq->low_mark)
245949101078SJohannes Berg 			iwl_trans_pcie_wake_queue(trans, txq);
246049101078SJohannes Berg 
24611a3364e9SBenjamin Berg 		spin_lock(&txq->lock);
246249101078SJohannes Berg 		txq->overflow_tx = false;
246349101078SJohannes Berg 	}
246449101078SJohannes Berg 
2465a2ed933dSBenjamin Berg 	spin_unlock(&txq->lock);
246649101078SJohannes Berg out:
2467a2ed933dSBenjamin Berg 	spin_unlock_bh(&txq->reclaim_lock);
246849101078SJohannes Berg }
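/*
 * Editorial sketch, not part of the driver: iwl_pcie_reclaim() frees ring
 * entries [read_ptr, ssn) and collects their skbs into the list passed in.
 * A caller (normally the op mode's TX-response/BA handling) is expected to
 * pass an empty list and then report or free the returned frames. Purely
 * as an illustration of the calling convention (placeholder function):
 */
static void reclaim_caller_sketch(struct iwl_trans *trans, int txq_id, int ssn)
{
	struct sk_buff_head reclaimed;
	struct sk_buff *skb;

	__skb_queue_head_init(&reclaimed);

	/* frees everything up to, but not including, the entry for ssn */
	iwl_pcie_reclaim(trans, txq_id, ssn, &reclaimed, false);

	while ((skb = __skb_dequeue(&reclaimed)))
		dev_kfree_skb(skb);	/* real callers report TX status instead */
}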
246949101078SJohannes Berg 
247049101078SJohannes Berg /* Set wr_ptr of a specific device and txq */
iwl_pcie_set_q_ptrs(struct iwl_trans * trans,int txq_id,int ptr)247149101078SJohannes Berg void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
247249101078SJohannes Berg {
247349101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
247449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
247549101078SJohannes Berg 
247649101078SJohannes Berg 	spin_lock_bh(&txq->lock);
247749101078SJohannes Berg 
247849101078SJohannes Berg 	txq->write_ptr = ptr;
247949101078SJohannes Berg 	txq->read_ptr = txq->write_ptr;
248049101078SJohannes Berg 
248149101078SJohannes Berg 	spin_unlock_bh(&txq->lock);
248249101078SJohannes Berg }
248349101078SJohannes Berg 
iwl_pcie_freeze_txq_timer(struct iwl_trans * trans,unsigned long txqs,bool freeze)248449101078SJohannes Berg void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
248549101078SJohannes Berg 			       unsigned long txqs, bool freeze)
248649101078SJohannes Berg {
248749101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
248849101078SJohannes Berg 	int queue;
248949101078SJohannes Berg 
249049101078SJohannes Berg 	for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
249149101078SJohannes Berg 		struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
249249101078SJohannes Berg 		unsigned long now;
249349101078SJohannes Berg 
249449101078SJohannes Berg 		spin_lock_bh(&txq->lock);
249549101078SJohannes Berg 
249649101078SJohannes Berg 		now = jiffies;
249749101078SJohannes Berg 
249849101078SJohannes Berg 		if (txq->frozen == freeze)
249949101078SJohannes Berg 			goto next_queue;
250049101078SJohannes Berg 
250149101078SJohannes Berg 		IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
250249101078SJohannes Berg 				    freeze ? "Freezing" : "Waking", queue);
250349101078SJohannes Berg 
250449101078SJohannes Berg 		txq->frozen = freeze;
250549101078SJohannes Berg 
250649101078SJohannes Berg 		if (txq->read_ptr == txq->write_ptr)
250749101078SJohannes Berg 			goto next_queue;
250849101078SJohannes Berg 
250949101078SJohannes Berg 		if (freeze) {
251049101078SJohannes Berg 			if (unlikely(time_after(now,
251149101078SJohannes Berg 						txq->stuck_timer.expires))) {
251249101078SJohannes Berg 				/*
251349101078SJohannes Berg 				 * The timer should have fired; its handler may
251449101078SJohannes Berg 				 * be spinning on the lock right now.
251549101078SJohannes Berg 				 */
251649101078SJohannes Berg 				goto next_queue;
251749101078SJohannes Berg 			}
251849101078SJohannes Berg 			/* remember how long until the timer fires */
251949101078SJohannes Berg 			txq->frozen_expiry_remainder =
252049101078SJohannes Berg 				txq->stuck_timer.expires - now;
252149101078SJohannes Berg 			del_timer(&txq->stuck_timer);
252249101078SJohannes Berg 			goto next_queue;
252349101078SJohannes Berg 		}
252449101078SJohannes Berg 
252549101078SJohannes Berg 		/*
252649101078SJohannes Berg 		 * Wake a non-empty queue -> arm timer with the
252749101078SJohannes Berg 		 * Wake a non-empty queue -> arm the timer with the
252849101078SJohannes Berg 		 * remainder saved before it froze
252949101078SJohannes Berg 		mod_timer(&txq->stuck_timer,
253049101078SJohannes Berg 			  now + txq->frozen_expiry_remainder);
253149101078SJohannes Berg 
253249101078SJohannes Berg next_queue:
253349101078SJohannes Berg 		spin_unlock_bh(&txq->lock);
253449101078SJohannes Berg 	}
253549101078SJohannes Berg }
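/*
 * Editorial sketch, not part of the driver: freezing a non-empty queue
 * converts the stuck timer's absolute expiry into a remaining delta, and
 * waking re-arms the timer with that delta, as done above. In isolation
 * (placeholder name, jiffies arithmetic only):
 */
static void freeze_toggle_sketch(struct timer_list *timer,
				 unsigned long *remainder, bool freeze)
{
	unsigned long now = jiffies;

	if (freeze) {
		if (time_after(now, timer->expires))
			return;	/* timer already fired or is about to */
		*remainder = timer->expires - now;
		del_timer(timer);
	} else {
		mod_timer(timer, now + *remainder);
	}
}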
253649101078SJohannes Berg 
253749101078SJohannes Berg #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
253849101078SJohannes Berg 
iwl_trans_pcie_send_hcmd_sync(struct iwl_trans * trans,struct iwl_host_cmd * cmd)253949101078SJohannes Berg static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
254049101078SJohannes Berg 					 struct iwl_host_cmd *cmd)
254149101078SJohannes Berg {
254249101078SJohannes Berg 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
254349101078SJohannes Berg 	const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
254449101078SJohannes Berg 	struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
254549101078SJohannes Berg 	int cmd_idx;
254649101078SJohannes Berg 	int ret;
254749101078SJohannes Berg 
254849101078SJohannes Berg 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
254949101078SJohannes Berg 
255049101078SJohannes Berg 	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
255149101078SJohannes Berg 				  &trans->status),
255249101078SJohannes Berg 		 "Command %s: a command is already active!\n", cmd_str))
255349101078SJohannes Berg 		return -EIO;
255449101078SJohannes Berg 
255549101078SJohannes Berg 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
255649101078SJohannes Berg 
25570d91a2bfSYedidya Benshimol 	if (trans->trans_cfg->gen2)
25580d91a2bfSYedidya Benshimol 		cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
25590d91a2bfSYedidya Benshimol 	else
25600d91a2bfSYedidya Benshimol 		cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
25610d91a2bfSYedidya Benshimol 
256249101078SJohannes Berg 	if (cmd_idx < 0) {
256349101078SJohannes Berg 		ret = cmd_idx;
256449101078SJohannes Berg 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
256549101078SJohannes Berg 		IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
256649101078SJohannes Berg 			cmd_str, ret);
256749101078SJohannes Berg 		return ret;
256849101078SJohannes Berg 	}
256949101078SJohannes Berg 
257049101078SJohannes Berg 	ret = wait_event_timeout(trans->wait_command_queue,
257149101078SJohannes Berg 				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
257249101078SJohannes Berg 					   &trans->status),
257349101078SJohannes Berg 				 HOST_COMPLETE_TIMEOUT);
257449101078SJohannes Berg 	if (!ret) {
257549101078SJohannes Berg 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
257649101078SJohannes Berg 			cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
257749101078SJohannes Berg 
257849101078SJohannes Berg 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
257949101078SJohannes Berg 			txq->read_ptr, txq->write_ptr);
258049101078SJohannes Berg 
258149101078SJohannes Berg 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
258249101078SJohannes Berg 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
258349101078SJohannes Berg 			       cmd_str);
258449101078SJohannes Berg 		ret = -ETIMEDOUT;
258549101078SJohannes Berg 
258649101078SJohannes Berg 		iwl_trans_sync_nmi(trans);
258749101078SJohannes Berg 		goto cancel;
258849101078SJohannes Berg 	}
258949101078SJohannes Berg 
259049101078SJohannes Berg 	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
259149101078SJohannes Berg 		if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
259249101078SJohannes Berg 					&trans->status)) {
259349101078SJohannes Berg 			IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
259449101078SJohannes Berg 			dump_stack();
259549101078SJohannes Berg 		}
259649101078SJohannes Berg 		ret = -EIO;
259749101078SJohannes Berg 		goto cancel;
259849101078SJohannes Berg 	}
259949101078SJohannes Berg 
260049101078SJohannes Berg 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
260149101078SJohannes Berg 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
260249101078SJohannes Berg 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
260349101078SJohannes Berg 		ret = -ERFKILL;
260449101078SJohannes Berg 		goto cancel;
260549101078SJohannes Berg 	}
260649101078SJohannes Berg 
260749101078SJohannes Berg 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
260849101078SJohannes Berg 		IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
260949101078SJohannes Berg 		ret = -EIO;
261049101078SJohannes Berg 		goto cancel;
261149101078SJohannes Berg 	}
261249101078SJohannes Berg 
261349101078SJohannes Berg 	return 0;
261449101078SJohannes Berg 
261549101078SJohannes Berg cancel:
261649101078SJohannes Berg 	if (cmd->flags & CMD_WANT_SKB) {
261749101078SJohannes Berg 		/*
261849101078SJohannes Berg 		 * Cancel the CMD_WANT_SKB flag for the cmd in the
261949101078SJohannes Berg 		 * TX cmd queue. Otherwise, if the response comes
262049101078SJohannes Berg 		 * in later, it could write to an invalid
262149101078SJohannes Berg 		 * address (cmd->meta.source).
262249101078SJohannes Berg 		 */
262349101078SJohannes Berg 		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
262449101078SJohannes Berg 	}
262549101078SJohannes Berg 
262649101078SJohannes Berg 	if (cmd->resp_pkt) {
262749101078SJohannes Berg 		iwl_free_resp(cmd);
262849101078SJohannes Berg 		cmd->resp_pkt = NULL;
262949101078SJohannes Berg 	}
263049101078SJohannes Berg 
263149101078SJohannes Berg 	return ret;
263249101078SJohannes Berg }
263349101078SJohannes Berg 
iwl_trans_pcie_send_hcmd(struct iwl_trans * trans,struct iwl_host_cmd * cmd)263449101078SJohannes Berg int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
263549101078SJohannes Berg 			     struct iwl_host_cmd *cmd)
263649101078SJohannes Berg {
263749101078SJohannes Berg 	/* Make sure the NIC is still alive in the bus */
263849101078SJohannes Berg 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
263949101078SJohannes Berg 		return -ENODEV;
264049101078SJohannes Berg 
264149101078SJohannes Berg 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
264249101078SJohannes Berg 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
264349101078SJohannes Berg 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
264449101078SJohannes Berg 				  cmd->id);
264549101078SJohannes Berg 		return -ERFKILL;
264649101078SJohannes Berg 	}
264749101078SJohannes Berg 
264849101078SJohannes Berg 	if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
264949101078SJohannes Berg 		     !(cmd->flags & CMD_SEND_IN_D3))) {
265049101078SJohannes Berg 		IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
265149101078SJohannes Berg 		return -EHOSTDOWN;
265249101078SJohannes Berg 	}
265349101078SJohannes Berg 
265449101078SJohannes Berg 	if (cmd->flags & CMD_ASYNC) {
265549101078SJohannes Berg 		int ret;
265649101078SJohannes Berg 
265749101078SJohannes Berg 		/* An asynchronous command can not expect an SKB to be set. */
265849101078SJohannes Berg 		/* An asynchronous command cannot expect an SKB to be set. */
265949101078SJohannes Berg 			return -EINVAL;
266049101078SJohannes Berg 
26610d91a2bfSYedidya Benshimol 		if (trans->trans_cfg->gen2)
26620d91a2bfSYedidya Benshimol 			ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
26630d91a2bfSYedidya Benshimol 		else
26640d91a2bfSYedidya Benshimol 			ret = iwl_pcie_enqueue_hcmd(trans, cmd);
26650d91a2bfSYedidya Benshimol 
266649101078SJohannes Berg 		if (ret < 0) {
266749101078SJohannes Berg 			IWL_ERR(trans,
266849101078SJohannes Berg 				"Error sending %s: enqueue_hcmd failed: %d\n",
266949101078SJohannes Berg 				iwl_get_cmd_string(trans, cmd->id), ret);
267049101078SJohannes Berg 			return ret;
267149101078SJohannes Berg 		}
267249101078SJohannes Berg 		return 0;
267349101078SJohannes Berg 	}
267449101078SJohannes Berg 
267549101078SJohannes Berg 	return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
267649101078SJohannes Berg }
267749101078SJohannes Berg IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
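/*
 * Editorial sketch, not part of the driver: a typical synchronous caller
 * fills a struct iwl_host_cmd, sets CMD_WANT_SKB when it needs the
 * response packet, and releases the response with iwl_free_resp(). The
 * command id and payload below are placeholders, not a real firmware API:
 */
static int send_sync_cmd_sketch(struct iwl_trans *trans, u32 cmd_id,
				const void *payload, u16 payload_len)
{
	struct iwl_host_cmd cmd = {
		.id = cmd_id,
		.flags = CMD_WANT_SKB,
		.data = { payload, },
		.len = { payload_len, },
	};
	int ret;

	ret = iwl_trans_pcie_send_hcmd(trans, &cmd);
	if (ret)
		return ret;

	/* cmd.resp_pkt now points to the response; free it when done */
	iwl_free_resp(&cmd);
	return 0;
}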
2678