// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2017 Intel Deutschland GmbH
 * Copyright (C) 2018-2020, 2023-2025 Intel Corporation
 */
#ifdef CONFIG_INET
#include <net/tso.h>
#endif
#include <linux/tcp.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-io.h"
#include "internal.h"
#include "fw/api/tx.h"
#include "fw/api/commands.h"
#include "fw/api/datapath.h"
#include "iwl-scd.h"

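/*
 * Allocate a page for the TB-boundary workaround below, map it for
 * DMA and chain it into the per-skb list kept in skb->cb so the TSO
 * page bookkeeping can find (and free) it again later.
 */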
static struct page *get_workaround_page(struct iwl_trans *trans,
					struct sk_buff *skb)
{
	struct iwl_tso_page_info *info;
	struct page **page_ptr;
	struct page *ret;
	dma_addr_t phys;

	page_ptr = (void *)((u8 *)skb->cb + trans->conf.cb_data_offs);

	ret = alloc_page(GFP_ATOMIC);
	if (!ret)
		return NULL;

	info = IWL_TSO_PAGE_INFO(page_address(ret));

	/* Create a DMA mapping for the page */
	phys = dma_map_page_attrs(trans->dev, ret, 0, PAGE_SIZE,
				  DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
	if (unlikely(dma_mapping_error(trans->dev, phys))) {
		__free_page(ret);
		return NULL;
	}

	/* Store physical address and set use count */
	info->dma_addr = phys;
	refcount_set(&info->use_count, 1);

	/* set the chaining pointer to the previous page if there */
	info->next = *page_ptr;
	*page_ptr = ret;

	return ret;
}

/*
 * Add a TB and if needed apply the FH HW bug workaround;
 * meta != NULL indicates that it's a page mapping and we
 * need to dma_unmap_page() and set the meta->tbs bit in
 * this case.
 */
static int iwl_txq_gen2_set_tb_with_wa(struct iwl_trans *trans,
				       struct sk_buff *skb,
				       struct iwl_tfh_tfd *tfd,
				       dma_addr_t phys, void *virt,
				       u16 len, struct iwl_cmd_meta *meta,
				       bool unmap)
{
	dma_addr_t oldphys = phys;
	struct page *page;
	int ret;

	if (unlikely(dma_mapping_error(trans->dev, phys)))
		return -ENOMEM;

	if (likely(!iwl_txq_crosses_4g_boundary(phys, len))) {
		ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);

		if (ret < 0)
			goto unmap;

		if (meta)
			meta->tbs |= BIT(ret);

		ret = 0;
		goto trace;
	}

	/*
	 * Work around a hardware bug. If (as expressed in the
	 * condition above) the TB ends on a 32-bit boundary,
	 * then the next TB may be accessed with the wrong
	 * address.
	 * To work around it, copy the data elsewhere and make
	 * a new mapping for it so the device will not fail.
	 */

	if (WARN_ON(len > IWL_TSO_PAGE_DATA_SIZE)) {
		ret = -ENOBUFS;
		goto unmap;
	}

	page = get_workaround_page(trans, skb);
	if (!page) {
		ret = -ENOMEM;
		goto unmap;
	}

	memcpy(page_address(page), virt, len);

	/*
	 * This is a bit odd, but performance does not matter here, what
	 * matters are the expectations of the calling code and TB cleanup
	 * function.
	 *
	 * As such, if unmap is set, then create another mapping for the TB
	 * entry as it will be unmapped later. On the other hand, if it is not
	 * set, then the TB entry will not be unmapped and instead we simply
	 * reference and sync the mapping that get_workaround_page() created.
	 */
	if (unmap) {
		phys = dma_map_single(trans->dev, page_address(page), len,
				      DMA_TO_DEVICE);
		if (unlikely(dma_mapping_error(trans->dev, phys)))
			return -ENOMEM;
	} else {
		phys = iwl_pcie_get_tso_page_phys(page_address(page));
		dma_sync_single_for_device(trans->dev, phys, len,
					   DMA_TO_DEVICE);
	}

	ret = iwl_txq_gen2_set_tb(trans, tfd, phys, len);
	if (ret < 0) {
		/* unmap the new allocation as single */
		oldphys = phys;
		meta = NULL;
		goto unmap;
	}

	IWL_DEBUG_TX(trans,
		     "TB bug workaround: copied %d bytes from 0x%llx to 0x%llx\n",
		     len, (unsigned long long)oldphys,
		     (unsigned long long)phys);

	ret = 0;
unmap:
	if (!unmap)
		goto trace;

	if (meta)
		dma_unmap_page(trans->dev, oldphys, len, DMA_TO_DEVICE);
	else
		dma_unmap_single(trans->dev, oldphys, len, DMA_TO_DEVICE);
trace:
	trace_iwlwifi_dev_tx_tb(trans->dev, skb, virt, phys, len);

	return ret;
}

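/*
 * Sketch of each A-MSDU subframe built below (illustrative, follow the
 * code rather than this comment for authoritative details): 0-3 bytes
 * of padding to 4-byte alignment, then DA (6 bytes), SA (6 bytes), a
 * big-endian length field covering SNAP + IP + TCP headers plus
 * payload, then the headers produced by tso_build_hdr() and finally up
 * to gso_size bytes of payload data.
 */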
static int iwl_txq_gen2_build_amsdu(struct iwl_trans *trans,
				    struct sk_buff *skb,
				    struct iwl_tfh_tfd *tfd,
				    struct iwl_cmd_meta *out_meta,
				    int start_len,
				    u8 hdr_len,
				    struct iwl_device_tx_cmd *dev_cmd)
{
#ifdef CONFIG_INET
	struct iwl_tx_cmd_v9 *tx_cmd = (void *)dev_cmd->payload;
	struct ieee80211_hdr *hdr = (void *)skb->data;
	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
	unsigned int mss = skb_shinfo(skb)->gso_size;
	unsigned int data_offset = 0;
	dma_addr_t start_hdr_phys;
	u16 length, amsdu_pad;
	u8 *start_hdr;
	struct sg_table *sgt;
	struct tso_t tso;

	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd),
			     &dev_cmd->hdr, start_len, 0);

	ip_hdrlen = skb_network_header_len(skb);
	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len;
	amsdu_pad = 0;

	/* total amount of header we may need for this A-MSDU */
	hdr_room = DIV_ROUND_UP(total_len, mss) *
		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr));

	/* Our device supports 9 segments at most, it will fit in 1 page */
	sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room,
				snap_ip_tcp_hdrlen + hdr_len);
	if (!sgt)
		return -ENOMEM;

	start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);

	/*
	 * Pull the ieee80211 header to be able to use TSO core,
	 * we will restore it for the tx_status flow.
	 */
	skb_pull(skb, hdr_len);

	/*
	 * Remove the length of all the headers that we don't actually
	 * have in the MPDU by themselves, but that we duplicate into
	 * all the different MSDUs inside the A-MSDU.
	 */
	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);

	tso_start(skb, &tso);

	while (total_len) {
		/* this is the data left for this subframe */
		unsigned int data_left = min_t(unsigned int, mss, total_len);
		unsigned int tb_len;
		dma_addr_t tb_phys;
		u8 *pos_hdr = start_hdr;

		total_len -= data_left;

		memset(pos_hdr, 0, amsdu_pad);
		pos_hdr += amsdu_pad;
		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
				  data_left)) & 0x3;
		ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
		pos_hdr += ETH_ALEN;
		ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
		pos_hdr += ETH_ALEN;

		length = snap_ip_tcp_hdrlen + data_left;
		*((__be16 *)pos_hdr) = cpu_to_be16(length);
		pos_hdr += sizeof(length);

		/*
		 * This will copy the SNAP as well which will be considered
		 * as MAC header.
		 */
		tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);

		pos_hdr += snap_ip_tcp_hdrlen;

		tb_len = pos_hdr - start_hdr;
		tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);

		/*
		 * No need for _with_wa, this is from the TSO page and
		 * we leave some space at the end of it so can't hit
		 * the buggy scenario.
		 */
		iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb_len);
		trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
					tb_phys, tb_len);
		/* add this subframe's headers' length to the tx_cmd */
		le16_add_cpu(&tx_cmd->len, tb_len);

		/* prepare the start_hdr for the next subframe */
		start_hdr = pos_hdr;

		/* put the payload */
		while (data_left) {
			int ret;

			tb_len = min_t(unsigned int, tso.size, data_left);
			tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset,
							   tb_len);
			/* Not a real mapping error, use direct comparison */
			if (unlikely(tb_phys == DMA_MAPPING_ERROR))
				goto out_err;

			ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd,
							  tb_phys, tso.data,
							  tb_len, NULL, false);
			if (ret)
				goto out_err;

			data_left -= tb_len;
			data_offset += tb_len;
			tso_build_data(skb, &tso, tb_len);
		}
	}

	dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
				   DMA_TO_DEVICE);

	/* re-add the WiFi header */
	skb_push(skb, hdr_len);

	return 0;

out_err:
#endif
	return -EINVAL;
}

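/*
 * TFD layout for the A-MSDU path below: TB0 holds the first
 * IWL_FIRST_TB_SIZE bytes of the command (copied into the queue's
 * first_tb_bufs after the A-MSDU build, which may modify it), TB1
 * maps the remainder of the TX command plus the 802.11 header, and
 * iwl_txq_gen2_build_amsdu() appends the subframe header and payload
 * TBs.
 */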
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx_amsdu(struct iwl_trans *trans,
					 struct iwl_txq *txq,
					 struct iwl_device_tx_cmd *dev_cmd,
					 struct sk_buff *skb,
					 struct iwl_cmd_meta *out_meta,
					 int hdr_len,
					 int tx_cmd_len)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len;
	void *tb1_addr;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	/* do not align A-MSDU to dword as the subframe header aligns it */

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, len);

	if (iwl_txq_gen2_build_amsdu(trans, skb, tfd, out_meta,
				     len + IWL_FIRST_TB_SIZE, hdr_len, dev_cmd))
		goto out_err;

	/* building the A-MSDU might have changed this data, memcpy it now */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);
	return tfd;

out_err:
	iwl_pcie_free_tso_pages(trans, skb, out_meta);
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static int iwl_txq_gen2_tx_add_frags(struct iwl_trans *trans,
				     struct sk_buff *skb,
				     struct iwl_tfh_tfd *tfd,
				     struct iwl_cmd_meta *out_meta)
{
	int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
		dma_addr_t tb_phys;
		unsigned int fragsz = skb_frag_size(frag);
		int ret;

		if (!fragsz)
			continue;

		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
					   fragsz, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb_frag_address(frag),
						  fragsz, out_meta, true);
		if (ret)
			return ret;
	}

	return 0;
}

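/*
 * TB layout for the non-A-MSDU path below: TB0 holds the first
 * IWL_FIRST_TB_SIZE bytes of the command (copied into the queue's
 * first_tb_bufs), TB1 maps the rest of the TX command plus the 802.11
 * header (dword-aligned when padding is requested), TB2 maps any
 * remaining linear skb data, and further TBs map the paged fragments
 * and frag-list skbs.
 */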
static struct
iwl_tfh_tfd *iwl_txq_gen2_build_tx(struct iwl_trans *trans,
				   struct iwl_txq *txq,
				   struct iwl_device_tx_cmd *dev_cmd,
				   struct sk_buff *skb,
				   struct iwl_cmd_meta *out_meta,
				   int hdr_len,
				   int tx_cmd_len,
				   bool pad)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	dma_addr_t tb_phys;
	int len, tb1_len, tb2_len;
	void *tb1_addr;
	struct sk_buff *frag;

	tb_phys = iwl_txq_get_first_tb_dma(txq, idx);

	/* The first TB points to bi-directional DMA data */
	memcpy(&txq->first_tb_bufs[idx], dev_cmd, IWL_FIRST_TB_SIZE);

	/*
	 * No need for _with_wa, the first TB allocation is aligned up
	 * to a 64-byte boundary and thus can't be at the end or cross
	 * a page boundary (much less a 2^32 boundary).
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE);

	/*
	 * The second TB (tb1) points to the remainder of the TX command
	 * and the 802.11 header - dword aligned size
	 * (This calculation modifies the TX command, so do it before the
	 * setup of the first TB)
	 */
	len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len -
	      IWL_FIRST_TB_SIZE;

	if (pad)
		tb1_len = ALIGN(len, 4);
	else
		tb1_len = len;

	/* map the data for TB1 */
	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
	tb_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
	if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
		goto out_err;
	/*
	 * No need for _with_wa(), we ensure (via alignment) that the data
	 * here can never cross or end at a page boundary.
	 */
	iwl_txq_gen2_set_tb(trans, tfd, tb_phys, tb1_len);
	trace_iwlwifi_dev_tx(trans->dev, skb, tfd, sizeof(*tfd), &dev_cmd->hdr,
			     IWL_FIRST_TB_SIZE + tb1_len, hdr_len);

	/* set up TFD's third entry to point to remainder of skb's head */
	tb2_len = skb_headlen(skb) - hdr_len;

	if (tb2_len > 0) {
		int ret;

		tb_phys = dma_map_single(trans->dev, skb->data + hdr_len,
					 tb2_len, DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  skb->data + hdr_len, tb2_len,
						  NULL, true);
		if (ret)
			goto out_err;
	}

	if (iwl_txq_gen2_tx_add_frags(trans, skb, tfd, out_meta))
		goto out_err;

	skb_walk_frags(skb, frag) {
		int ret;

		tb_phys = dma_map_single(trans->dev, frag->data,
					 skb_headlen(frag), DMA_TO_DEVICE);
		ret = iwl_txq_gen2_set_tb_with_wa(trans, skb, tfd, tb_phys,
						  frag->data,
						  skb_headlen(frag), NULL,
						  true);
		if (ret)
			goto out_err;
		if (iwl_txq_gen2_tx_add_frags(trans, frag, tfd, out_meta))
			goto out_err;
	}

	return tfd;

out_err:
	iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
	return NULL;
}

static
struct iwl_tfh_tfd *iwl_txq_gen2_build_tfd(struct iwl_trans *trans,
					   struct iwl_txq *txq,
					   struct iwl_device_tx_cmd *dev_cmd,
					   struct sk_buff *skb,
					   struct iwl_cmd_meta *out_meta)
{
	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data;
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_tfh_tfd *tfd = iwl_txq_get_tfd(trans, txq, idx);
	int len, hdr_len;
	bool amsdu;

	/* There must be data left over for TB1 or this code must be changed */
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_v9) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd_v9, dram_info) >
		     IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
	BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
		     offsetofend(struct iwl_tx_cmd, dram_info) >
		     IWL_FIRST_TB_SIZE);

	memset(tfd, 0, sizeof(*tfd));

	if (trans->mac_cfg->device_family < IWL_DEVICE_FAMILY_AX210)
		len = sizeof(struct iwl_tx_cmd_v9);
	else
		len = sizeof(struct iwl_tx_cmd);

	amsdu = ieee80211_is_data_qos(hdr->frame_control) &&
		(*ieee80211_get_qos_ctl(hdr) &
		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);

	hdr_len = ieee80211_hdrlen(hdr->frame_control);

	/*
	 * Only build A-MSDUs here if doing so by GSO, otherwise it may be
	 * an A-MSDU for other reasons, e.g. NAN or an A-MSDU having been
	 * built in the higher layers already.
	 */
	if (amsdu && skb_shinfo(skb)->gso_size)
		return iwl_txq_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb,
						   out_meta, hdr_len, len);
	return iwl_txq_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta,
				     hdr_len, len, !amsdu);
}

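/*
 * Worked example for the ring-space math below (illustrative numbers):
 * with max_tfd_queue_size 256, write_ptr 5 and read_ptr 250,
 * used = (5 - 250) & 255 = 11, so 11 entries are in flight and
 * max - 11 remain available.
 */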
int iwl_txq_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->mac_cfg->base->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->mac_cfg->base->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
	       (trans->mac_cfg->base->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}

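/*
 * Illustrative encoding produced by the function below (example
 * numbers, not from any spec text): on AX210+ a 1200-byte frame whose
 * TFD fits in a single 64-byte fetch chunk is stored as
 * bc_ent = cpu_to_le16(1200 | (0 << 14)); earlier families store the
 * length in dwords instead, i.e. cpu_to_le16(300 | (0 << 12)).
 */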
/*
 * iwl_pcie_gen2_update_byte_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans *trans,
					  struct iwl_txq *txq, u16 byte_cnt,
					  int num_tbs)
{
	int idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
	struct iwl_bc_tbl_entry *scd_bc_tbl = txq->bc_tbl.addr;
	u8 filled_tfd_size, num_fetch_chunks;
	u16 len = byte_cnt;
	__le16 bc_ent;

	if (WARN(idx >= txq->n_window, "%d >= %d\n", idx, txq->n_window))
		return;

	filled_tfd_size = offsetof(struct iwl_tfh_tfd, tbs) +
			  num_tbs * sizeof(struct iwl_tfh_tb);
	/*
	 * filled_tfd_size contains the number of filled bytes in the TFD.
	 * Dividing it by 64 will give the number of chunks to fetch
	 * to SRAM - 0 for one chunk, 1 for two, and so on.
	 * If, for example, the TFD contains only 3 TBs then 32 bytes
	 * of the TFD are used, and only one chunk of 64 bytes should
	 * be fetched.
	 */
	num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1;

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		WARN_ON(len > 0x3FFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 14));
	} else {
		len = DIV_ROUND_UP(len, 4);
		WARN_ON(len > 0xFFF);
		bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12));
	}

	scd_bc_tbl[idx].tfd_offset = bc_ent;
}

static u8 iwl_txq_gen2_get_num_tbs(struct iwl_tfh_tfd *tfd)
{
	return le16_to_cpu(tfd->num_tbs) & 0x1f;
}

int iwl_txq_gen2_set_tb(struct iwl_trans *trans, struct iwl_tfh_tfd *tfd,
			dma_addr_t addr, u16 len)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int idx = iwl_txq_gen2_get_num_tbs(tfd);
	struct iwl_tfh_tb *tb;

	/* Only WARN here so we know about the issue, but we mess up our
	 * unmap path because not every place currently checks for errors
	 * returned from this function - it can only return an error if
	 * there's no more space, and so when we know there is enough we
	 * don't always check ...
	 */
	WARN(iwl_txq_crosses_4g_boundary(addr, len),
	     "possible DMA problem with iova:0x%llx, len:%d\n",
	     (unsigned long long)addr, len);

	if (WARN_ON(idx >= IWL_TFH_NUM_TBS))
		return -EINVAL;
	tb = &tfd->tbs[idx];

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (le16_to_cpu(tfd->num_tbs) >= trans_pcie->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->txqs.tfd.max_tbs);
		return -EINVAL;
	}

	put_unaligned_le64(addr, &tb->addr);
	tb->tb_len = cpu_to_le16(len);

	tfd->num_tbs = cpu_to_le16(idx + 1);

	return idx;
}

void iwl_txq_gen2_tfd_unmap(struct iwl_trans *trans,
			    struct iwl_cmd_meta *meta,
			    struct iwl_tfh_tfd *tfd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;

	/* Sanity check on number of chunks */
	num_tbs = iwl_txq_gen2_get_num_tbs(tfd);

	if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		return;
	}

	/* TB1 is mapped directly, the rest is the TSO page and SG list. */
	if (meta->sg_offset)
		num_tbs = 2;

	/* first TB is never freed - it's the bidirectional DMA data */
	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       le64_to_cpu(tfd->tbs[i].addr),
				       le16_to_cpu(tfd->tbs[i].tb_len),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 le64_to_cpu(tfd->tbs[i].addr),
					 le16_to_cpu(tfd->tbs[i].tb_len),
					 DMA_TO_DEVICE);
	}

	iwl_txq_set_tfd_invalid_gen2(trans, tfd);
}

static void iwl_txq_gen2_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
	struct sk_buff *skb;

	lockdep_assert_held(&txq->lock);

	if (!txq->entries)
		return;

	iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
			       iwl_txq_get_tfd(trans, txq, idx));

	skb = txq->entries[idx].skb;

	/* Can be called from irqs-disabled context
	 * If skb is not NULL, it means that the whole queue is being
	 * freed and that the queue is not empty - free the skb
	 */
	if (skb) {
		iwl_op_mode_free_skb(trans->op_mode, skb);
		txq->entries[idx].skb = NULL;
	}
}

/*
 * iwl_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_txq_inc_wr_ptr(struct iwl_trans *trans, struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq->id, txq->write_ptr);

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	iwl_write32(trans, HBUS_TARG_WRPTR, txq->write_ptr | (txq->id << 16));
}

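/*
 * Main gen2 TX entry point. The flow below: validate the queue, stop
 * it (and stash the frame on the overflow queue) when nearly full,
 * record skb/cmd in the queue entry, build the TFD, fill the
 * byte-count table entry, arm the stuck-queue timer if the queue was
 * empty, and finally bump the write pointer to hand the TFD to the
 * device.
 */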
int iwl_txq_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb,
		    struct iwl_device_tx_cmd *dev_cmd, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_cmd_meta *out_meta;
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
	u16 cmd_len;
	int idx;
	void *tfd;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return -EINVAL;

	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
		      "TX on unused queue %d\n", txq_id))
		return -EINVAL;

	if (skb_is_nonlinear(skb) &&
	    skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
	    __skb_linearize(skb))
		return -ENOMEM;

	spin_lock(&txq->lock);

	if (iwl_txq_space(trans, txq) < txq->high_mark) {
		iwl_txq_stop(trans, txq);

		/* don't put the packet on the ring, if there is no room */
		if (unlikely(iwl_txq_space(trans, txq) < 3)) {
			struct iwl_device_tx_cmd **dev_cmd_ptr;

			dev_cmd_ptr = (void *)((u8 *)skb->cb +
					       trans->conf.cb_data_offs +
					       sizeof(void *));

			*dev_cmd_ptr = dev_cmd;
			__skb_queue_tail(&txq->overflow_q, skb);
			spin_unlock(&txq->lock);
			return 0;
		}
	}

	idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);

	/* Set up driver data for this TFD */
	txq->entries[idx].skb = skb;
	txq->entries[idx].cmd = dev_cmd;

	dev_cmd->hdr.sequence =
		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
			    INDEX_TO_SEQ(idx)));

	/* Set up first empty entry in queue's array of Tx/cmd buffers */
	out_meta = &txq->entries[idx].meta;
	memset(out_meta, 0, sizeof(*out_meta));

	tfd = iwl_txq_gen2_build_tfd(trans, txq, dev_cmd, skb, out_meta);
	if (!tfd) {
		spin_unlock(&txq->lock);
		return -1;
	}

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_tx_cmd *tx_cmd =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd->len);
	} else {
		struct iwl_tx_cmd_v9 *tx_cmd_v9 =
			(void *)dev_cmd->payload;

		cmd_len = le16_to_cpu(tx_cmd_v9->len);
	}

	/* Set up entry for this TFD in Tx byte-count array */
	iwl_pcie_gen2_update_byte_tbl(trans, txq, cmd_len,
				      iwl_txq_gen2_get_num_tbs(tfd));

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	/* Tell device the write index *just past* this latest filled TFD */
	txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
	iwl_txq_inc_wr_ptr(trans, txq);
	/*
	 * At this point the frame is "transmitted" successfully
	 * and we will get a TX status notification eventually.
	 */
	spin_unlock(&txq->lock);
	return 0;
}

/*************** HOST COMMAND QUEUE FUNCTIONS *****/

/*
 * iwl_txq_gen2_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_txq_gen2_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];

	spin_lock_bh(&txq->reclaim_lock);
	spin_lock(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans->conf.cmd_queue) {
			int idx = iwl_txq_get_cmd_index(txq, txq->read_ptr);
			struct iwl_cmd_meta *cmd_meta = &txq->entries[idx].meta;
			struct sk_buff *skb = txq->entries[idx].skb;

			if (!WARN_ON_ONCE(!skb))
				iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
		}
		iwl_txq_gen2_free_tfd(trans, txq);
		txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock(&txq->lock);
	spin_unlock_bh(&txq->reclaim_lock);

	/* just in case - this queue may have been stopped */
	iwl_trans_pcie_wake_queue(trans, txq);
}

static void iwl_txq_gen2_free_memory(struct iwl_trans *trans,
				     struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->txqs.tfd.size * txq->n_window,
				  txq->tfds, txq->dma_addr);
		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	if (txq->bc_tbl.addr)
		dma_pool_free(trans_pcie->txqs.bc_pool,
			      txq->bc_tbl.addr, txq->bc_tbl.dma);
	kfree(txq);
}

/*
 * iwl_txq_gen2_free - Deallocate DMA queue.
 * @txq_id: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_txq_gen2_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq;
	int i;

	if (WARN_ONCE(txq_id >= IWL_MAX_TVQM_QUEUES,
		      "queue %d out of range", txq_id))
		return;

	txq = trans_pcie->txqs.txq[txq_id];

	if (WARN_ON(!txq))
		return;

	iwl_txq_gen2_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans->conf.cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kfree_sensitive(txq->entries[i].cmd);
			kfree_sensitive(txq->entries[i].free_buf);
		}
	timer_delete_sync(&txq->stuck_timer);

	iwl_txq_gen2_free_memory(trans, txq);

	trans_pcie->txqs.txq[txq_id] = NULL;

	clear_bit(txq_id, trans_pcie->txqs.queue_used);
}

static struct iwl_txq *
iwl_txq_dyn_alloc_dma(struct iwl_trans *trans, int size, unsigned int timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t bc_tbl_size, bc_tbl_entries;
	struct iwl_txq *txq;
	int ret;

	WARN_ON(!trans_pcie->txqs.bc_tbl_size);

	bc_tbl_size = trans_pcie->txqs.bc_tbl_size;
	bc_tbl_entries = bc_tbl_size / sizeof(u16);

	if (WARN_ON(size > bc_tbl_entries))
		return ERR_PTR(-EINVAL);

	txq = kzalloc(sizeof(*txq), GFP_KERNEL);
	if (!txq)
		return ERR_PTR(-ENOMEM);

	txq->bc_tbl.addr = dma_pool_alloc(trans_pcie->txqs.bc_pool, GFP_KERNEL,
					  &txq->bc_tbl.dma);
	if (!txq->bc_tbl.addr) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		kfree(txq);
		return ERR_PTR(-ENOMEM);
	}

	ret = iwl_pcie_txq_alloc(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue alloc failed\n");
		goto error;
	}
	ret = iwl_txq_init(trans, txq, size, false);
	if (ret) {
		IWL_ERR(trans, "Tx queue init failed\n");
		goto error;
	}

	txq->wd_timeout = msecs_to_jiffies(timeout);

	return txq;

error:
	iwl_txq_gen2_free_memory(trans, txq);
	return ERR_PTR(ret);
}

static int iwl_pcie_txq_alloc_response(struct iwl_trans *trans,
				       struct iwl_txq *txq,
				       struct iwl_host_cmd *hcmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_tx_queue_cfg_rsp *rsp;
	int ret, qid;
	u32 wr_ptr;

	if (WARN_ON(iwl_rx_packet_payload_len(hcmd->resp_pkt) !=
		    sizeof(*rsp))) {
		ret = -EINVAL;
		goto error_free_resp;
	}

	rsp = (void *)hcmd->resp_pkt->data;
	qid = le16_to_cpu(rsp->queue_number);
	wr_ptr = le16_to_cpu(rsp->write_pointer);

	if (qid >= ARRAY_SIZE(trans_pcie->txqs.txq)) {
		WARN_ONCE(1, "queue index %d unsupported", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (test_and_set_bit(qid, trans_pcie->txqs.queue_used)) {
		WARN_ONCE(1, "queue %d already used", qid);
		ret = -EIO;
		goto error_free_resp;
	}

	if (WARN_ONCE(trans_pcie->txqs.txq[qid],
		      "queue %d already allocated\n", qid)) {
		ret = -EIO;
		goto error_free_resp;
	}

	txq->id = qid;
	trans_pcie->txqs.txq[qid] = txq;
	wr_ptr &= (trans->mac_cfg->base->max_tfd_queue_size - 1);

	/* Place first TFD at index corresponding to start sequence number */
	txq->read_ptr = wr_ptr;
	txq->write_ptr = wr_ptr;

	IWL_DEBUG_TX_QUEUES(trans, "Activate queue %d\n", qid);

	iwl_free_resp(hcmd);
	return qid;

error_free_resp:
	iwl_free_resp(hcmd);
	iwl_txq_gen2_free_memory(trans, txq);
	return ret;
}

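/*
 * Dynamic queue allocation below: the requested size is capped by the
 * number of byte-count table entries and rounded down to a power of
 * two; Bz A-step hardware always uses a 4096-entry queue, while other
 * devices retry with halved sizes (down to 16) if DMA allocation
 * fails, before telling the firmware about the new queue.
 */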
iwl_txq_dyn_alloc(struct iwl_trans * trans,u32 flags,u32 sta_mask,u8 tid,int size,unsigned int timeout)1021*6b627f88SBjoern A. Zeeb int iwl_txq_dyn_alloc(struct iwl_trans *trans, u32 flags, u32 sta_mask,
1022*6b627f88SBjoern A. Zeeb u8 tid, int size, unsigned int timeout)
1023*6b627f88SBjoern A. Zeeb {
1024*6b627f88SBjoern A. Zeeb struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1025*6b627f88SBjoern A. Zeeb struct iwl_txq *txq;
1026*6b627f88SBjoern A. Zeeb union {
1027*6b627f88SBjoern A. Zeeb struct iwl_tx_queue_cfg_cmd old;
1028*6b627f88SBjoern A. Zeeb struct iwl_scd_queue_cfg_cmd new;
1029*6b627f88SBjoern A. Zeeb } cmd;
1030*6b627f88SBjoern A. Zeeb struct iwl_host_cmd hcmd = {
1031*6b627f88SBjoern A. Zeeb .flags = CMD_WANT_SKB,
1032*6b627f88SBjoern A. Zeeb };
1033*6b627f88SBjoern A. Zeeb int ret;
1034*6b627f88SBjoern A. Zeeb
1035*6b627f88SBjoern A. Zeeb /* take the min with bytecount table entries allowed */
1036*6b627f88SBjoern A. Zeeb size = min_t(u32, size, trans_pcie->txqs.bc_tbl_size / sizeof(u16));
1037*6b627f88SBjoern A. Zeeb /* but must be power of 2 values for calculating read/write pointers */
1038*6b627f88SBjoern A. Zeeb size = rounddown_pow_of_two(size);
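/*
 * E.g. a requested size of 1000 that survives the byte-count cap above
 * is rounded down to 512, keeping the ring arithmetic mask-based.
 */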
1039*6b627f88SBjoern A. Zeeb
1040*6b627f88SBjoern A. Zeeb if (trans->mac_cfg->device_family == IWL_DEVICE_FAMILY_BZ &&
1041*6b627f88SBjoern A. Zeeb trans->info.hw_rev_step == SILICON_A_STEP) {
1042*6b627f88SBjoern A. Zeeb size = 4096;
1043*6b627f88SBjoern A. Zeeb txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1044*6b627f88SBjoern A. Zeeb } else {
1045*6b627f88SBjoern A. Zeeb do {
1046*6b627f88SBjoern A. Zeeb txq = iwl_txq_dyn_alloc_dma(trans, size, timeout);
1047*6b627f88SBjoern A. Zeeb if (!IS_ERR(txq))
1048*6b627f88SBjoern A. Zeeb break;
1049*6b627f88SBjoern A. Zeeb
1050*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_QUEUES(trans,
1051*6b627f88SBjoern A. Zeeb "Failed allocating TXQ of size %d for sta mask %x tid %d, ret: %ld\n",
1052*6b627f88SBjoern A. Zeeb size, sta_mask, tid,
1053*6b627f88SBjoern A. Zeeb PTR_ERR(txq));
1054*6b627f88SBjoern A. Zeeb size /= 2;
1055*6b627f88SBjoern A. Zeeb } while (size >= 16);
1056*6b627f88SBjoern A. Zeeb }
1057*6b627f88SBjoern A. Zeeb
1058*6b627f88SBjoern A. Zeeb if (IS_ERR(txq))
1059*6b627f88SBjoern A. Zeeb return PTR_ERR(txq);
1060*6b627f88SBjoern A. Zeeb
1061*6b627f88SBjoern A. Zeeb if (trans->conf.queue_alloc_cmd_ver == 0) {
1062*6b627f88SBjoern A. Zeeb memset(&cmd.old, 0, sizeof(cmd.old));
1063*6b627f88SBjoern A. Zeeb cmd.old.tfdq_addr = cpu_to_le64(txq->dma_addr);
1064*6b627f88SBjoern A. Zeeb cmd.old.byte_cnt_addr = cpu_to_le64(txq->bc_tbl.dma);
1065*6b627f88SBjoern A. Zeeb cmd.old.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1066*6b627f88SBjoern A. Zeeb cmd.old.flags = cpu_to_le16(flags | TX_QUEUE_CFG_ENABLE_QUEUE);
1067*6b627f88SBjoern A. Zeeb cmd.old.tid = tid;
1068*6b627f88SBjoern A. Zeeb
1069*6b627f88SBjoern A. Zeeb if (hweight32(sta_mask) != 1) {
1070*6b627f88SBjoern A. Zeeb ret = -EINVAL;
1071*6b627f88SBjoern A. Zeeb goto error;
1072*6b627f88SBjoern A. Zeeb }
1073*6b627f88SBjoern A. Zeeb cmd.old.sta_id = ffs(sta_mask) - 1;
1074*6b627f88SBjoern A. Zeeb
1075*6b627f88SBjoern A. Zeeb hcmd.id = SCD_QUEUE_CFG;
1076*6b627f88SBjoern A. Zeeb hcmd.len[0] = sizeof(cmd.old);
1077*6b627f88SBjoern A. Zeeb hcmd.data[0] = &cmd.old;
1078*6b627f88SBjoern A. Zeeb } else if (trans->conf.queue_alloc_cmd_ver == 3) {
1079*6b627f88SBjoern A. Zeeb memset(&cmd.new, 0, sizeof(cmd.new));
1080*6b627f88SBjoern A. Zeeb cmd.new.operation = cpu_to_le32(IWL_SCD_QUEUE_ADD);
1081*6b627f88SBjoern A. Zeeb cmd.new.u.add.tfdq_dram_addr = cpu_to_le64(txq->dma_addr);
1082*6b627f88SBjoern A. Zeeb cmd.new.u.add.bc_dram_addr = cpu_to_le64(txq->bc_tbl.dma);
1083*6b627f88SBjoern A. Zeeb cmd.new.u.add.cb_size = cpu_to_le32(TFD_QUEUE_CB_SIZE(size));
1084*6b627f88SBjoern A. Zeeb cmd.new.u.add.flags = cpu_to_le32(flags);
1085*6b627f88SBjoern A. Zeeb cmd.new.u.add.sta_mask = cpu_to_le32(sta_mask);
1086*6b627f88SBjoern A. Zeeb cmd.new.u.add.tid = tid;
1087*6b627f88SBjoern A. Zeeb
1088*6b627f88SBjoern A. Zeeb hcmd.id = WIDE_ID(DATA_PATH_GROUP, SCD_QUEUE_CONFIG_CMD);
1089*6b627f88SBjoern A. Zeeb hcmd.len[0] = sizeof(cmd.new);
1090*6b627f88SBjoern A. Zeeb hcmd.data[0] = &cmd.new;
1091*6b627f88SBjoern A. Zeeb } else {
1092*6b627f88SBjoern A. Zeeb ret = -EOPNOTSUPP;
1093*6b627f88SBjoern A. Zeeb goto error;
1094*6b627f88SBjoern A. Zeeb }
1095*6b627f88SBjoern A. Zeeb
1096*6b627f88SBjoern A. Zeeb ret = iwl_trans_send_cmd(trans, &hcmd);
1097*6b627f88SBjoern A. Zeeb if (ret)
1098*6b627f88SBjoern A. Zeeb goto error;
1099*6b627f88SBjoern A. Zeeb
1100*6b627f88SBjoern A. Zeeb return iwl_pcie_txq_alloc_response(trans, txq, &hcmd);
1101*6b627f88SBjoern A. Zeeb
1102*6b627f88SBjoern A. Zeeb error:
1103*6b627f88SBjoern A. Zeeb iwl_txq_gen2_free_memory(trans, txq);
1104*6b627f88SBjoern A. Zeeb return ret;
1105*6b627f88SBjoern A. Zeeb }
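
/*
 * Illustrative sketch (not part of the driver): a minimal caller of
 * iwl_txq_dyn_alloc()/iwl_txq_dyn_free(). The station mask, TID, queue
 * size and timeout are placeholder values; the return value is either
 * a TXQ id (>= 0) or a negative errno.
 */
static int __maybe_unused iwl_txq_dyn_alloc_example(struct iwl_trans *trans)
{
	/* single station (id 0), TID 0, 64 slots, 1000 ms stuck-timer */
	int qid = iwl_txq_dyn_alloc(trans, 0, BIT(0), 0, 64, 1000);

	if (qid < 0)
		return qid;

	/* ... queue frames on qid ... */

	iwl_txq_dyn_free(trans, qid);
	return 0;
}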
1106*6b627f88SBjoern A. Zeeb
1107*6b627f88SBjoern A. Zeeb void iwl_txq_dyn_free(struct iwl_trans *trans, int queue)
1108*6b627f88SBjoern A. Zeeb {
1109*6b627f88SBjoern A. Zeeb struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1110*6b627f88SBjoern A. Zeeb
1111*6b627f88SBjoern A. Zeeb if (WARN(queue >= IWL_MAX_TVQM_QUEUES,
1112*6b627f88SBjoern A. Zeeb "queue %d out of range", queue))
1113*6b627f88SBjoern A. Zeeb return;
1114*6b627f88SBjoern A. Zeeb
1115*6b627f88SBjoern A. Zeeb /*
1116*6b627f88SBjoern A. Zeeb * Upon HW rfkill we stop the device, and then stop the queues
1117*6b627f88SBjoern A. Zeeb * in the op_mode. For the sake of the op_mode's simplicity,
1118*6b627f88SBjoern A. Zeeb * allow it to call txq_disable after it has already called
1119*6b627f88SBjoern A. Zeeb * stop_device.
1120*6b627f88SBjoern A. Zeeb */
1121*6b627f88SBjoern A. Zeeb if (!test_and_clear_bit(queue, trans_pcie->txqs.queue_used)) {
1122*6b627f88SBjoern A. Zeeb WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1123*6b627f88SBjoern A. Zeeb "queue %d not used", queue);
1124*6b627f88SBjoern A. Zeeb return;
1125*6b627f88SBjoern A. Zeeb }
1126*6b627f88SBjoern A. Zeeb
1127*6b627f88SBjoern A. Zeeb iwl_txq_gen2_free(trans, queue);
1128*6b627f88SBjoern A. Zeeb
1129*6b627f88SBjoern A. Zeeb IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", queue);
1130*6b627f88SBjoern A. Zeeb }
1131*6b627f88SBjoern A. Zeeb
1132*6b627f88SBjoern A. Zeeb void iwl_txq_gen2_tx_free(struct iwl_trans *trans)
1133*6b627f88SBjoern A. Zeeb {
1134*6b627f88SBjoern A. Zeeb struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1135*6b627f88SBjoern A. Zeeb int i;
1136*6b627f88SBjoern A. Zeeb
1137*6b627f88SBjoern A. Zeeb memset(trans_pcie->txqs.queue_used, 0,
1138*6b627f88SBjoern A. Zeeb sizeof(trans_pcie->txqs.queue_used));
1139*6b627f88SBjoern A. Zeeb
1140*6b627f88SBjoern A. Zeeb /* Free all TX queues */
1141*6b627f88SBjoern A. Zeeb for (i = 0; i < ARRAY_SIZE(trans_pcie->txqs.txq); i++) {
1142*6b627f88SBjoern A. Zeeb if (!trans_pcie->txqs.txq[i])
1143*6b627f88SBjoern A. Zeeb continue;
1144*6b627f88SBjoern A. Zeeb
1145*6b627f88SBjoern A. Zeeb iwl_txq_gen2_free(trans, i);
1146*6b627f88SBjoern A. Zeeb }
1147*6b627f88SBjoern A. Zeeb }
1148*6b627f88SBjoern A. Zeeb
1149*6b627f88SBjoern A. Zeeb int iwl_txq_gen2_init(struct iwl_trans *trans, int txq_id, int queue_size)
1150*6b627f88SBjoern A. Zeeb {
1151*6b627f88SBjoern A. Zeeb struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1152*6b627f88SBjoern A. Zeeb struct iwl_txq *queue;
1153*6b627f88SBjoern A. Zeeb int ret;
1154*6b627f88SBjoern A. Zeeb
1155*6b627f88SBjoern A. Zeeb /* alloc and init the tx queue */
1156*6b627f88SBjoern A. Zeeb if (!trans_pcie->txqs.txq[txq_id]) {
1157*6b627f88SBjoern A. Zeeb queue = kzalloc(sizeof(*queue), GFP_KERNEL);
1158*6b627f88SBjoern A. Zeeb if (!queue) {
1159*6b627f88SBjoern A. Zeeb IWL_ERR(trans, "Not enough memory for tx queue\n");
1160*6b627f88SBjoern A. Zeeb return -ENOMEM;
1161*6b627f88SBjoern A. Zeeb }
1162*6b627f88SBjoern A. Zeeb trans_pcie->txqs.txq[txq_id] = queue;
1163*6b627f88SBjoern A. Zeeb ret = iwl_pcie_txq_alloc(trans, queue, queue_size, true);
1164*6b627f88SBjoern A. Zeeb if (ret) {
1165*6b627f88SBjoern A. Zeeb IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
1166*6b627f88SBjoern A. Zeeb goto error;
1167*6b627f88SBjoern A. Zeeb }
1168*6b627f88SBjoern A. Zeeb } else {
1169*6b627f88SBjoern A. Zeeb queue = trans_pcie->txqs.txq[txq_id];
1170*6b627f88SBjoern A. Zeeb }
1171*6b627f88SBjoern A. Zeeb
1172*6b627f88SBjoern A. Zeeb ret = iwl_txq_init(trans, queue, queue_size,
1173*6b627f88SBjoern A. Zeeb (txq_id == trans->conf.cmd_queue));
1174*6b627f88SBjoern A. Zeeb if (ret) {
1175*6b627f88SBjoern A. Zeeb IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
1176*6b627f88SBjoern A. Zeeb goto error;
1177*6b627f88SBjoern A. Zeeb }
1178*6b627f88SBjoern A. Zeeb trans_pcie->txqs.txq[txq_id]->id = txq_id;
1179*6b627f88SBjoern A. Zeeb set_bit(txq_id, trans_pcie->txqs.queue_used);
1180*6b627f88SBjoern A. Zeeb
1181*6b627f88SBjoern A. Zeeb return 0;
1182*6b627f88SBjoern A. Zeeb
1183*6b627f88SBjoern A. Zeeb error:
1184*6b627f88SBjoern A. Zeeb iwl_txq_gen2_tx_free(trans);
1185*6b627f88SBjoern A. Zeeb return ret;
1186*6b627f88SBjoern A. Zeeb }
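
/*
 * Illustrative sketch (not part of the driver): the initializer above is
 * typically invoked for the configured command queue, e.g.:
 *
 *	ret = iwl_txq_gen2_init(trans, trans->conf.cmd_queue, queue_size);
 */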
1187*6b627f88SBjoern A. Zeeb
1188*6b627f88SBjoern A. Zeeb /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1189*6b627f88SBjoern A. Zeeb
1190*6b627f88SBjoern A. Zeeb /*
1191*6b627f88SBjoern A. Zeeb * iwl_pcie_gen2_enqueue_hcmd - enqueue a uCode command
1192*6b627f88SBjoern A. Zeeb * @trans: the transport to send the command through
1193*6b627f88SBjoern A. Zeeb * @cmd: a pointer to the ucode command structure
1194*6b627f88SBjoern A. Zeeb *
1195*6b627f88SBjoern A. Zeeb * The function returns < 0 values to indicate that the operation
1196*6b627f88SBjoern A. Zeeb * failed. On success, it returns the index (>= 0) of the command in
1197*6b627f88SBjoern A. Zeeb * the command queue.
1198*6b627f88SBjoern A. Zeeb */
1199*6b627f88SBjoern A. Zeeb int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans,
1200*6b627f88SBjoern A. Zeeb struct iwl_host_cmd *cmd)
1201*6b627f88SBjoern A. Zeeb {
1202*6b627f88SBjoern A. Zeeb struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1203*6b627f88SBjoern A. Zeeb struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
1204*6b627f88SBjoern A. Zeeb struct iwl_device_cmd *out_cmd;
1205*6b627f88SBjoern A. Zeeb struct iwl_cmd_meta *out_meta;
1206*6b627f88SBjoern A. Zeeb void *dup_buf = NULL;
1207*6b627f88SBjoern A. Zeeb dma_addr_t phys_addr;
1208*6b627f88SBjoern A. Zeeb int i, cmd_pos, idx;
1209*6b627f88SBjoern A. Zeeb u16 copy_size, cmd_size, tb0_size;
1210*6b627f88SBjoern A. Zeeb bool had_nocopy = false;
1211*6b627f88SBjoern A. Zeeb u8 group_id = iwl_cmd_groupid(cmd->id);
1212*6b627f88SBjoern A. Zeeb const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1213*6b627f88SBjoern A. Zeeb u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1214*6b627f88SBjoern A. Zeeb struct iwl_tfh_tfd *tfd;
1215*6b627f88SBjoern A. Zeeb unsigned long flags;
1216*6b627f88SBjoern A. Zeeb
1217*6b627f88SBjoern A. Zeeb if (WARN_ON(cmd->flags & CMD_BLOCK_TXQS))
1218*6b627f88SBjoern A. Zeeb return -EINVAL;
1219*6b627f88SBjoern A. Zeeb
1220*6b627f88SBjoern A. Zeeb copy_size = sizeof(struct iwl_cmd_header_wide);
1221*6b627f88SBjoern A. Zeeb cmd_size = sizeof(struct iwl_cmd_header_wide);
1222*6b627f88SBjoern A. Zeeb
1223*6b627f88SBjoern A. Zeeb for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1224*6b627f88SBjoern A. Zeeb cmddata[i] = cmd->data[i];
1225*6b627f88SBjoern A. Zeeb cmdlen[i] = cmd->len[i];
1226*6b627f88SBjoern A. Zeeb
1227*6b627f88SBjoern A. Zeeb if (!cmd->len[i])
1228*6b627f88SBjoern A. Zeeb continue;
1229*6b627f88SBjoern A. Zeeb
1230*6b627f88SBjoern A. Zeeb /* need at least IWL_FIRST_TB_SIZE copied */
1231*6b627f88SBjoern A. Zeeb if (copy_size < IWL_FIRST_TB_SIZE) {
1232*6b627f88SBjoern A. Zeeb int copy = IWL_FIRST_TB_SIZE - copy_size;
1233*6b627f88SBjoern A. Zeeb
1234*6b627f88SBjoern A. Zeeb if (copy > cmdlen[i])
1235*6b627f88SBjoern A. Zeeb copy = cmdlen[i];
1236*6b627f88SBjoern A. Zeeb cmdlen[i] -= copy;
1237*6b627f88SBjoern A. Zeeb cmddata[i] += copy;
1238*6b627f88SBjoern A. Zeeb copy_size += copy;
1239*6b627f88SBjoern A. Zeeb }
1240*6b627f88SBjoern A. Zeeb
1241*6b627f88SBjoern A. Zeeb if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1242*6b627f88SBjoern A. Zeeb had_nocopy = true;
1243*6b627f88SBjoern A. Zeeb if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1244*6b627f88SBjoern A. Zeeb idx = -EINVAL;
1245*6b627f88SBjoern A. Zeeb goto free_dup_buf;
1246*6b627f88SBjoern A. Zeeb }
1247*6b627f88SBjoern A. Zeeb } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1248*6b627f88SBjoern A. Zeeb /*
1249*6b627f88SBjoern A. Zeeb * This is also a chunk that isn't copied
1250*6b627f88SBjoern A. Zeeb * to the static buffer so set had_nocopy.
1251*6b627f88SBjoern A. Zeeb */
1252*6b627f88SBjoern A. Zeeb had_nocopy = true;
1253*6b627f88SBjoern A. Zeeb
1254*6b627f88SBjoern A. Zeeb /* only allowed once */
1255*6b627f88SBjoern A. Zeeb if (WARN_ON(dup_buf)) {
1256*6b627f88SBjoern A. Zeeb idx = -EINVAL;
1257*6b627f88SBjoern A. Zeeb goto free_dup_buf;
1258*6b627f88SBjoern A. Zeeb }
1259*6b627f88SBjoern A. Zeeb
1260*6b627f88SBjoern A. Zeeb dup_buf = kmemdup(cmddata[i], cmdlen[i],
1261*6b627f88SBjoern A. Zeeb GFP_ATOMIC);
1262*6b627f88SBjoern A. Zeeb if (!dup_buf)
1263*6b627f88SBjoern A. Zeeb return -ENOMEM;
1264*6b627f88SBjoern A. Zeeb } else {
1265*6b627f88SBjoern A. Zeeb /* NOCOPY must not be followed by normal! */
1266*6b627f88SBjoern A. Zeeb if (WARN_ON(had_nocopy)) {
1267*6b627f88SBjoern A. Zeeb idx = -EINVAL;
1268*6b627f88SBjoern A. Zeeb goto free_dup_buf;
1269*6b627f88SBjoern A. Zeeb }
1270*6b627f88SBjoern A. Zeeb copy_size += cmdlen[i];
1271*6b627f88SBjoern A. Zeeb }
1272*6b627f88SBjoern A. Zeeb cmd_size += cmd->len[i];
1273*6b627f88SBjoern A. Zeeb }
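/*
 * E.g. (assuming an 8-byte wide header and IWL_FIRST_TB_SIZE of 20):
 * for a single 100-byte NOCOPY chunk, the loop above carves the first
 * 12 bytes into the copied region (copy_size 8 -> 20) and leaves
 * cmdlen[0] == 88 to be DMA-mapped separately, while cmd_size == 108
 * still covers the complete command.
 */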
1274*6b627f88SBjoern A. Zeeb
1275*6b627f88SBjoern A. Zeeb /*
1276*6b627f88SBjoern A. Zeeb * If any of the command structures end up being larger than the
1277*6b627f88SBjoern A. Zeeb * TFD_MAX_PAYLOAD_SIZE and they aren't dynamically allocated into
1278*6b627f88SBjoern A. Zeeb * separate TFDs, then we will need to increase the size of the buffers
1279*6b627f88SBjoern A. Zeeb */
1280*6b627f88SBjoern A. Zeeb if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1281*6b627f88SBjoern A. Zeeb "Command %s (%#x) is too large (%d bytes)\n",
1282*6b627f88SBjoern A. Zeeb iwl_get_cmd_string(trans, cmd->id), cmd->id, copy_size)) {
1283*6b627f88SBjoern A. Zeeb idx = -EINVAL;
1284*6b627f88SBjoern A. Zeeb goto free_dup_buf;
1285*6b627f88SBjoern A. Zeeb }
1286*6b627f88SBjoern A. Zeeb
1287*6b627f88SBjoern A. Zeeb spin_lock_irqsave(&txq->lock, flags);
1288*6b627f88SBjoern A. Zeeb
1289*6b627f88SBjoern A. Zeeb idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1290*6b627f88SBjoern A. Zeeb tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
1291*6b627f88SBjoern A. Zeeb memset(tfd, 0, sizeof(*tfd));
1292*6b627f88SBjoern A. Zeeb
1293*6b627f88SBjoern A. Zeeb if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1294*6b627f88SBjoern A. Zeeb spin_unlock_irqrestore(&txq->lock, flags);
1295*6b627f88SBjoern A. Zeeb
1296*6b627f88SBjoern A. Zeeb IWL_ERR(trans, "No space in command queue\n");
1297*6b627f88SBjoern A. Zeeb iwl_op_mode_nic_error(trans->op_mode,
1298*6b627f88SBjoern A. Zeeb IWL_ERR_TYPE_CMD_QUEUE_FULL);
1299*6b627f88SBjoern A. Zeeb iwl_trans_schedule_reset(trans, IWL_ERR_TYPE_CMD_QUEUE_FULL);
1300*6b627f88SBjoern A. Zeeb idx = -ENOSPC;
1301*6b627f88SBjoern A. Zeeb goto free_dup_buf;
1302*6b627f88SBjoern A. Zeeb }
1303*6b627f88SBjoern A. Zeeb
1304*6b627f88SBjoern A. Zeeb out_cmd = txq->entries[idx].cmd;
1305*6b627f88SBjoern A. Zeeb out_meta = &txq->entries[idx].meta;
1306*6b627f88SBjoern A. Zeeb
1307*6b627f88SBjoern A. Zeeb /* re-initialize, this also marks the SG list as unused */
1308*6b627f88SBjoern A. Zeeb memset(out_meta, 0, sizeof(*out_meta));
1309*6b627f88SBjoern A. Zeeb if (cmd->flags & CMD_WANT_SKB)
1310*6b627f88SBjoern A. Zeeb out_meta->source = cmd;
1311*6b627f88SBjoern A. Zeeb
1312*6b627f88SBjoern A. Zeeb /* set up the header */
1313*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1314*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.group_id = group_id;
1315*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1316*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.length =
1317*6b627f88SBjoern A. Zeeb cpu_to_le16(cmd_size - sizeof(struct iwl_cmd_header_wide));
1318*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.reserved = 0;
1319*6b627f88SBjoern A. Zeeb out_cmd->hdr_wide.sequence =
1320*6b627f88SBjoern A. Zeeb cpu_to_le16(QUEUE_TO_SEQ(trans->conf.cmd_queue) |
1321*6b627f88SBjoern A. Zeeb INDEX_TO_SEQ(txq->write_ptr));
1322*6b627f88SBjoern A. Zeeb
1323*6b627f88SBjoern A. Zeeb cmd_pos = sizeof(struct iwl_cmd_header_wide);
1324*6b627f88SBjoern A. Zeeb copy_size = sizeof(struct iwl_cmd_header_wide);
1325*6b627f88SBjoern A. Zeeb
1326*6b627f88SBjoern A. Zeeb /* and copy the data that needs to be copied */
1327*6b627f88SBjoern A. Zeeb for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1328*6b627f88SBjoern A. Zeeb int copy;
1329*6b627f88SBjoern A. Zeeb
1330*6b627f88SBjoern A. Zeeb if (!cmd->len[i])
1331*6b627f88SBjoern A. Zeeb continue;
1332*6b627f88SBjoern A. Zeeb
1333*6b627f88SBjoern A. Zeeb /* copy everything if not nocopy/dup */
1334*6b627f88SBjoern A. Zeeb if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1335*6b627f88SBjoern A. Zeeb IWL_HCMD_DFL_DUP))) {
1336*6b627f88SBjoern A. Zeeb copy = cmd->len[i];
1337*6b627f88SBjoern A. Zeeb
1338*6b627f88SBjoern A. Zeeb memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1339*6b627f88SBjoern A. Zeeb cmd_pos += copy;
1340*6b627f88SBjoern A. Zeeb copy_size += copy;
1341*6b627f88SBjoern A. Zeeb continue;
1342*6b627f88SBjoern A. Zeeb }
1343*6b627f88SBjoern A. Zeeb
1344*6b627f88SBjoern A. Zeeb /*
1345*6b627f88SBjoern A. Zeeb * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1346*6b627f88SBjoern A. Zeeb * in total (for bi-directional DMA), but copy up to what
1347*6b627f88SBjoern A. Zeeb * we can fit into the payload for debug dump purposes.
1348*6b627f88SBjoern A. Zeeb */
1349*6b627f88SBjoern A. Zeeb copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1350*6b627f88SBjoern A. Zeeb
1351*6b627f88SBjoern A. Zeeb memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1352*6b627f88SBjoern A. Zeeb cmd_pos += copy;
1353*6b627f88SBjoern A. Zeeb
1354*6b627f88SBjoern A. Zeeb /* However, keep copy_size accurate as well; it is needed below */
1355*6b627f88SBjoern A. Zeeb if (copy_size < IWL_FIRST_TB_SIZE) {
1356*6b627f88SBjoern A. Zeeb copy = IWL_FIRST_TB_SIZE - copy_size;
1357*6b627f88SBjoern A. Zeeb
1358*6b627f88SBjoern A. Zeeb if (copy > cmd->len[i])
1359*6b627f88SBjoern A. Zeeb copy = cmd->len[i];
1360*6b627f88SBjoern A. Zeeb copy_size += copy;
1361*6b627f88SBjoern A. Zeeb }
1362*6b627f88SBjoern A. Zeeb }
1363*6b627f88SBjoern A. Zeeb
1364*6b627f88SBjoern A. Zeeb IWL_DEBUG_HC(trans,
1365*6b627f88SBjoern A. Zeeb "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1366*6b627f88SBjoern A. Zeeb iwl_get_cmd_string(trans, cmd->id), group_id,
1367*6b627f88SBjoern A. Zeeb out_cmd->hdr.cmd, le16_to_cpu(out_cmd->hdr.sequence),
1368*6b627f88SBjoern A. Zeeb cmd_size, txq->write_ptr, idx, trans->conf.cmd_queue);
1369*6b627f88SBjoern A. Zeeb
1370*6b627f88SBjoern A. Zeeb /* start the TFD with the minimum copy bytes */
1371*6b627f88SBjoern A. Zeeb tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1372*6b627f88SBjoern A. Zeeb memcpy(&txq->first_tb_bufs[idx], out_cmd, tb0_size);
1373*6b627f88SBjoern A. Zeeb iwl_txq_gen2_set_tb(trans, tfd, iwl_txq_get_first_tb_dma(txq, idx),
1374*6b627f88SBjoern A. Zeeb tb0_size);
1375*6b627f88SBjoern A. Zeeb
1376*6b627f88SBjoern A. Zeeb /* map first command fragment, if any remains */
1377*6b627f88SBjoern A. Zeeb if (copy_size > tb0_size) {
1378*6b627f88SBjoern A. Zeeb phys_addr = dma_map_single(trans->dev,
1379*6b627f88SBjoern A. Zeeb (u8 *)out_cmd + tb0_size,
1380*6b627f88SBjoern A. Zeeb copy_size - tb0_size,
1381*6b627f88SBjoern A. Zeeb DMA_TO_DEVICE);
1382*6b627f88SBjoern A. Zeeb if (dma_mapping_error(trans->dev, phys_addr)) {
1383*6b627f88SBjoern A. Zeeb idx = -ENOMEM;
1384*6b627f88SBjoern A. Zeeb iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
1385*6b627f88SBjoern A. Zeeb goto out;
1386*6b627f88SBjoern A. Zeeb }
1387*6b627f88SBjoern A. Zeeb iwl_txq_gen2_set_tb(trans, tfd, phys_addr,
1388*6b627f88SBjoern A. Zeeb copy_size - tb0_size);
1389*6b627f88SBjoern A. Zeeb }
1390*6b627f88SBjoern A. Zeeb
1391*6b627f88SBjoern A. Zeeb /* map the remaining (adjusted) nocopy/dup fragments */
1392*6b627f88SBjoern A. Zeeb for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1393*6b627f88SBjoern A. Zeeb void *data = (void *)(uintptr_t)cmddata[i];
1394*6b627f88SBjoern A. Zeeb
1395*6b627f88SBjoern A. Zeeb if (!cmdlen[i])
1396*6b627f88SBjoern A. Zeeb continue;
1397*6b627f88SBjoern A. Zeeb if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1398*6b627f88SBjoern A. Zeeb IWL_HCMD_DFL_DUP)))
1399*6b627f88SBjoern A. Zeeb continue;
1400*6b627f88SBjoern A. Zeeb if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1401*6b627f88SBjoern A. Zeeb data = dup_buf;
1402*6b627f88SBjoern A. Zeeb phys_addr = dma_map_single(trans->dev, data,
1403*6b627f88SBjoern A. Zeeb cmdlen[i], DMA_TO_DEVICE);
1404*6b627f88SBjoern A. Zeeb if (dma_mapping_error(trans->dev, phys_addr)) {
1405*6b627f88SBjoern A. Zeeb idx = -ENOMEM;
1406*6b627f88SBjoern A. Zeeb iwl_txq_gen2_tfd_unmap(trans, out_meta, tfd);
1407*6b627f88SBjoern A. Zeeb goto out;
1408*6b627f88SBjoern A. Zeeb }
1409*6b627f88SBjoern A. Zeeb iwl_txq_gen2_set_tb(trans, tfd, phys_addr, cmdlen[i]);
1410*6b627f88SBjoern A. Zeeb }
1411*6b627f88SBjoern A. Zeeb
1412*6b627f88SBjoern A. Zeeb BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1413*6b627f88SBjoern A. Zeeb out_meta->flags = cmd->flags;
1414*6b627f88SBjoern A. Zeeb if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1415*6b627f88SBjoern A. Zeeb kfree_sensitive(txq->entries[idx].free_buf);
1416*6b627f88SBjoern A. Zeeb txq->entries[idx].free_buf = dup_buf;
1417*6b627f88SBjoern A. Zeeb
1418*6b627f88SBjoern A. Zeeb trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1419*6b627f88SBjoern A. Zeeb
1420*6b627f88SBjoern A. Zeeb /* start timer if queue currently empty */
1421*6b627f88SBjoern A. Zeeb if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1422*6b627f88SBjoern A. Zeeb mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1423*6b627f88SBjoern A. Zeeb
1424*6b627f88SBjoern A. Zeeb spin_lock(&trans_pcie->reg_lock);
1425*6b627f88SBjoern A. Zeeb /* Increment and update queue's write index */
1426*6b627f88SBjoern A. Zeeb txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1427*6b627f88SBjoern A. Zeeb iwl_txq_inc_wr_ptr(trans, txq);
1428*6b627f88SBjoern A. Zeeb spin_unlock(&trans_pcie->reg_lock);
1429*6b627f88SBjoern A. Zeeb
1430*6b627f88SBjoern A. Zeeb out:
1431*6b627f88SBjoern A. Zeeb spin_unlock_irqrestore(&txq->lock, flags);
1432*6b627f88SBjoern A. Zeeb free_dup_buf:
1433*6b627f88SBjoern A. Zeeb if (idx < 0)
1434*6b627f88SBjoern A. Zeeb kfree(dup_buf);
1435*6b627f88SBjoern A. Zeeb return idx;
1436*6b627f88SBjoern A. Zeeb }
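
/*
 * Illustrative sketch (not part of the driver): sending a command whose
 * bulk payload is attached as a NOCOPY fragment, so only its first bytes
 * are copied into the command buffer by the function above. The command
 * id is a placeholder; a NOCOPY buffer must stay valid until the command
 * completes, and with CMD_WANT_SKB the caller must free the response.
 */
static int __maybe_unused iwl_send_nocopy_cmd_example(struct iwl_trans *trans,
						      const void *payload,
						      u16 payload_len)
{
	struct iwl_host_cmd hcmd = {
		.id = SCD_QUEUE_CFG,	/* placeholder command id */
		.flags = CMD_WANT_SKB,
		.data = { payload, },
		.len = { payload_len, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};
	int ret = iwl_trans_send_cmd(trans, &hcmd);

	if (ret)
		return ret;

	iwl_free_resp(&hcmd);
	return 0;
}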