1 // SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
2 /*
3 * Copyright (C) 2003-2014, 2018-2021, 2023-2024 Intel Corporation
4 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
5 * Copyright (C) 2016-2017 Intel Deutschland GmbH
6 */
7 #include <linux/etherdevice.h>
8 #include <linux/ieee80211.h>
9 #include <linux/dmapool.h>
10 #include <linux/slab.h>
11 #include <linux/sched.h>
12 #include <linux/tcp.h>
13 #include <net/ip6_checksum.h>
14 #include <net/tso.h>
15
16 #include "fw/api/commands.h"
17 #include "fw/api/datapath.h"
18 #include "fw/api/debug.h"
19 #include "iwl-fh.h"
20 #include "iwl-debug.h"
21 #include "iwl-csr.h"
22 #include "iwl-prph.h"
23 #include "iwl-io.h"
24 #include "iwl-scd.h"
25 #include "iwl-op-mode.h"
26 #include "internal.h"
27 #include "fw/api/tx.h"
28
29 /*************** DMA-QUEUE-GENERAL-FUNCTIONS *****
30 * DMA services
31 *
32 * Theory of operation
33 *
34 * A Tx or Rx queue resides in host DRAM, and is comprised of a circular buffer
35 * of buffer descriptors, each of which points to one or more data buffers for
36 * the device to read from or fill. Driver and device exchange status of each
37 * queue via "read" and "write" pointers. Driver keeps minimum of 2 empty
38 * entries in each circular buffer, to protect against confusing empty and full
39 * queue states.
40 *
41 * The device reads or writes the data in the queues via the device's several
42 * DMA/FIFO channels. Each queue is mapped to a single DMA channel.
43 *
44 * For the Tx queue, there are low-mark and high-mark limits. If, after queuing
45 * a packet for Tx, the free space becomes < low mark, the Tx queue is stopped.
46 * When reclaiming packets (on the 'tx done' IRQ), if the free space becomes
47 * > high mark, the Tx queue is resumed.
48 *
49 ***************************************************/
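/*
 * Illustrative example (the ring size here is arbitrary, not taken from the
 * hardware configuration): with a 256-entry ring, read_ptr == write_ptr means
 * "empty". Because the driver never fills the last two entries, the ring is
 * treated as full while two slots still remain free, so a full ring can never
 * be mistaken for an empty one.
 */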
50
51
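/*
 * iwl_pcie_alloc_dma_ptr - allocate a coherent DMA buffer
 *
 * Allocates @size bytes of coherent DMA memory and records the CPU address,
 * bus address and size in @ptr. Returns 0 on success or a negative errno.
 */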
52 int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
53 struct iwl_dma_ptr *ptr, size_t size)
54 {
55 if (WARN_ON(ptr->addr))
56 return -EINVAL;
57
58 ptr->addr = dma_alloc_coherent(trans->dev, size,
59 &ptr->dma, GFP_KERNEL);
60 if (!ptr->addr)
61 return -ENOMEM;
62 ptr->size = size;
63 return 0;
64 }
65
66 void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
67 {
68 if (unlikely(!ptr->addr))
69 return;
70
71 dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
72 memset(ptr, 0, sizeof(*ptr));
73 }
74
75 /*
76 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
77 */
78 static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
79 struct iwl_txq *txq)
80 {
81 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
82 u32 reg = 0;
83 int txq_id = txq->id;
84
85 lockdep_assert_held(&txq->lock);
86
87 /*
88 * explicitly wake up the NIC if:
89 * 1. shadow registers aren't enabled
90 * 2. NIC is woken up for CMD regardless of shadow outside this function
91 * 3. there is a chance that the NIC is asleep
92 */
93 if (!trans->trans_cfg->base_params->shadow_reg_enable &&
94 txq_id != trans_pcie->txqs.cmd.q_id &&
95 test_bit(STATUS_TPOWER_PMI, &trans->status)) {
96 /*
97 * wake up nic if it's powered down ...
98 * uCode will wake up, and interrupt us again, so next
99 * time we'll skip this part.
100 */
101 reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);
102
103 if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
104 IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
105 txq_id, reg);
106 iwl_set_bit(trans, CSR_GP_CNTRL,
107 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
108 txq->need_update = true;
109 return;
110 }
111 }
112
113 /*
114 * if not in power-save mode, uCode will never sleep when we're
115 * trying to tx (during RFKILL, we're not trying to tx).
116 */
117 IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
118 if (!txq->block)
119 iwl_write32(trans, HBUS_TARG_WRPTR,
120 txq->write_ptr | (txq_id << 8));
121 }
122
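/*
 * iwl_pcie_txq_check_wrptrs - apply deferred write pointer updates
 *
 * Walk all used Tx queues and push the write pointer to the hardware for
 * any queue marked need_update, i.e. queues whose update was deferred in
 * iwl_pcie_txq_inc_wr_ptr() while the NIC was being woken up.
 */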
123 void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
124 {
125 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
126 int i;
127
128 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
129 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
130
131 if (!test_bit(i, trans_pcie->txqs.queue_used))
132 continue;
133
134 spin_lock_bh(&txq->lock);
135 if (txq->need_update) {
136 iwl_pcie_txq_inc_wr_ptr(trans, txq);
137 txq->need_update = false;
138 }
139 spin_unlock_bh(&txq->lock);
140 }
141 }
142
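/*
 * iwl_pcie_gen1_tfd_set_tb - fill one transfer buffer entry of a gen1 TFD
 *
 * Store the (up to 36-bit) DMA address split into the low 32 bits and the
 * high nibble of hi_n_len, together with the length, and update num_tbs.
 */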
143 static inline void iwl_pcie_gen1_tfd_set_tb(struct iwl_tfd *tfd,
144 u8 idx, dma_addr_t addr, u16 len)
145 {
146 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
147 u16 hi_n_len = len << 4;
148
149 put_unaligned_le32(addr, &tb->lo);
150 hi_n_len |= iwl_get_dma_hi_addr(addr);
151
152 tb->hi_n_len = cpu_to_le16(hi_n_len);
153
154 tfd->num_tbs = idx + 1;
155 }
156
157 static inline u8 iwl_txq_gen1_tfd_get_num_tbs(struct iwl_tfd *tfd)
158 {
159 return tfd->num_tbs & 0x1f;
160 }
161
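/*
 * iwl_pcie_txq_build_tfd - add a transfer buffer to the TFD at write_ptr
 *
 * Optionally zero the TFD first (@reset), then append a TB with the given
 * DMA address and length. Returns the index of the new TB, or a negative
 * errno if the TFD is already full or the address is badly aligned.
 */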
162 static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
163 dma_addr_t addr, u16 len, bool reset)
164 {
165 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
166 void *tfd;
167 u32 num_tbs;
168
169 tfd = (u8 *)txq->tfds + trans_pcie->txqs.tfd.size * txq->write_ptr;
170
171 if (reset)
172 memset(tfd, 0, trans_pcie->txqs.tfd.size);
173
174 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
175
176 /* Each TFD can point to a maximum of max_tbs Tx buffers */
177 if (num_tbs >= trans_pcie->txqs.tfd.max_tbs) {
178 IWL_ERR(trans, "Error can not send more than %d chunks\n",
179 trans_pcie->txqs.tfd.max_tbs);
180 return -EINVAL;
181 }
182
183 if (WARN(addr & ~IWL_TX_DMA_MASK,
184 "Unaligned address = %llx\n", (unsigned long long)addr))
185 return -EINVAL;
186
187 iwl_pcie_gen1_tfd_set_tb(tfd, num_tbs, addr, len);
188
189 return num_tbs;
190 }
191
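/*
 * iwl_pcie_clear_cmd_in_flight - let the NIC sleep again after host commands
 *
 * Only relevant on devices needing the APMG wake-up workaround: drop the
 * MAC access request that iwl_pcie_set_cmd_in_flight() took to keep the
 * NIC awake while host commands were pending.
 */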
192 static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
193 {
194 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
195
196 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
197 return;
198
199 spin_lock(&trans_pcie->reg_lock);
200
201 if (WARN_ON(!trans_pcie->cmd_hold_nic_awake)) {
202 spin_unlock(&trans_pcie->reg_lock);
203 return;
204 }
205
206 trans_pcie->cmd_hold_nic_awake = false;
207 __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
208 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
209 spin_unlock(&trans_pcie->reg_lock);
210 }
211
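/*
 * iwl_pcie_free_and_unmap_tso_page - release one reference on a TSO page
 *
 * When the last reference is dropped, the page is unmapped from DMA and
 * freed.
 */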
212 static void iwl_pcie_free_and_unmap_tso_page(struct iwl_trans *trans,
213 struct page *page)
214 {
215 struct iwl_tso_page_info *info = IWL_TSO_PAGE_INFO(page_address(page));
216
217 /* Decrease internal use count and unmap/free page if needed */
218 if (refcount_dec_and_test(&info->use_count)) {
219 dma_unmap_page(trans->dev, info->dma_addr, PAGE_SIZE,
220 DMA_TO_DEVICE);
221
222 __free_page(page);
223 }
224 }
225
226 void iwl_pcie_free_tso_pages(struct iwl_trans *trans, struct sk_buff *skb,
227 struct iwl_cmd_meta *cmd_meta)
228 {
229 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
230 struct page **page_ptr;
231 struct page *next;
232
233 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
234 next = *page_ptr;
235 *page_ptr = NULL;
236
237 while (next) {
238 struct iwl_tso_page_info *info;
239 struct page *tmp = next;
240
241 info = IWL_TSO_PAGE_INFO(page_address(next));
242 next = info->next;
243
244 /* Unmap the scatter gather list that is on the last page */
245 if (!next && cmd_meta->sg_offset) {
246 struct sg_table *sgt;
247
248 sgt = (void *)((u8 *)page_address(tmp) +
249 cmd_meta->sg_offset);
250
251 dma_unmap_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0);
252 }
253
254 iwl_pcie_free_and_unmap_tso_page(trans, tmp);
255 }
256 }
257
258 static inline dma_addr_t
259 iwl_txq_gen1_tfd_tb_get_addr(struct iwl_tfd *tfd, u8 idx)
260 {
261 struct iwl_tfd_tb *tb = &tfd->tbs[idx];
262 dma_addr_t addr;
263 dma_addr_t hi_len;
264
265 addr = get_unaligned_le32(&tb->lo);
266
267 if (sizeof(dma_addr_t) <= sizeof(u32))
268 return addr;
269
270 hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;
271
272 /*
273 * shift by 16 twice to avoid warnings on 32-bit
274 * (where this code never runs anyway due to the
275 * if statement above)
276 */
277 return addr | ((hi_len << 16) << 16);
278 }
279
280 static void iwl_txq_set_tfd_invalid_gen1(struct iwl_trans *trans,
281 struct iwl_tfd *tfd)
282 {
283 tfd->num_tbs = 0;
284
285 iwl_pcie_gen1_tfd_set_tb(tfd, 0, trans->invalid_tx_cmd.dma,
286 trans->invalid_tx_cmd.size);
287 }
288
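/*
 * iwl_txq_gen1_tfd_unmap - unmap the DMA buffers referenced by a gen1 TFD
 *
 * TB0 (the bidirectional first_tb buffer) is never unmapped here; the
 * remaining TBs are unmapped as pages or single mappings depending on
 * meta->tbs, and the TFD is then overwritten with the invalid TX command.
 */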
289 static void iwl_txq_gen1_tfd_unmap(struct iwl_trans *trans,
290 struct iwl_cmd_meta *meta,
291 struct iwl_txq *txq, int index)
292 {
293 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
294 int i, num_tbs;
295 struct iwl_tfd *tfd = iwl_txq_get_tfd(trans, txq, index);
296
297 /* Sanity check on number of chunks */
298 num_tbs = iwl_txq_gen1_tfd_get_num_tbs(tfd);
299
300 if (num_tbs > trans_pcie->txqs.tfd.max_tbs) {
301 IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
302 /* @todo issue fatal error, it is quite a serious situation */
303 return;
304 }
305
306 /* TB1 is mapped directly, the rest is the TSO page and SG list. */
307 if (meta->sg_offset)
308 num_tbs = 2;
309
310 /* first TB is never freed - it's the bidirectional DMA data */
311
312 for (i = 1; i < num_tbs; i++) {
313 if (meta->tbs & BIT(i))
314 dma_unmap_page(trans->dev,
315 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
316 iwl_txq_gen1_tfd_tb_get_len(trans,
317 tfd, i),
318 DMA_TO_DEVICE);
319 else
320 dma_unmap_single(trans->dev,
321 iwl_txq_gen1_tfd_tb_get_addr(tfd, i),
322 iwl_txq_gen1_tfd_tb_get_len(trans,
323 tfd, i),
324 DMA_TO_DEVICE);
325 }
326
327 meta->tbs = 0;
328
329 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
330 }
331
332 /**
333 * iwl_txq_free_tfd - Free all chunks referenced by TFD [txq->q.read_ptr]
334 * @trans: transport private data
335 * @txq: tx queue
336 * @read_ptr: the TXQ read_ptr to free
337 *
338 * Does NOT advance any TFD circular buffer read/write indexes
339 * Does NOT free the TFD itself (which is within circular buffer)
340 */
341 static void iwl_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
342 int read_ptr)
343 {
344 /* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
345 * idx is bounded by n_window
346 */
347 int idx = iwl_txq_get_cmd_index(txq, read_ptr);
348 struct sk_buff *skb;
349
350 lockdep_assert_held(&txq->reclaim_lock);
351
352 if (!txq->entries)
353 return;
354
355 /* We have only q->n_window txq->entries, but we use
356 * TFD_QUEUE_SIZE_MAX tfds
357 */
358 if (trans->trans_cfg->gen2)
359 iwl_txq_gen2_tfd_unmap(trans, &txq->entries[idx].meta,
360 iwl_txq_get_tfd(trans, txq, read_ptr));
361 else
362 iwl_txq_gen1_tfd_unmap(trans, &txq->entries[idx].meta,
363 txq, read_ptr);
364
365 /* free SKB */
366 skb = txq->entries[idx].skb;
367
368 /* Can be called from irqs-disabled context
369 * If skb is not NULL, it means that the whole queue is being
370 * freed and that the queue is not empty - free the skb
371 */
372 if (skb) {
373 iwl_op_mode_free_skb(trans->op_mode, skb);
374 txq->entries[idx].skb = NULL;
375 }
376 }
377
378 /*
379 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
380 */
381 static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
382 {
383 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
384 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
385
386 if (!txq) {
387 IWL_ERR(trans, "Trying to free a queue that wasn't allocated?\n");
388 return;
389 }
390
391 spin_lock_bh(&txq->reclaim_lock);
392 spin_lock(&txq->lock);
393 while (txq->write_ptr != txq->read_ptr) {
394 IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
395 txq_id, txq->read_ptr);
396
397 if (txq_id != trans_pcie->txqs.cmd.q_id) {
398 struct sk_buff *skb = txq->entries[txq->read_ptr].skb;
399 struct iwl_cmd_meta *cmd_meta =
400 &txq->entries[txq->read_ptr].meta;
401
402 if (WARN_ON_ONCE(!skb))
403 continue;
404
405 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
406 }
407 iwl_txq_free_tfd(trans, txq, txq->read_ptr);
408 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
409
410 if (txq->read_ptr == txq->write_ptr &&
411 txq_id == trans_pcie->txqs.cmd.q_id)
412 iwl_pcie_clear_cmd_in_flight(trans);
413 }
414
415 while (!skb_queue_empty(&txq->overflow_q)) {
416 struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);
417
418 iwl_op_mode_free_skb(trans->op_mode, skb);
419 }
420
421 spin_unlock(&txq->lock);
422 spin_unlock_bh(&txq->reclaim_lock);
423
424 /* just in case - this queue may have been stopped */
425 iwl_trans_pcie_wake_queue(trans, txq);
426 }
427
428 /*
429 * iwl_pcie_txq_free - Deallocate DMA queue.
430 * @txq: Transmit queue to deallocate.
431 *
432 * Empty queue by removing and destroying all BD's.
433 * Free all buffers.
434 * 0-fill, but do not free "txq" descriptor structure.
435 */
436 static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
437 {
438 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
439 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
440 struct device *dev = trans->dev;
441 int i;
442
443 if (WARN_ON(!txq))
444 return;
445
446 iwl_pcie_txq_unmap(trans, txq_id);
447
448 /* De-alloc array of command/tx buffers */
449 if (txq_id == trans_pcie->txqs.cmd.q_id)
450 for (i = 0; i < txq->n_window; i++) {
451 kfree_sensitive(txq->entries[i].cmd);
452 kfree_sensitive(txq->entries[i].free_buf);
453 }
454
455 /* De-alloc circular buffer of TFDs */
456 if (txq->tfds) {
457 dma_free_coherent(dev,
458 trans_pcie->txqs.tfd.size *
459 trans->trans_cfg->base_params->max_tfd_queue_size,
460 txq->tfds, txq->dma_addr);
461 txq->dma_addr = 0;
462 txq->tfds = NULL;
463
464 dma_free_coherent(dev,
465 sizeof(*txq->first_tb_bufs) * txq->n_window,
466 txq->first_tb_bufs, txq->first_tb_dma);
467 }
468
469 kfree(txq->entries);
470 txq->entries = NULL;
471
472 del_timer_sync(&txq->stuck_timer);
473
474 /* 0-fill queue descriptor structure */
475 memset(txq, 0, sizeof(*txq));
476 }
477
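/*
 * iwl_pcie_tx_start - bring up the Tx scheduler and DMA channels
 *
 * Clears the scheduler context SRAM, points the scheduler at the byte count
 * tables, (re)enables the command queue, activates all Tx DMA/FIFO channels
 * and updates the FH chicken bits. @scd_base_addr may be 0 when the previous
 * SCD base address cannot be trusted (e.g. after WoWLAN resume).
 */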
478 void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
479 {
480 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
481 int nq = trans->trans_cfg->base_params->num_of_queues;
482 int chan;
483 u32 reg_val;
484 int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
485 SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);
486
487 /* make sure all queues are not stopped/used */
488 memset(trans_pcie->txqs.queue_stopped, 0,
489 sizeof(trans_pcie->txqs.queue_stopped));
490 memset(trans_pcie->txqs.queue_used, 0,
491 sizeof(trans_pcie->txqs.queue_used));
492
493 trans_pcie->scd_base_addr =
494 iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);
495
496 WARN_ON(scd_base_addr != 0 &&
497 scd_base_addr != trans_pcie->scd_base_addr);
498
499 /* reset context data, TX status and translation data */
500 iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
501 SCD_CONTEXT_MEM_LOWER_BOUND,
502 NULL, clear_dwords);
503
504 iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
505 trans_pcie->txqs.scd_bc_tbls.dma >> 10);
506
507 /* The chain extension of the SCD doesn't work well. This feature is
508 * enabled by default by the HW, so we need to disable it manually.
509 */
510 if (trans->trans_cfg->base_params->scd_chain_ext_wa)
511 iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);
512
513 iwl_trans_ac_txq_enable(trans, trans_pcie->txqs.cmd.q_id,
514 trans_pcie->txqs.cmd.fifo,
515 trans_pcie->txqs.cmd.wdg_timeout);
516
517 /* Activate all Tx DMA/FIFO channels */
518 iwl_scd_activate_fifos(trans);
519
520 /* Enable DMA channel */
521 for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
522 iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
523 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
524 FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);
525
526 /* Update FH chicken bits */
527 reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
528 iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
529 reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);
530
531 /* Enable L1-Active */
532 if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
533 iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
534 APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
535 }
536
537 void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
538 {
539 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
540 int txq_id;
541
542 /*
543 * we should never get here in gen2 trans mode; return early to avoid
544 * invalid accesses
545 */
546 if (WARN_ON_ONCE(trans->trans_cfg->gen2))
547 return;
548
549 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
550 txq_id++) {
551 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
552 if (trans->trans_cfg->gen2)
553 iwl_write_direct64(trans,
554 FH_MEM_CBBC_QUEUE(trans, txq_id),
555 txq->dma_addr);
556 else
557 iwl_write_direct32(trans,
558 FH_MEM_CBBC_QUEUE(trans, txq_id),
559 txq->dma_addr >> 8);
560 iwl_pcie_txq_unmap(trans, txq_id);
561 txq->read_ptr = 0;
562 txq->write_ptr = 0;
563 }
564
565 /* Tell NIC where to find the "keep warm" buffer */
566 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
567 trans_pcie->kw.dma >> 4);
568
569 /*
570 * Send 0 as the scd_base_addr since the device may have been reset
571 * while we were in WoWLAN in which case SCD_SRAM_BASE_ADDR will
572 * contain garbage.
573 */
574 iwl_pcie_tx_start(trans, 0);
575 }
576
577 static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
578 {
579 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
580 int ch, ret;
581 u32 mask = 0;
582
583 spin_lock_bh(&trans_pcie->irq_lock);
584
585 if (!iwl_trans_grab_nic_access(trans))
586 goto out;
587
588 /* Stop each Tx DMA channel */
589 for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
590 iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
591 mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
592 }
593
594 /* Wait for DMA channels to be idle */
595 ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
596 if (ret < 0)
597 IWL_ERR(trans,
598 "Failing on timeout while stopping DMA channel %d [0x%08x]\n",
599 ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));
600
601 iwl_trans_release_nic_access(trans);
602
603 out:
604 spin_unlock_bh(&trans_pcie->irq_lock);
605 }
606
607 /*
608 * iwl_pcie_tx_stop - Stop all Tx DMA channels
609 */
610 int iwl_pcie_tx_stop(struct iwl_trans *trans)
611 {
612 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
613 int txq_id;
614
615 /* Turn off all Tx DMA fifos */
616 iwl_scd_deactivate_fifos(trans);
617
618 /* Turn off all Tx DMA channels */
619 iwl_pcie_tx_stop_fh(trans);
620
621 /*
622 * This function can be called before the op_mode disabled the
623 * queues. This happens when we have an rfkill interrupt.
624 * Since we stop Tx altogether - mark the queues as stopped.
625 */
626 memset(trans_pcie->txqs.queue_stopped, 0,
627 sizeof(trans_pcie->txqs.queue_stopped));
628 memset(trans_pcie->txqs.queue_used, 0,
629 sizeof(trans_pcie->txqs.queue_used));
630
631 /* This can happen: start_hw, stop_device */
632 if (!trans_pcie->txq_memory)
633 return 0;
634
635 /* Unmap DMA from host system and free skb's */
636 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
637 txq_id++)
638 iwl_pcie_txq_unmap(trans, txq_id);
639
640 return 0;
641 }
642
643 /*
644 * iwl_trans_tx_free - Free TXQ Context
645 *
646 * Destroy all TX DMA queues and structures
647 */
648 void iwl_pcie_tx_free(struct iwl_trans *trans)
649 {
650 int txq_id;
651 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
652
653 memset(trans_pcie->txqs.queue_used, 0,
654 sizeof(trans_pcie->txqs.queue_used));
655
656 /* Tx queues */
657 if (trans_pcie->txq_memory) {
658 for (txq_id = 0;
659 txq_id < trans->trans_cfg->base_params->num_of_queues;
660 txq_id++) {
661 iwl_pcie_txq_free(trans, txq_id);
662 trans_pcie->txqs.txq[txq_id] = NULL;
663 }
664 }
665
666 kfree(trans_pcie->txq_memory);
667 trans_pcie->txq_memory = NULL;
668
669 iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);
670
671 iwl_pcie_free_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls);
672 }
673
674 void iwl_txq_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq)
675 {
676 u32 txq_id = txq->id;
677 u32 status;
678 bool active;
679 u8 fifo;
680
681 if (trans->trans_cfg->gen2) {
682 IWL_ERR(trans, "Queue %d is stuck %d %d\n", txq_id,
683 txq->read_ptr, txq->write_ptr);
684 /* TODO: access new SCD registers and dump them */
685 return;
686 }
687
688 status = iwl_read_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id));
689 fifo = (status >> SCD_QUEUE_STTS_REG_POS_TXF) & 0x7;
690 active = !!(status & BIT(SCD_QUEUE_STTS_REG_POS_ACTIVE));
691
692 IWL_ERR(trans,
693 "Queue %d is %sactive on fifo %d and stuck for %u ms. SW [%d, %d] HW [%d, %d] FH TRB=0x0%x\n",
694 txq_id, active ? "" : "in", fifo,
695 jiffies_to_msecs(txq->wd_timeout),
696 txq->read_ptr, txq->write_ptr,
697 iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) &
698 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
699 iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) &
700 (trans->trans_cfg->base_params->max_tfd_queue_size - 1),
701 iwl_read_direct32(trans, FH_TX_TRB_REG(fifo)));
702 }
703
704 static void iwl_txq_stuck_timer(struct timer_list *t)
705 {
706 struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
707 struct iwl_trans *trans = txq->trans;
708
709 spin_lock(&txq->lock);
710 /* check if triggered erroneously */
711 if (txq->read_ptr == txq->write_ptr) {
712 spin_unlock(&txq->lock);
713 return;
714 }
715 spin_unlock(&txq->lock);
716
717 iwl_txq_log_scd_error(trans, txq);
718
719 iwl_force_nmi(trans);
720 }
721
722 int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
723 int slots_num, bool cmd_queue)
724 {
725 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
726 size_t num_entries = trans->trans_cfg->gen2 ?
727 slots_num : trans->trans_cfg->base_params->max_tfd_queue_size;
728 size_t tfd_sz;
729 size_t tb0_buf_sz;
730 int i;
731
732 if (WARN_ONCE(slots_num <= 0, "Invalid slots num:%d\n", slots_num))
733 return -EINVAL;
734
735 if (WARN_ON(txq->entries || txq->tfds))
736 return -EINVAL;
737
738 tfd_sz = trans_pcie->txqs.tfd.size * num_entries;
739
740 timer_setup(&txq->stuck_timer, iwl_txq_stuck_timer, 0);
741 txq->trans = trans;
742
743 txq->n_window = slots_num;
744
745 txq->entries = kcalloc(slots_num,
746 sizeof(struct iwl_pcie_txq_entry),
747 GFP_KERNEL);
748
749 if (!txq->entries)
750 goto error;
751
752 if (cmd_queue)
753 for (i = 0; i < slots_num; i++) {
754 txq->entries[i].cmd =
755 kmalloc(sizeof(struct iwl_device_cmd),
756 GFP_KERNEL);
757 if (!txq->entries[i].cmd)
758 goto error;
759 }
760
761 /* Circular buffer of transmit frame descriptors (TFDs),
762 * shared with device
763 */
764 txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
765 &txq->dma_addr, GFP_KERNEL);
766 if (!txq->tfds)
767 goto error;
768
769 BUILD_BUG_ON(sizeof(*txq->first_tb_bufs) != IWL_FIRST_TB_SIZE_ALIGN);
770
771 tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;
772
773 txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
774 &txq->first_tb_dma,
775 GFP_KERNEL);
776 if (!txq->first_tb_bufs)
777 goto err_free_tfds;
778
779 for (i = 0; i < num_entries; i++) {
780 void *tfd = iwl_txq_get_tfd(trans, txq, i);
781
782 if (trans->trans_cfg->gen2)
783 iwl_txq_set_tfd_invalid_gen2(trans, tfd);
784 else
785 iwl_txq_set_tfd_invalid_gen1(trans, tfd);
786 }
787
788 return 0;
789 err_free_tfds:
790 dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
791 txq->tfds = NULL;
792 error:
793 if (txq->entries && cmd_queue)
794 for (i = 0; i < slots_num; i++)
795 kfree(txq->entries[i].cmd);
796 kfree(txq->entries);
797 txq->entries = NULL;
798
799 return -ENOMEM;
800 }
801
802 /*
803 * iwl_pcie_tx_alloc - allocate TX context
804 * Allocate all Tx DMA structures and initialize them
805 */
806 static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
807 {
808 int ret;
809 int txq_id, slots_num;
810 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
811 u16 bc_tbls_size = trans->trans_cfg->base_params->num_of_queues;
812
813 if (WARN_ON(trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))
814 return -EINVAL;
815
816 bc_tbls_size *= sizeof(struct iwlagn_scd_bc_tbl);
817
818 /* It is not allowed to alloc twice, so warn when this happens.
819 * We cannot rely on the previous allocation, so free and fail. */
820 if (WARN_ON(trans_pcie->txq_memory)) {
821 ret = -EINVAL;
822 goto error;
823 }
824
825 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->txqs.scd_bc_tbls,
826 bc_tbls_size);
827 if (ret) {
828 IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
829 goto error;
830 }
831
832 /* Alloc keep-warm buffer */
833 ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
834 if (ret) {
835 IWL_ERR(trans, "Keep Warm allocation failed\n");
836 goto error;
837 }
838
839 trans_pcie->txq_memory =
840 kcalloc(trans->trans_cfg->base_params->num_of_queues,
841 sizeof(struct iwl_txq), GFP_KERNEL);
842 if (!trans_pcie->txq_memory) {
843 IWL_ERR(trans, "Not enough memory for txq\n");
844 ret = -ENOMEM;
845 goto error;
846 }
847
848 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
849 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
850 txq_id++) {
851 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
852
853 if (cmd_queue)
854 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
855 trans->cfg->min_txq_size);
856 else
857 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
858 trans->cfg->min_ba_txq_size);
859 trans_pcie->txqs.txq[txq_id] = &trans_pcie->txq_memory[txq_id];
860 ret = iwl_pcie_txq_alloc(trans, trans_pcie->txqs.txq[txq_id],
861 slots_num, cmd_queue);
862 if (ret) {
863 IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
864 goto error;
865 }
866 trans_pcie->txqs.txq[txq_id]->id = txq_id;
867 }
868
869 return 0;
870
871 error:
872 iwl_pcie_tx_free(trans);
873
874 return ret;
875 }
876
877 /*
878 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
879 */
880 static int iwl_queue_init(struct iwl_txq *q, int slots_num)
881 {
882 q->n_window = slots_num;
883
884 /* slots_num must be power-of-two size, otherwise
885 * iwl_txq_get_cmd_index is broken.
886 */
887 if (WARN_ON(!is_power_of_2(slots_num)))
888 return -EINVAL;
889
890 q->low_mark = q->n_window / 4;
891 if (q->low_mark < 4)
892 q->low_mark = 4;
893
894 q->high_mark = q->n_window / 8;
895 if (q->high_mark < 2)
896 q->high_mark = 2;
897
898 q->write_ptr = 0;
899 q->read_ptr = 0;
900
901 return 0;
902 }
903
904 int iwl_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
905 int slots_num, bool cmd_queue)
906 {
907 u32 tfd_queue_max_size =
908 trans->trans_cfg->base_params->max_tfd_queue_size;
909 int ret;
910
911 txq->need_update = false;
912
913 /* max_tfd_queue_size must be power-of-two size, otherwise
914 * iwl_txq_inc_wrap and iwl_txq_dec_wrap are broken.
915 */
916 if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
917 "Max tfd queue size must be a power of two, but is %d",
918 tfd_queue_max_size))
919 return -EINVAL;
920
921 /* Initialize queue's high/low-water marks, and head/tail indexes */
922 ret = iwl_queue_init(txq, slots_num);
923 if (ret)
924 return ret;
925
926 spin_lock_init(&txq->lock);
927 spin_lock_init(&txq->reclaim_lock);
928
929 if (cmd_queue) {
930 static struct lock_class_key iwl_txq_cmd_queue_lock_class;
931
932 lockdep_set_class(&txq->lock, &iwl_txq_cmd_queue_lock_class);
933 }
934
935 __skb_queue_head_init(&txq->overflow_q);
936
937 return 0;
938 }
939
940 int iwl_pcie_tx_init(struct iwl_trans *trans)
941 {
942 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
943 int ret;
944 int txq_id, slots_num;
945 bool alloc = false;
946
947 if (!trans_pcie->txq_memory) {
948 ret = iwl_pcie_tx_alloc(trans);
949 if (ret)
950 goto error;
951 alloc = true;
952 }
953
954 spin_lock_bh(&trans_pcie->irq_lock);
955
956 /* Turn off all Tx DMA fifos */
957 iwl_scd_deactivate_fifos(trans);
958
959 /* Tell NIC where to find the "keep warm" buffer */
960 iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
961 trans_pcie->kw.dma >> 4);
962
963 spin_unlock_bh(&trans_pcie->irq_lock);
964
965 /* Alloc and init all Tx queues, including the command queue (#4/#9) */
966 for (txq_id = 0; txq_id < trans->trans_cfg->base_params->num_of_queues;
967 txq_id++) {
968 bool cmd_queue = (txq_id == trans_pcie->txqs.cmd.q_id);
969
970 if (cmd_queue)
971 slots_num = max_t(u32, IWL_CMD_QUEUE_SIZE,
972 trans->cfg->min_txq_size);
973 else
974 slots_num = max_t(u32, IWL_DEFAULT_QUEUE_SIZE,
975 trans->cfg->min_ba_txq_size);
976 ret = iwl_txq_init(trans, trans_pcie->txqs.txq[txq_id], slots_num,
977 cmd_queue);
978 if (ret) {
979 IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
980 goto error;
981 }
982
983 /*
984 * Tell nic where to find circular buffer of TFDs for a
985 * given Tx queue, and enable the DMA channel used for that
986 * queue.
987 * Circular buffer (TFD queue in DRAM) physical base address
988 */
989 iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
990 trans_pcie->txqs.txq[txq_id]->dma_addr >> 8);
991 }
992
993 iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
994 if (trans->trans_cfg->base_params->num_of_queues > 20)
995 iwl_set_bits_prph(trans, SCD_GP_CTRL,
996 SCD_GP_CTRL_ENABLE_31_QUEUES);
997
998 return 0;
999 error:
1000 /* Upon error, free only if we allocated something */
1001 if (alloc)
1002 iwl_pcie_tx_free(trans);
1003 return ret;
1004 }
1005
1006 static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
1007 const struct iwl_host_cmd *cmd)
1008 {
1009 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1010
1011 /* Make sure the NIC is still alive in the bus */
1012 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
1013 return -ENODEV;
1014
1015 if (!trans->trans_cfg->base_params->apmg_wake_up_wa)
1016 return 0;
1017
1018 /*
1019 * wake up the NIC to make sure that the firmware will see the host
1020 * command - we will let the NIC sleep once all the host commands
1021 * returned. This needs to be done only on NICs that have
1022 * apmg_wake_up_wa set (see above.)
1023 */
1024 if (!_iwl_trans_pcie_grab_nic_access(trans))
1025 return -EIO;
1026
1027 /*
1028 * In iwl_trans_grab_nic_access(), we've acquired the reg_lock.
1029 * There, we also returned immediately if cmd_hold_nic_awake is
1030 * already true, so it's OK to unconditionally set it to true.
1031 */
1032 trans_pcie->cmd_hold_nic_awake = true;
1033 spin_unlock(&trans_pcie->reg_lock);
1034
1035 return 0;
1036 }
1037
1038 static void iwl_txq_progress(struct iwl_txq *txq)
1039 {
1040 lockdep_assert_held(&txq->lock);
1041
1042 if (!txq->wd_timeout)
1043 return;
1044
1045 /*
1046 * station is asleep and we send data - that must
1047 * be uAPSD or PS-Poll. Don't rearm the timer.
1048 */
1049 if (txq->frozen)
1050 return;
1051
1052 /*
1053 * if empty delete timer, otherwise move timer forward
1054 * since we're making progress on this queue
1055 */
1056 if (txq->read_ptr == txq->write_ptr)
1057 del_timer(&txq->stuck_timer);
1058 else
1059 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1060 }
1061
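/*
 * iwl_txq_used - check whether ring index @i is currently in use
 *
 * Returns true if @i lies between the read and write pointers of the ring,
 * taking wrap-around into account.
 */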
1062 static inline bool iwl_txq_used(const struct iwl_txq *q, int i,
1063 int read_ptr, int write_ptr)
1064 {
1065 int index = iwl_txq_get_cmd_index(q, i);
1066 int r = iwl_txq_get_cmd_index(q, read_ptr);
1067 int w = iwl_txq_get_cmd_index(q, write_ptr);
1068
1069 return w >= r ?
1070 (index >= r && index < w) :
1071 !(index < r && index >= w);
1072 }
1073
1074 /*
1075 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
1076 *
1077 * When FW advances 'R' index, all entries between old and new 'R' index
1078 * need to be reclaimed. As a result, some free space forms. If there is
1079 * enough free space (> low mark), wake the stack that feeds us.
1080 */
1081 static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
1082 {
1083 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1084 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1085 int nfreed = 0;
1086 u16 r;
1087
1088 lockdep_assert_held(&txq->lock);
1089
1090 idx = iwl_txq_get_cmd_index(txq, idx);
1091 r = iwl_txq_get_cmd_index(txq, txq->read_ptr);
1092
1093 if (idx >= trans->trans_cfg->base_params->max_tfd_queue_size ||
1094 (!iwl_txq_used(txq, idx, txq->read_ptr, txq->write_ptr))) {
1095 WARN_ONCE(test_bit(txq_id, trans_pcie->txqs.queue_used),
1096 "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
1097 __func__, txq_id, idx,
1098 trans->trans_cfg->base_params->max_tfd_queue_size,
1099 txq->write_ptr, txq->read_ptr);
1100 return;
1101 }
1102
1103 for (idx = iwl_txq_inc_wrap(trans, idx); r != idx;
1104 r = iwl_txq_inc_wrap(trans, r)) {
1105 txq->read_ptr = iwl_txq_inc_wrap(trans, txq->read_ptr);
1106
1107 if (nfreed++ > 0) {
1108 IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
1109 idx, txq->write_ptr, r);
1110 iwl_force_nmi(trans);
1111 }
1112 }
1113
1114 if (txq->read_ptr == txq->write_ptr)
1115 iwl_pcie_clear_cmd_in_flight(trans);
1116
1117 iwl_txq_progress(txq);
1118 }
1119
1120 static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
1121 u16 txq_id)
1122 {
1123 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1124 u32 tbl_dw_addr;
1125 u32 tbl_dw;
1126 u16 scd_q2ratid;
1127
1128 scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;
1129
1130 tbl_dw_addr = trans_pcie->scd_base_addr +
1131 SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);
1132
1133 tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);
1134
1135 if (txq_id & 0x1)
1136 tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
1137 else
1138 tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);
1139
1140 iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);
1141
1142 return 0;
1143 }
1144
1145 /* Receiver address (actually, Rx station's index into station table),
1146 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
1147 #define BUILD_RAxTID(sta_id, tid) (((sta_id) << 4) + (tid))
1148
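/*
 * iwl_trans_pcie_txq_enable - configure a Tx queue in the scheduler
 *
 * Activates the queue on the given FIFO, optionally sets up the RA/TID
 * mapping for aggregation, and programs the read/write pointers to the
 * given SSN. Returns true if the SSN had to be bumped to work around the
 * SCD write pointer bug (see the comment on scd_bug below).
 */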
1149 bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
1150 const struct iwl_trans_txq_scd_cfg *cfg,
1151 unsigned int wdg_timeout)
1152 {
1153 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1154 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1155 int fifo = -1;
1156 bool scd_bug = false;
1157
1158 if (test_and_set_bit(txq_id, trans_pcie->txqs.queue_used))
1159 WARN_ONCE(1, "queue %d already used - expect issues", txq_id);
1160
1161 txq->wd_timeout = msecs_to_jiffies(wdg_timeout);
1162
1163 if (cfg) {
1164 fifo = cfg->fifo;
1165
1166 /* Disable the scheduler prior to configuring the cmd queue */
1167 if (txq_id == trans_pcie->txqs.cmd.q_id &&
1168 trans_pcie->scd_set_active)
1169 iwl_scd_enable_set_active(trans, 0);
1170
1171 /* Stop this Tx queue before configuring it */
1172 iwl_scd_txq_set_inactive(trans, txq_id);
1173
1174 /* Set this queue as a chain-building queue unless it is CMD */
1175 if (txq_id != trans_pcie->txqs.cmd.q_id)
1176 iwl_scd_txq_set_chain(trans, txq_id);
1177
1178 if (cfg->aggregate) {
1179 u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);
1180
1181 /* Map receiver-address / traffic-ID to this queue */
1182 iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);
1183
1184 /* enable aggregations for the queue */
1185 iwl_scd_txq_enable_agg(trans, txq_id);
1186 txq->ampdu = true;
1187 } else {
1188 /*
1189 * disable aggregations for the queue, this will also
1190 * make the ra_tid mapping configuration irrelevant
1191 * since it is now a non-AGG queue.
1192 */
1193 iwl_scd_txq_disable_agg(trans, txq_id);
1194
1195 ssn = txq->read_ptr;
1196 }
1197 } else {
1198 /*
1199 * If we need to move the SCD write pointer by steps of
1200 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
1201 * the op_mode know by returning true later.
1202 * Do this only in case cfg is NULL since this trick can
1203 * be done only if we have DQA enabled which is true for mvm
1204 * only. And mvm never sets a cfg pointer.
1205 * This is really ugly, but this is the easiest way out for
1206 * this sad hardware issue.
1207 * This bug has been fixed on devices 9000 and up.
1208 */
1209 scd_bug = !trans->trans_cfg->mq_rx_supported &&
1210 !((ssn - txq->write_ptr) & 0x3f) &&
1211 (ssn != txq->write_ptr);
1212 if (scd_bug)
1213 ssn++;
1214 }
1215
1216 /* Place first TFD at index corresponding to start sequence number.
1217 * Assumes that ssn_idx is valid (!= 0xFFF) */
1218 txq->read_ptr = (ssn & 0xff);
1219 txq->write_ptr = (ssn & 0xff);
1220 iwl_write_direct32(trans, HBUS_TARG_WRPTR,
1221 (ssn & 0xff) | (txq_id << 8));
1222
1223 if (cfg) {
1224 u8 frame_limit = cfg->frame_limit;
1225
1226 iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);
1227
1228 /* Set up Tx window size and frame limit for this queue */
1229 iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
1230 SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
1231 iwl_trans_write_mem32(trans,
1232 trans_pcie->scd_base_addr +
1233 SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
1234 SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
1235 SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));
1236
1237 /* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
1238 iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
1239 (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
1240 (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
1241 (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
1242 SCD_QUEUE_STTS_REG_MSK);
1243
1244 /* enable the scheduler for this queue (only) */
1245 if (txq_id == trans_pcie->txqs.cmd.q_id &&
1246 trans_pcie->scd_set_active)
1247 iwl_scd_enable_set_active(trans, BIT(txq_id));
1248
1249 IWL_DEBUG_TX_QUEUES(trans,
1250 "Activate queue %d on FIFO %d WrPtr: %d\n",
1251 txq_id, fifo, ssn & 0xff);
1252 } else {
1253 IWL_DEBUG_TX_QUEUES(trans,
1254 "Activate queue %d WrPtr: %d\n",
1255 txq_id, ssn & 0xff);
1256 }
1257
1258 return scd_bug;
1259 }
1260
1261 void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
1262 bool shared_mode)
1263 {
1264 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1265 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
1266
1267 txq->ampdu = !shared_mode;
1268 }
1269
1270 void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
1271 bool configure_scd)
1272 {
1273 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1274 u32 stts_addr = trans_pcie->scd_base_addr +
1275 SCD_TX_STTS_QUEUE_OFFSET(txq_id);
1276 static const u32 zero_val[4] = {};
1277
1278 trans_pcie->txqs.txq[txq_id]->frozen_expiry_remainder = 0;
1279 trans_pcie->txqs.txq[txq_id]->frozen = false;
1280
1281 /*
1282 * Upon HW Rfkill - we stop the device, and then stop the queues
1283 * in the op_mode. Just for the sake of the simplicity of the op_mode,
1284 * allow the op_mode to call txq_disable after it already called
1285 * stop_device.
1286 */
1287 if (!test_and_clear_bit(txq_id, trans_pcie->txqs.queue_used)) {
1288 WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
1289 "queue %d not used", txq_id);
1290 return;
1291 }
1292
1293 if (configure_scd) {
1294 iwl_scd_txq_set_inactive(trans, txq_id);
1295
1296 iwl_trans_write_mem(trans, stts_addr, (const void *)zero_val,
1297 ARRAY_SIZE(zero_val));
1298 }
1299
1300 iwl_pcie_txq_unmap(trans, txq_id);
1301 trans_pcie->txqs.txq[txq_id]->ampdu = false;
1302
1303 IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
1304 }
1305
1306 /*************** HOST COMMAND QUEUE FUNCTIONS *****/
1307
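/*
 * iwl_trans_pcie_block_txq_ptrs - block/unblock write pointer updates
 *
 * While blocked (CMD_BLOCK_TXQS), data queues keep queuing frames but the
 * hardware write pointers are not updated; unblocking writes them out.
 * The command queue itself is never blocked.
 */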
1308 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
1309 {
1310 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1311 int i;
1312
1313 for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1314 struct iwl_txq *txq = trans_pcie->txqs.txq[i];
1315
1316 if (i == trans_pcie->txqs.cmd.q_id)
1317 continue;
1318
1319 /* we skip the command queue (obviously) so it's OK to nest */
1320 spin_lock_nested(&txq->lock, 1);
1321
1322 if (!block && !(WARN_ON_ONCE(!txq->block))) {
1323 txq->block--;
1324 if (!txq->block) {
1325 iwl_write32(trans, HBUS_TARG_WRPTR,
1326 txq->write_ptr | (i << 8));
1327 }
1328 } else if (block) {
1329 txq->block++;
1330 }
1331
1332 spin_unlock(&txq->lock);
1333 }
1334 }
1335
1336 /*
1337 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
1338 * @trans: transport private data
1339 * @cmd: a pointer to the ucode command structure
1340 *
1341 * The function returns < 0 values to indicate the operation
1342 * failed. On success, it returns the index (>= 0) of the command in the
1343 * command queue.
1344 */
1345 int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
1346 struct iwl_host_cmd *cmd)
1347 {
1348 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1349 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1350 struct iwl_device_cmd *out_cmd;
1351 struct iwl_cmd_meta *out_meta;
1352 void *dup_buf = NULL;
1353 dma_addr_t phys_addr;
1354 int idx;
1355 u16 copy_size, cmd_size, tb0_size;
1356 bool had_nocopy = false;
1357 u8 group_id = iwl_cmd_groupid(cmd->id);
1358 int i, ret;
1359 u32 cmd_pos;
1360 const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
1361 u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
1362 unsigned long flags;
1363
1364 if (WARN(!trans->wide_cmd_header &&
1365 group_id > IWL_ALWAYS_LONG_GROUP,
1366 "unsupported wide command %#x\n", cmd->id))
1367 return -EINVAL;
1368
1369 if (group_id != 0) {
1370 copy_size = sizeof(struct iwl_cmd_header_wide);
1371 cmd_size = sizeof(struct iwl_cmd_header_wide);
1372 } else {
1373 copy_size = sizeof(struct iwl_cmd_header);
1374 cmd_size = sizeof(struct iwl_cmd_header);
1375 }
1376
1377 /* need one for the header if the first is NOCOPY */
1378 BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);
1379
1380 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1381 cmddata[i] = cmd->data[i];
1382 cmdlen[i] = cmd->len[i];
1383
1384 if (!cmd->len[i])
1385 continue;
1386
1387 /* need at least IWL_FIRST_TB_SIZE copied */
1388 if (copy_size < IWL_FIRST_TB_SIZE) {
1389 int copy = IWL_FIRST_TB_SIZE - copy_size;
1390
1391 if (copy > cmdlen[i])
1392 copy = cmdlen[i];
1393 cmdlen[i] -= copy;
1394 cmddata[i] += copy;
1395 copy_size += copy;
1396 }
1397
1398 if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
1399 had_nocopy = true;
1400 if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
1401 idx = -EINVAL;
1402 goto free_dup_buf;
1403 }
1404 } else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
1405 /*
1406 * This is also a chunk that isn't copied
1407 * to the static buffer so set had_nocopy.
1408 */
1409 had_nocopy = true;
1410
1411 /* only allowed once */
1412 if (WARN_ON(dup_buf)) {
1413 idx = -EINVAL;
1414 goto free_dup_buf;
1415 }
1416
1417 dup_buf = kmemdup(cmddata[i], cmdlen[i],
1418 GFP_ATOMIC);
1419 if (!dup_buf)
1420 return -ENOMEM;
1421 } else {
1422 /* NOCOPY must not be followed by normal! */
1423 if (WARN_ON(had_nocopy)) {
1424 idx = -EINVAL;
1425 goto free_dup_buf;
1426 }
1427 copy_size += cmdlen[i];
1428 }
1429 cmd_size += cmd->len[i];
1430 }
1431
1432 /*
1433 * If any of the command structures end up being larger than
1434 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
1435 * allocated into separate TFDs, then we will need to
1436 * increase the size of the buffers.
1437 */
1438 if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
1439 "Command %s (%#x) is too large (%d bytes)\n",
1440 iwl_get_cmd_string(trans, cmd->id),
1441 cmd->id, copy_size)) {
1442 idx = -EINVAL;
1443 goto free_dup_buf;
1444 }
1445
1446 spin_lock_irqsave(&txq->lock, flags);
1447
1448 if (iwl_txq_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
1449 spin_unlock_irqrestore(&txq->lock, flags);
1450
1451 IWL_ERR(trans, "No space in command queue\n");
1452 iwl_op_mode_cmd_queue_full(trans->op_mode);
1453 idx = -ENOSPC;
1454 goto free_dup_buf;
1455 }
1456
1457 idx = iwl_txq_get_cmd_index(txq, txq->write_ptr);
1458 out_cmd = txq->entries[idx].cmd;
1459 out_meta = &txq->entries[idx].meta;
1460
1461 /* re-initialize, this also marks the SG list as unused */
1462 memset(out_meta, 0, sizeof(*out_meta));
1463 if (cmd->flags & CMD_WANT_SKB)
1464 out_meta->source = cmd;
1465
1466 /* set up the header */
1467 if (group_id != 0) {
1468 out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
1469 out_cmd->hdr_wide.group_id = group_id;
1470 out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
1471 out_cmd->hdr_wide.length =
1472 cpu_to_le16(cmd_size -
1473 sizeof(struct iwl_cmd_header_wide));
1474 out_cmd->hdr_wide.reserved = 0;
1475 out_cmd->hdr_wide.sequence =
1476 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1477 INDEX_TO_SEQ(txq->write_ptr));
1478
1479 cmd_pos = sizeof(struct iwl_cmd_header_wide);
1480 copy_size = sizeof(struct iwl_cmd_header_wide);
1481 } else {
1482 out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
1483 out_cmd->hdr.sequence =
1484 cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->txqs.cmd.q_id) |
1485 INDEX_TO_SEQ(txq->write_ptr));
1486 out_cmd->hdr.group_id = 0;
1487
1488 cmd_pos = sizeof(struct iwl_cmd_header);
1489 copy_size = sizeof(struct iwl_cmd_header);
1490 }
1491
1492 /* and copy the data that needs to be copied */
1493 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1494 int copy;
1495
1496 if (!cmd->len[i])
1497 continue;
1498
1499 /* copy everything if not nocopy/dup */
1500 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1501 IWL_HCMD_DFL_DUP))) {
1502 copy = cmd->len[i];
1503
1504 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1505 cmd_pos += copy;
1506 copy_size += copy;
1507 continue;
1508 }
1509
1510 /*
1511 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
1512 * in total (for bi-directional DMA), but copy up to what
1513 * we can fit into the payload for debug dump purposes.
1514 */
1515 copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);
1516
1517 memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
1518 cmd_pos += copy;
1519
1520 /* However, treat copy_size the proper way, we need it below */
1521 if (copy_size < IWL_FIRST_TB_SIZE) {
1522 copy = IWL_FIRST_TB_SIZE - copy_size;
1523
1524 if (copy > cmd->len[i])
1525 copy = cmd->len[i];
1526 copy_size += copy;
1527 }
1528 }
1529
1530 IWL_DEBUG_HC(trans,
1531 "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
1532 iwl_get_cmd_string(trans, cmd->id),
1533 group_id, out_cmd->hdr.cmd,
1534 le16_to_cpu(out_cmd->hdr.sequence),
1535 cmd_size, txq->write_ptr, idx, trans_pcie->txqs.cmd.q_id);
1536
1537 /* start the TFD with the minimum copy bytes */
1538 tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
1539 memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
1540 iwl_pcie_txq_build_tfd(trans, txq,
1541 iwl_txq_get_first_tb_dma(txq, idx),
1542 tb0_size, true);
1543
1544 /* map first command fragment, if any remains */
1545 if (copy_size > tb0_size) {
1546 phys_addr = dma_map_single(trans->dev,
1547 ((u8 *)&out_cmd->hdr) + tb0_size,
1548 copy_size - tb0_size,
1549 DMA_TO_DEVICE);
1550 if (dma_mapping_error(trans->dev, phys_addr)) {
1551 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1552 txq->write_ptr);
1553 idx = -ENOMEM;
1554 goto out;
1555 }
1556
1557 iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
1558 copy_size - tb0_size, false);
1559 }
1560
1561 /* map the remaining (adjusted) nocopy/dup fragments */
1562 for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
1563 void *data = (void *)(uintptr_t)cmddata[i];
1564
1565 if (!cmdlen[i])
1566 continue;
1567 if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
1568 IWL_HCMD_DFL_DUP)))
1569 continue;
1570 if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
1571 data = dup_buf;
1572 phys_addr = dma_map_single(trans->dev, data,
1573 cmdlen[i], DMA_TO_DEVICE);
1574 if (dma_mapping_error(trans->dev, phys_addr)) {
1575 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq,
1576 txq->write_ptr);
1577 idx = -ENOMEM;
1578 goto out;
1579 }
1580
1581 iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
1582 }
1583
1584 BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
1585 out_meta->flags = cmd->flags;
1586 if (WARN_ON_ONCE(txq->entries[idx].free_buf))
1587 kfree_sensitive(txq->entries[idx].free_buf);
1588 txq->entries[idx].free_buf = dup_buf;
1589
1590 trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);
1591
1592 /* start timer if queue currently empty */
1593 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
1594 mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
1595
1596 ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
1597 if (ret < 0) {
1598 idx = ret;
1599 goto out;
1600 }
1601
1602 if (cmd->flags & CMD_BLOCK_TXQS)
1603 iwl_trans_pcie_block_txq_ptrs(trans, true);
1604
1605 /* Increment and update queue's write index */
1606 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
1607 iwl_pcie_txq_inc_wr_ptr(trans, txq);
1608
1609 out:
1610 spin_unlock_irqrestore(&txq->lock, flags);
1611 free_dup_buf:
1612 if (idx < 0)
1613 kfree(dup_buf);
1614 return idx;
1615 }
1616
1617 /*
1618 * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1619 * @rxb: Rx buffer to reclaim
1620 */
1621 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1622 struct iwl_rx_cmd_buffer *rxb)
1623 {
1624 struct iwl_rx_packet *pkt = rxb_addr(rxb);
1625 u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1626 u8 group_id;
1627 u32 cmd_id;
1628 int txq_id = SEQ_TO_QUEUE(sequence);
1629 int index = SEQ_TO_INDEX(sequence);
1630 int cmd_index;
1631 struct iwl_device_cmd *cmd;
1632 struct iwl_cmd_meta *meta;
1633 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1634 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
1635
1636 /* If a Tx command is being handled and it isn't in the actual
1637 * command queue then a command routing bug has been introduced
1638 * in the queue management code. */
1639 if (WARN(txq_id != trans_pcie->txqs.cmd.q_id,
1640 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1641 txq_id, trans_pcie->txqs.cmd.q_id, sequence, txq->read_ptr,
1642 txq->write_ptr)) {
1643 iwl_print_hex_error(trans, pkt, 32);
1644 return;
1645 }
1646
1647 spin_lock_bh(&txq->lock);
1648
1649 cmd_index = iwl_txq_get_cmd_index(txq, index);
1650 cmd = txq->entries[cmd_index].cmd;
1651 meta = &txq->entries[cmd_index].meta;
1652 group_id = cmd->hdr.group_id;
1653 cmd_id = WIDE_ID(group_id, cmd->hdr.cmd);
1654
1655 if (trans->trans_cfg->gen2)
1656 iwl_txq_gen2_tfd_unmap(trans, meta,
1657 iwl_txq_get_tfd(trans, txq, index));
1658 else
1659 iwl_txq_gen1_tfd_unmap(trans, meta, txq, index);
1660
1661 /* Input error checking is done when commands are added to queue. */
1662 if (meta->flags & CMD_WANT_SKB) {
1663 struct page *p = rxb_steal_page(rxb);
1664
1665 meta->source->resp_pkt = pkt;
1666 meta->source->_rx_page_addr = (unsigned long)page_address(p);
1667 meta->source->_rx_page_order = trans_pcie->rx_page_order;
1668 }
1669
1670 if (meta->flags & CMD_BLOCK_TXQS)
1671 iwl_trans_pcie_block_txq_ptrs(trans, false);
1672
1673 iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1674
1675 if (!(meta->flags & CMD_ASYNC)) {
1676 if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1677 IWL_WARN(trans,
1678 "HCMD_ACTIVE already clear for command %s\n",
1679 iwl_get_cmd_string(trans, cmd_id));
1680 }
1681 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1682 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1683 iwl_get_cmd_string(trans, cmd_id));
1684 wake_up(&trans->wait_command_queue);
1685 }
1686
1687 meta->flags = 0;
1688
1689 spin_unlock_bh(&txq->lock);
1690 }
1691
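/*
 * iwl_fill_data_tbs - map the skb payload into transfer buffers
 *
 * Maps the remainder of the skb head after @hdr_len bytes of header, plus
 * every page fragment, and adds each mapping as a TB of the current TFD.
 */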
1692 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1693 struct iwl_txq *txq, u8 hdr_len,
1694 struct iwl_cmd_meta *out_meta)
1695 {
1696 u16 head_tb_len;
1697 int i;
1698
1699 /*
1700 * Set up TFD's third entry to point directly to remainder
1701 * of skb's head, if any
1702 */
1703 head_tb_len = skb_headlen(skb) - hdr_len;
1704
1705 if (head_tb_len > 0) {
1706 dma_addr_t tb_phys = dma_map_single(trans->dev,
1707 skb->data + hdr_len,
1708 head_tb_len, DMA_TO_DEVICE);
1709 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1710 return -EINVAL;
1711 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb->data + hdr_len,
1712 tb_phys, head_tb_len);
1713 iwl_pcie_txq_build_tfd(trans, txq, tb_phys, head_tb_len, false);
1714 }
1715
1716 /* set up the remaining entries to point to the data */
1717 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
1718 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
1719 dma_addr_t tb_phys;
1720 int tb_idx;
1721
1722 if (!skb_frag_size(frag))
1723 continue;
1724
1725 tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
1726 skb_frag_size(frag), DMA_TO_DEVICE);
1727
1728 if (unlikely(dma_mapping_error(trans->dev, tb_phys)))
1729 return -EINVAL;
1730 trace_iwlwifi_dev_tx_tb(trans->dev, skb, skb_frag_address(frag),
1731 tb_phys, skb_frag_size(frag));
1732 tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
1733 skb_frag_size(frag), false);
1734 if (tb_idx < 0)
1735 return tb_idx;
1736
1737 out_meta->tbs |= BIT(tb_idx);
1738 }
1739
1740 return 0;
1741 }
1742
1743 #ifdef CONFIG_INET
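/*
 * iwl_pcie_get_page_hdr - reserve @len bytes for TSO headers
 *
 * Headers are carved out of a per-CPU page; pages are DMA-mapped once,
 * reference counted, and linked via iwl_tso_page_info so that
 * iwl_pcie_free_tso_pages() can release them when the skb is freed.
 */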
1744 static void *iwl_pcie_get_page_hdr(struct iwl_trans *trans,
1745 size_t len, struct sk_buff *skb)
1746 {
1747 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1748 struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->txqs.tso_hdr_page);
1749 struct iwl_tso_page_info *info;
1750 struct page **page_ptr;
1751 dma_addr_t phys;
1752 void *ret;
1753
1754 page_ptr = (void *)((u8 *)skb->cb + trans_pcie->txqs.page_offs);
1755
1756 if (WARN_ON(*page_ptr))
1757 return NULL;
1758
1759 if (!p->page)
1760 goto alloc;
1761
1762 /*
1763 * Check if there's enough room on this page
1764 *
1765 * Note that we put a page chaining pointer *last* in the
1766 * page - we need it somewhere, and if it's there then we
1767 * avoid DMA mapping the last bits of the page which may
1768 * trigger the 32-bit boundary hardware bug.
1769 *
1770 * (see also get_workaround_page() in tx-gen2.c)
1771 */
1772 if (((unsigned long)p->pos & ~PAGE_MASK) + len < IWL_TSO_PAGE_DATA_SIZE) {
1773 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1774 goto out;
1775 }
1776
1777 /* We don't have enough room on this page, get a new one. */
1778 iwl_pcie_free_and_unmap_tso_page(trans, p->page);
1779
1780 alloc:
1781 p->page = alloc_page(GFP_ATOMIC);
1782 if (!p->page)
1783 return NULL;
1784 p->pos = page_address(p->page);
1785
1786 info = IWL_TSO_PAGE_INFO(page_address(p->page));
1787
1788 /* set the chaining pointer to NULL */
1789 info->next = NULL;
1790
1791 /* Create a DMA mapping for the page */
1792 phys = dma_map_page_attrs(trans->dev, p->page, 0, PAGE_SIZE,
1793 DMA_TO_DEVICE, DMA_ATTR_SKIP_CPU_SYNC);
1794 if (unlikely(dma_mapping_error(trans->dev, phys))) {
1795 __free_page(p->page);
1796 p->page = NULL;
1797
1798 return NULL;
1799 }
1800
1801 /* Store physical address and set use count */
1802 info->dma_addr = phys;
1803 refcount_set(&info->use_count, 1);
1804 out:
1805 *page_ptr = p->page;
1806 /* Return an internal reference for the caller */
1807 refcount_inc(&info->use_count);
1808 ret = p->pos;
1809 p->pos += len;
1810
1811 return ret;
1812 }
1813
1814 /**
1815 * iwl_pcie_get_sgt_tb_phys - Find TB address in mapped SG list
1816 * @sgt: scatter gather table
1817 * @offset: Offset into the mapped memory (i.e. SKB payload data)
1818 * @len: Length of the area
1819 *
1820 * Find the DMA address that corresponds to the SKB payload data at the
1821 * position given by @offset.
1822 *
1823 * Returns: Address for TB entry
1824 */
1825 dma_addr_t iwl_pcie_get_sgt_tb_phys(struct sg_table *sgt, unsigned int offset,
1826 unsigned int len)
1827 {
1828 struct scatterlist *sg;
1829 unsigned int sg_offset = 0;
1830 int i;
1831
1832 /*
1833 * Search the mapped DMA areas in the SG for the area that contains the
1834 * data at offset with the given length.
1835 */
1836 for_each_sgtable_dma_sg(sgt, sg, i) {
1837 if (offset >= sg_offset &&
1838 offset + len <= sg_offset + sg_dma_len(sg))
1839 return sg_dma_address(sg) + offset - sg_offset;
1840
1841 sg_offset += sg_dma_len(sg);
1842 }
1843
1844 WARN_ON_ONCE(1);
1845
1846 return DMA_MAPPING_ERROR;
1847 }
1848
1849 /**
1850 * iwl_pcie_prep_tso - Prepare TSO page and SKB for sending
1851 * @trans: transport private data
1852 * @skb: the SKB to map
1853 * @cmd_meta: command meta to store the scatter list information for unmapping
1854 * @hdr: output argument for TSO headers
1855 * @hdr_room: requested length for TSO headers
1856 *
1857 * Allocate space for a scatter gather list and TSO headers and map the SKB
1858 * using the scatter gather list. The SKB is unmapped again when the page is
1859  * freed at the end of the operation.
1860 *
1861 * Returns: newly allocated and mapped scatter gather table with list
1862 */
1863 struct sg_table *iwl_pcie_prep_tso(struct iwl_trans *trans, struct sk_buff *skb,
1864 struct iwl_cmd_meta *cmd_meta,
1865 u8 **hdr, unsigned int hdr_room)
1866 {
1867 struct sg_table *sgt;
1868
1869 if (WARN_ON_ONCE(skb_has_frag_list(skb)))
1870 return NULL;
1871
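	/*
	 * Carve everything out of one TSO page allocation: the TSO headers
	 * first, then a suitably aligned struct sg_table, followed by the
	 * scatterlist entries (one per fragment plus one for the head).
	 */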
1872 *hdr = iwl_pcie_get_page_hdr(trans,
1873 hdr_room + __alignof__(struct sg_table) +
1874 sizeof(struct sg_table) +
1875 (skb_shinfo(skb)->nr_frags + 1) *
1876 sizeof(struct scatterlist),
1877 skb);
1878 if (!*hdr)
1879 return NULL;
1880
1881 sgt = (void *)PTR_ALIGN(*hdr + hdr_room, __alignof__(struct sg_table));
1882 sgt->sgl = (void *)(sgt + 1);
1883
1884 sg_init_table(sgt->sgl, skb_shinfo(skb)->nr_frags + 1);
1885
1886 /* Only map the data, not the header (it is copied to the TSO page) */
1887 sgt->orig_nents = skb_to_sgvec(skb, sgt->sgl, skb_headlen(skb),
1888 skb->data_len);
1889 if (WARN_ON_ONCE(sgt->orig_nents <= 0))
1890 return NULL;
1891
1892 /* And map the entire SKB */
1893 if (dma_map_sgtable(trans->dev, sgt, DMA_TO_DEVICE, 0) < 0)
1894 return NULL;
1895
1896 /* Store non-zero (i.e. valid) offset for unmapping */
1897 cmd_meta->sg_offset = (unsigned long) sgt & ~PAGE_MASK;
1898
1899 return sgt;
1900 }
1901
1902 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
1903 struct iwl_txq *txq, u8 hdr_len,
1904 struct iwl_cmd_meta *out_meta,
1905 struct iwl_device_tx_cmd *dev_cmd,
1906 u16 tb1_len)
1907 {
1908 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1909 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
1910 struct ieee80211_hdr *hdr = (void *)skb->data;
1911 unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
1912 unsigned int mss = skb_shinfo(skb)->gso_size;
1913 unsigned int data_offset = 0;
1914 u16 length, iv_len, amsdu_pad;
1915 dma_addr_t start_hdr_phys;
1916 u8 *start_hdr, *pos_hdr;
1917 struct sg_table *sgt;
1918 struct tso_t tso;
1919
1920 /* if the packet is protected, then it must be CCMP or GCMP */
1921 BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
1922 iv_len = ieee80211_has_protected(hdr->frame_control) ?
1923 IEEE80211_CCMP_HDR_LEN : 0;
1924
1925 trace_iwlwifi_dev_tx(trans->dev, skb,
1926 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
1927 trans_pcie->txqs.tfd.size,
1928 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
1929
1930 ip_hdrlen = skb_network_header_len(skb);
1931 snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
1932 total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
1933 amsdu_pad = 0;
1934
1935 /* total amount of header we may need for this A-MSDU */
1936 hdr_room = DIV_ROUND_UP(total_len, mss) *
1937 (3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
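	/*
	 * Per subframe this accounts for up to 3 bytes of padding plus the
	 * subframe (DA/SA/length) header and the SNAP/IP/TCP headers; the
	 * IV, if present, is copied only once in front of the first subframe.
	 */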
1938
1939 	/* Our device supports at most 9 segments, so it will fit in a single page */
1940 sgt = iwl_pcie_prep_tso(trans, skb, out_meta, &start_hdr, hdr_room);
1941 if (!sgt)
1942 return -ENOMEM;
1943
1944 start_hdr_phys = iwl_pcie_get_tso_page_phys(start_hdr);
1945 pos_hdr = start_hdr;
1946 memcpy(pos_hdr, skb->data + hdr_len, iv_len);
1947 pos_hdr += iv_len;
1948
1949 /*
1950 * Pull the ieee80211 header + IV to be able to use TSO core,
1951 * we will restore it for the tx_status flow.
1952 */
1953 skb_pull(skb, hdr_len + iv_len);
1954
1955 /*
1956 * Remove the length of all the headers that we don't actually
1957 * have in the MPDU by themselves, but that we duplicate into
1958 * all the different MSDUs inside the A-MSDU.
1959 */
1960 le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
1961
1962 tso_start(skb, &tso);
1963
1964 while (total_len) {
1965 /* this is the data left for this subframe */
1966 unsigned int data_left =
1967 min_t(unsigned int, mss, total_len);
1968 unsigned int hdr_tb_len;
1969 dma_addr_t hdr_tb_phys;
1970 u8 *subf_hdrs_start = pos_hdr;
1971
1972 total_len -= data_left;
1973
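		/*
		 * A-MSDU subframes are padded to a 4-byte boundary; write out
		 * the padding computed for the previous subframe (0 for the
		 * first one) before building this subframe's header.
		 */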
1974 memset(pos_hdr, 0, amsdu_pad);
1975 pos_hdr += amsdu_pad;
1976 amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
1977 data_left)) & 0x3;
1978 ether_addr_copy(pos_hdr, ieee80211_get_DA(hdr));
1979 pos_hdr += ETH_ALEN;
1980 ether_addr_copy(pos_hdr, ieee80211_get_SA(hdr));
1981 pos_hdr += ETH_ALEN;
1982
1983 length = snap_ip_tcp_hdrlen + data_left;
1984 *((__be16 *)pos_hdr) = cpu_to_be16(length);
1985 pos_hdr += sizeof(length);
1986
1987 /*
1988 * This will copy the SNAP as well which will be considered
1989 * as MAC header.
1990 */
1991 tso_build_hdr(skb, pos_hdr, &tso, data_left, !total_len);
1992
1993 pos_hdr += snap_ip_tcp_hdrlen;
1994
1995 hdr_tb_len = pos_hdr - start_hdr;
1996 hdr_tb_phys = iwl_pcie_get_tso_page_phys(start_hdr);
1997
1998 iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
1999 hdr_tb_len, false);
2000 trace_iwlwifi_dev_tx_tb(trans->dev, skb, start_hdr,
2001 hdr_tb_phys, hdr_tb_len);
2002 /* add this subframe's headers' length to the tx_cmd */
2003 le16_add_cpu(&tx_cmd->len, pos_hdr - subf_hdrs_start);
2004
2005 /* prepare the start_hdr for the next subframe */
2006 start_hdr = pos_hdr;
2007
2008 /* put the payload */
2009 while (data_left) {
2010 unsigned int size = min_t(unsigned int, tso.size,
2011 data_left);
2012 dma_addr_t tb_phys;
2013
2014 tb_phys = iwl_pcie_get_sgt_tb_phys(sgt, data_offset, size);
2015 /* Not a real mapping error, use direct comparison */
2016 if (unlikely(tb_phys == DMA_MAPPING_ERROR))
2017 return -EINVAL;
2018
2019 iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2020 size, false);
2021 trace_iwlwifi_dev_tx_tb(trans->dev, skb, tso.data,
2022 tb_phys, size);
2023
2024 data_left -= size;
2025 data_offset += size;
2026 tso_build_data(skb, &tso, size);
2027 }
2028 }
2029
2030 dma_sync_single_for_device(trans->dev, start_hdr_phys, hdr_room,
2031 DMA_TO_DEVICE);
2032
2033 	/* re-add the WiFi header and IV */
2034 skb_push(skb, hdr_len + iv_len);
2035
2036 return 0;
2037 }
2038 #else /* CONFIG_INET */
2039 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2040 struct iwl_txq *txq, u8 hdr_len,
2041 struct iwl_cmd_meta *out_meta,
2042 struct iwl_device_tx_cmd *dev_cmd,
2043 u16 tb1_len)
2044 {
2045 /* No A-MSDU without CONFIG_INET */
2046 WARN_ON(1);
2047
2048 return -1;
2049 }
2050 #endif /* CONFIG_INET */
2051
2052 #define IWL_TX_CRC_SIZE 4
2053 #define IWL_TX_DELIMITER_SIZE 4
2054
2055 /*
2056 * iwl_txq_gen1_update_byte_cnt_tbl - Set up entry in Tx byte-count array
2057 */
2058 static void iwl_txq_gen1_update_byte_cnt_tbl(struct iwl_trans *trans,
2059 struct iwl_txq *txq, u16 byte_cnt,
2060 int num_tbs)
2061 {
2062 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2063 struct iwlagn_scd_bc_tbl *scd_bc_tbl;
2064 int write_ptr = txq->write_ptr;
2065 int txq_id = txq->id;
2066 u8 sec_ctl = 0;
2067 u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
2068 __le16 bc_ent;
2069 struct iwl_device_tx_cmd *dev_cmd = txq->entries[txq->write_ptr].cmd;
2070 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2071 u8 sta_id = tx_cmd->sta_id;
2072
2073 scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2074
2075 sec_ctl = tx_cmd->sec_ctl;
2076
2077 switch (sec_ctl & TX_CMD_SEC_MSK) {
2078 case TX_CMD_SEC_CCM:
2079 len += IEEE80211_CCMP_MIC_LEN;
2080 break;
2081 case TX_CMD_SEC_TKIP:
2082 len += IEEE80211_TKIP_ICV_LEN;
2083 break;
2084 case TX_CMD_SEC_WEP:
2085 len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
2086 break;
2087 }
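	/* some devices expect the byte count in dwords rather than bytes */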
2088 if (trans_pcie->txqs.bc_table_dword)
2089 len = DIV_ROUND_UP(len, 4);
2090
2091 if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
2092 return;
2093
2094 bc_ent = cpu_to_le16(len | (sta_id << 12));
2095
2096 scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;
2097
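	/*
	 * The first TFD_QUEUE_SIZE_BC_DUP entries are mirrored after the end
	 * of the table, presumably so the HW can read past the wrap point.
	 */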
2098 if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
2099 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] =
2100 bc_ent;
2101 }
2102
2103 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2104 struct iwl_device_tx_cmd *dev_cmd, int txq_id)
2105 {
2106 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2107 struct ieee80211_hdr *hdr;
2108 struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2109 struct iwl_cmd_meta *out_meta;
2110 struct iwl_txq *txq;
2111 dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2112 void *tb1_addr;
2113 void *tfd;
2114 u16 len, tb1_len;
2115 bool wait_write_ptr;
2116 __le16 fc;
2117 u8 hdr_len;
2118 u16 wifi_seq;
2119 bool amsdu;
2120
2121 txq = trans_pcie->txqs.txq[txq_id];
2122
2123 if (WARN_ONCE(!test_bit(txq_id, trans_pcie->txqs.queue_used),
2124 "TX on unused queue %d\n", txq_id))
2125 return -EINVAL;
2126
2127 if (skb_is_nonlinear(skb) &&
2128 skb_shinfo(skb)->nr_frags > IWL_TRANS_PCIE_MAX_FRAGS(trans_pcie) &&
2129 __skb_linearize(skb))
2130 return -ENOMEM;
2131
2132 /* mac80211 always puts the full header into the SKB's head,
2133 * so there's no need to check if it's readable there
2134 */
2135 hdr = (struct ieee80211_hdr *)skb->data;
2136 fc = hdr->frame_control;
2137 hdr_len = ieee80211_hdrlen(fc);
2138
2139 spin_lock(&txq->lock);
2140
2141 if (iwl_txq_space(trans, txq) < txq->high_mark) {
2142 iwl_txq_stop(trans, txq);
2143
2144 		/* don't put the packet on the ring if there is no room */
2145 if (unlikely(iwl_txq_space(trans, txq) < 3)) {
2146 struct iwl_device_tx_cmd **dev_cmd_ptr;
2147
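			/*
			 * Stash the prebuilt TX command in skb->cb so the
			 * reclaim path can re-submit this frame once space
			 * frees up again.
			 */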
2148 dev_cmd_ptr = (void *)((u8 *)skb->cb +
2149 trans_pcie->txqs.dev_cmd_offs);
2150
2151 *dev_cmd_ptr = dev_cmd;
2152 __skb_queue_tail(&txq->overflow_q, skb);
2153
2154 spin_unlock(&txq->lock);
2155 return 0;
2156 }
2157 }
2158
2159 /* In AGG mode, the index in the ring must correspond to the WiFi
2160 	 * sequence number. This is a HW requirement that helps the SCD to parse
2161 * the BA.
2162 * Check here that the packets are in the right place on the ring.
2163 */
2164 wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2165 WARN_ONCE(txq->ampdu &&
2166 (wifi_seq & 0xff) != txq->write_ptr,
2167 "Q: %d WiFi Seq %d tfdNum %d",
2168 txq_id, wifi_seq, txq->write_ptr);
2169
2170 /* Set up driver data for this TFD */
2171 txq->entries[txq->write_ptr].skb = skb;
2172 txq->entries[txq->write_ptr].cmd = dev_cmd;
2173
2174 dev_cmd->hdr.sequence =
2175 cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2176 INDEX_TO_SEQ(txq->write_ptr)));
2177
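	/*
	 * TB0 is a small per-entry DMA buffer; the start of the TX command is
	 * copied into it below, and the command's scratch pointers are set to
	 * point at the scratch field inside that buffer.
	 */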
2178 tb0_phys = iwl_txq_get_first_tb_dma(txq, txq->write_ptr);
2179 scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2180 offsetof(struct iwl_tx_cmd, scratch);
2181
2182 tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2183 tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2184
2185 /* Set up first empty entry in queue's array of Tx/cmd buffers */
2186 out_meta = &txq->entries[txq->write_ptr].meta;
2187 memset(out_meta, 0, sizeof(*out_meta));
2188
2189 /*
2190 * The second TB (tb1) points to the remainder of the TX command
2191 * and the 802.11 header - dword aligned size
2192 * (This calculation modifies the TX command, so do it before the
2193 * setup of the first TB)
2194 */
2195 len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
2196 hdr_len - IWL_FIRST_TB_SIZE;
2197 /* do not align A-MSDU to dword as the subframe header aligns it */
2198 amsdu = ieee80211_is_data_qos(fc) &&
2199 (*ieee80211_get_qos_ctl(hdr) &
2200 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2201 if (!amsdu) {
2202 tb1_len = ALIGN(len, 4);
2203 /* Tell NIC about any 2-byte padding after MAC header */
2204 if (tb1_len != len)
2205 tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
2206 } else {
2207 tb1_len = len;
2208 }
2209
2210 /*
2211 	 * The first TB points to bi-directional DMA data; we'll
2212 * memcpy the data into it later.
2213 */
2214 iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
2215 IWL_FIRST_TB_SIZE, true);
2216
2217 /* there must be data left over for TB1 or this code must be changed */
2218 BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
2219 BUILD_BUG_ON(sizeof(struct iwl_cmd_header) +
2220 offsetofend(struct iwl_tx_cmd, scratch) >
2221 IWL_FIRST_TB_SIZE);
2222
2223 /* map the data for TB1 */
2224 tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2225 tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2226 if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2227 goto out_err;
2228 iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2229
2230 trace_iwlwifi_dev_tx(trans->dev, skb,
2231 iwl_txq_get_tfd(trans, txq, txq->write_ptr),
2232 trans_pcie->txqs.tfd.size,
2233 &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2234 hdr_len);
2235
2236 /*
2237 * If gso_size wasn't set, don't give the frame "amsdu treatment"
2238 * (adding subframes, etc.).
2239 * This can happen in some testing flows when the amsdu was already
2240 * pre-built, and we just need to send the resulting skb.
2241 */
2242 if (amsdu && skb_shinfo(skb)->gso_size) {
2243 if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2244 out_meta, dev_cmd,
2245 tb1_len)))
2246 goto out_err;
2247 } else {
2248 struct sk_buff *frag;
2249
2250 if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2251 out_meta)))
2252 goto out_err;
2253
2254 skb_walk_frags(skb, frag) {
2255 if (unlikely(iwl_fill_data_tbs(trans, frag, txq, 0,
2256 out_meta)))
2257 goto out_err;
2258 }
2259 }
2260
2261 /* building the A-MSDU might have changed this data, so memcpy it now */
2262 memcpy(&txq->first_tb_bufs[txq->write_ptr], dev_cmd, IWL_FIRST_TB_SIZE);
2263
2264 tfd = iwl_txq_get_tfd(trans, txq, txq->write_ptr);
2265 /* Set up entry for this TFD in Tx byte-count array */
2266 iwl_txq_gen1_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2267 iwl_txq_gen1_tfd_get_num_tbs(tfd));
2268
2269 wait_write_ptr = ieee80211_has_morefrags(fc);
2270
2271 /* start timer if queue currently empty */
2272 if (txq->read_ptr == txq->write_ptr && txq->wd_timeout) {
2273 /*
2274 		 * If the TXQ is active, arm the timer now; if not,
2275 		 * store the remainder so that the timer will be
2276 		 * armed with the right value when the station
2277 		 * wakes up.
2278 */
2279 if (!txq->frozen)
2280 mod_timer(&txq->stuck_timer,
2281 jiffies + txq->wd_timeout);
2282 else
2283 txq->frozen_expiry_remainder = txq->wd_timeout;
2284 }
2285
2286 /* Tell device the write index *just past* this latest filled TFD */
2287 txq->write_ptr = iwl_txq_inc_wrap(trans, txq->write_ptr);
2288 if (!wait_write_ptr)
2289 iwl_pcie_txq_inc_wr_ptr(trans, txq);
2290
2291 /*
2292 * At this point the frame is "transmitted" successfully
2293 * and we will get a TX status notification eventually.
2294 */
2295 spin_unlock(&txq->lock);
2296 return 0;
2297 out_err:
2298 iwl_txq_gen1_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2299 spin_unlock(&txq->lock);
2300 return -1;
2301 }
2302
2303 static void iwl_txq_gen1_inval_byte_cnt_tbl(struct iwl_trans *trans,
2304 struct iwl_txq *txq,
2305 int read_ptr)
2306 {
2307 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2308 struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->txqs.scd_bc_tbls.addr;
2309 int txq_id = txq->id;
2310 u8 sta_id = 0;
2311 __le16 bc_ent;
2312 struct iwl_device_tx_cmd *dev_cmd = txq->entries[read_ptr].cmd;
2313 struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2314
2315 WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);
2316
2317 if (txq_id != trans_pcie->txqs.cmd.q_id)
2318 sta_id = tx_cmd->sta_id;
2319
2320 bc_ent = cpu_to_le16(1 | (sta_id << 12));
2321
2322 scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;
2323
2324 if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
2325 scd_bc_tbl[txq_id].tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] =
2326 bc_ent;
2327 }
2328
2329 /* Frees buffers until index _not_ inclusive */
2330 void iwl_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
2331 struct sk_buff_head *skbs, bool is_flush)
2332 {
2333 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2334 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2335 int tfd_num, read_ptr, last_to_free;
2336 int txq_read_ptr, txq_write_ptr;
2337
2338 	/* This function is not meant to release the cmd queue */
2339 if (WARN_ON(txq_id == trans_pcie->txqs.cmd.q_id))
2340 return;
2341
2342 if (WARN_ON(!txq))
2343 return;
2344
2345 tfd_num = iwl_txq_get_cmd_index(txq, ssn);
2346
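	/*
	 * reclaim_lock serializes reclaim; txq->lock is only taken briefly to
	 * snapshot the ring pointers here and to publish the new read pointer
	 * later on.
	 */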
2347 spin_lock_bh(&txq->reclaim_lock);
2348
2349 spin_lock(&txq->lock);
2350 txq_read_ptr = txq->read_ptr;
2351 txq_write_ptr = txq->write_ptr;
2352 spin_unlock(&txq->lock);
2353
2354 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr);
2355
2356 if (!test_bit(txq_id, trans_pcie->txqs.queue_used)) {
2357 IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
2358 txq_id, ssn);
2359 goto out;
2360 }
2361
2362 if (read_ptr == tfd_num)
2363 goto out;
2364
2365 IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d (%d) -> %d (%d)\n",
2366 txq_id, read_ptr, txq_read_ptr, tfd_num, ssn);
2367
2368 	/* Since we free until the index, _not_ inclusive, the one before it
2369 	 * is the last we will free; that entry must be in use.
2370 */
2371 last_to_free = iwl_txq_dec_wrap(trans, tfd_num);
2372
2373 if (!iwl_txq_used(txq, last_to_free, txq_read_ptr, txq_write_ptr)) {
2374 IWL_ERR(trans,
2375 "%s: Read index for txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
2376 __func__, txq_id, last_to_free,
2377 trans->trans_cfg->base_params->max_tfd_queue_size,
2378 txq_write_ptr, txq_read_ptr);
2379
2380 iwl_op_mode_time_point(trans->op_mode,
2381 IWL_FW_INI_TIME_POINT_FAKE_TX,
2382 NULL);
2383 goto out;
2384 }
2385
2386 if (WARN_ON(!skb_queue_empty(skbs)))
2387 goto out;
2388
2389 for (;
2390 read_ptr != tfd_num;
2391 txq_read_ptr = iwl_txq_inc_wrap(trans, txq_read_ptr),
2392 read_ptr = iwl_txq_get_cmd_index(txq, txq_read_ptr)) {
2393 struct iwl_cmd_meta *cmd_meta = &txq->entries[read_ptr].meta;
2394 struct sk_buff *skb = txq->entries[read_ptr].skb;
2395
2396 if (WARN_ONCE(!skb, "no SKB at %d (%d) on queue %d\n",
2397 read_ptr, txq_read_ptr, txq_id))
2398 continue;
2399
2400 iwl_pcie_free_tso_pages(trans, skb, cmd_meta);
2401
2402 __skb_queue_tail(skbs, skb);
2403
2404 txq->entries[read_ptr].skb = NULL;
2405
2406 if (!trans->trans_cfg->gen2)
2407 iwl_txq_gen1_inval_byte_cnt_tbl(trans, txq,
2408 txq_read_ptr);
2409
2410 iwl_txq_free_tfd(trans, txq, txq_read_ptr);
2411 }
2412
2413 spin_lock(&txq->lock);
2414 txq->read_ptr = txq_read_ptr;
2415
2416 iwl_txq_progress(txq);
2417
2418 if (iwl_txq_space(trans, txq) > txq->low_mark &&
2419 test_bit(txq_id, trans_pcie->txqs.queue_stopped)) {
2420 struct sk_buff_head overflow_skbs;
2421 struct sk_buff *skb;
2422
2423 __skb_queue_head_init(&overflow_skbs);
2424 skb_queue_splice_init(&txq->overflow_q,
2425 is_flush ? skbs : &overflow_skbs);
2426
2427 /*
2428 * We are going to transmit from the overflow queue.
2429 * Remember this state so that wait_for_txq_empty will know we
2430 * are adding more packets to the TFD queue. It cannot rely on
2431 * the state of &txq->overflow_q, as we just emptied it, but
2432 * haven't TXed the content yet.
2433 */
2434 txq->overflow_tx = true;
2435
2436 /*
2437 		 * This is tricky: we are in the reclaim path and are holding
2438 		 * reclaim_lock, so no one else will try to access the txq data
2439 		 * from that path. Tx on this queue is stopped, so there is no
2440 		 * concurrent tx either. Bottom line: we can unlock and re-lock later.
2441 */
2442 spin_unlock(&txq->lock);
2443
2444 while ((skb = __skb_dequeue(&overflow_skbs))) {
2445 struct iwl_device_tx_cmd *dev_cmd_ptr;
2446
2447 dev_cmd_ptr = *(void **)((u8 *)skb->cb +
2448 trans_pcie->txqs.dev_cmd_offs);
2449
2450 /*
2451 * Note that we can very well be overflowing again.
2452 * In that case, iwl_txq_space will be small again
2453 * and we won't wake mac80211's queue.
2454 */
2455 iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
2456 }
2457
2458 if (iwl_txq_space(trans, txq) > txq->low_mark)
2459 iwl_trans_pcie_wake_queue(trans, txq);
2460
2461 spin_lock(&txq->lock);
2462 txq->overflow_tx = false;
2463 }
2464
2465 spin_unlock(&txq->lock);
2466 out:
2467 spin_unlock_bh(&txq->reclaim_lock);
2468 }
2469
2470 /* Set the wr_ptr (and matching rd_ptr) of a specific txq */
2471 void iwl_pcie_set_q_ptrs(struct iwl_trans *trans, int txq_id, int ptr)
2472 {
2473 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2474 struct iwl_txq *txq = trans_pcie->txqs.txq[txq_id];
2475
2476 spin_lock_bh(&txq->lock);
2477
2478 txq->write_ptr = ptr;
2479 txq->read_ptr = txq->write_ptr;
2480
2481 spin_unlock_bh(&txq->lock);
2482 }
2483
2484 void iwl_pcie_freeze_txq_timer(struct iwl_trans *trans,
2485 unsigned long txqs, bool freeze)
2486 {
2487 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2488 int queue;
2489
2490 for_each_set_bit(queue, &txqs, BITS_PER_LONG) {
2491 struct iwl_txq *txq = trans_pcie->txqs.txq[queue];
2492 unsigned long now;
2493
2494 spin_lock_bh(&txq->lock);
2495
2496 now = jiffies;
2497
2498 if (txq->frozen == freeze)
2499 goto next_queue;
2500
2501 IWL_DEBUG_TX_QUEUES(trans, "%s TXQ %d\n",
2502 freeze ? "Freezing" : "Waking", queue);
2503
2504 txq->frozen = freeze;
2505
2506 if (txq->read_ptr == txq->write_ptr)
2507 goto next_queue;
2508
2509 if (freeze) {
2510 if (unlikely(time_after(now,
2511 txq->stuck_timer.expires))) {
2512 /*
2513 * The timer should have fired, maybe it is
2514 * spinning right now on the lock.
2515 */
2516 goto next_queue;
2517 }
2518 /* remember how long until the timer fires */
2519 txq->frozen_expiry_remainder =
2520 txq->stuck_timer.expires - now;
2521 del_timer(&txq->stuck_timer);
2522 goto next_queue;
2523 }
2524
2525 /*
2526 * Wake a non-empty queue -> arm timer with the
2527 * remainder before it froze
2528 */
2529 mod_timer(&txq->stuck_timer,
2530 now + txq->frozen_expiry_remainder);
2531
2532 next_queue:
2533 spin_unlock_bh(&txq->lock);
2534 }
2535 }
2536
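/* how long to wait for a synchronous host command to complete (2 seconds) */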
2537 #define HOST_COMPLETE_TIMEOUT (2 * HZ)
2538
2539 static int iwl_trans_pcie_send_hcmd_sync(struct iwl_trans *trans,
2540 struct iwl_host_cmd *cmd)
2541 {
2542 struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2543 const char *cmd_str = iwl_get_cmd_string(trans, cmd->id);
2544 struct iwl_txq *txq = trans_pcie->txqs.txq[trans_pcie->txqs.cmd.q_id];
2545 int cmd_idx;
2546 int ret;
2547
2548 IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n", cmd_str);
2549
2550 if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
2551 &trans->status),
2552 "Command %s: a command is already active!\n", cmd_str))
2553 return -EIO;
2554
2555 IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n", cmd_str);
2556
2557 if (trans->trans_cfg->gen2)
2558 cmd_idx = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2559 else
2560 cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
2561
2562 if (cmd_idx < 0) {
2563 ret = cmd_idx;
2564 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2565 IWL_ERR(trans, "Error sending %s: enqueue_hcmd failed: %d\n",
2566 cmd_str, ret);
2567 return ret;
2568 }
2569
2570 ret = wait_event_timeout(trans->wait_command_queue,
2571 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
2572 &trans->status),
2573 HOST_COMPLETE_TIMEOUT);
2574 if (!ret) {
2575 IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
2576 cmd_str, jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
2577
2578 IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
2579 txq->read_ptr, txq->write_ptr);
2580
2581 clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
2582 IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
2583 cmd_str);
2584 ret = -ETIMEDOUT;
2585
2586 iwl_trans_sync_nmi(trans);
2587 goto cancel;
2588 }
2589
2590 if (test_bit(STATUS_FW_ERROR, &trans->status)) {
2591 if (!test_and_clear_bit(STATUS_SUPPRESS_CMD_ERROR_ONCE,
2592 &trans->status)) {
2593 IWL_ERR(trans, "FW error in SYNC CMD %s\n", cmd_str);
2594 dump_stack();
2595 }
2596 ret = -EIO;
2597 goto cancel;
2598 }
2599
2600 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2601 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2602 IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
2603 ret = -ERFKILL;
2604 goto cancel;
2605 }
2606
2607 if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
2608 IWL_ERR(trans, "Error: Response NULL in '%s'\n", cmd_str);
2609 ret = -EIO;
2610 goto cancel;
2611 }
2612
2613 return 0;
2614
2615 cancel:
2616 if (cmd->flags & CMD_WANT_SKB) {
2617 /*
2618 * Cancel the CMD_WANT_SKB flag for the cmd in the
2619 		 * TX cmd queue. Otherwise, if the response comes
2620 		 * in later, it could set an invalid
2621 * address (cmd->meta.source).
2622 */
2623 txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
2624 }
2625
2626 if (cmd->resp_pkt) {
2627 iwl_free_resp(cmd);
2628 cmd->resp_pkt = NULL;
2629 }
2630
2631 return ret;
2632 }
2633
2634 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans,
2635 struct iwl_host_cmd *cmd)
2636 {
2637 /* Make sure the NIC is still alive in the bus */
2638 if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2639 return -ENODEV;
2640
2641 if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
2642 test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
2643 IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
2644 cmd->id);
2645 return -ERFKILL;
2646 }
2647
2648 if (unlikely(trans->system_pm_mode == IWL_PLAT_PM_MODE_D3 &&
2649 !(cmd->flags & CMD_SEND_IN_D3))) {
2650 IWL_DEBUG_WOWLAN(trans, "Dropping CMD 0x%x: D3\n", cmd->id);
2651 return -EHOSTDOWN;
2652 }
2653
2654 if (cmd->flags & CMD_ASYNC) {
2655 int ret;
2656
2657 		/* An asynchronous command cannot expect an SKB to be set. */
2658 if (WARN_ON(cmd->flags & CMD_WANT_SKB))
2659 return -EINVAL;
2660
2661 if (trans->trans_cfg->gen2)
2662 ret = iwl_pcie_gen2_enqueue_hcmd(trans, cmd);
2663 else
2664 ret = iwl_pcie_enqueue_hcmd(trans, cmd);
2665
2666 if (ret < 0) {
2667 IWL_ERR(trans,
2668 "Error sending %s: enqueue_hcmd failed: %d\n",
2669 iwl_get_cmd_string(trans, cmd->id), ret);
2670 return ret;
2671 }
2672 return 0;
2673 }
2674
2675 return iwl_trans_pcie_send_hcmd_sync(trans, cmd);
2676 }
2677 IWL_EXPORT_SYMBOL(iwl_trans_pcie_send_hcmd);
2678