/******************************************************************************
 *
 * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
 * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH
 * Copyright(c) 2016 - 2017 Intel Deutschland GmbH
 * Copyright(c) 2018 Intel Corporation
 *
 * Portions of this file are derived from the ipw3945 project, as well
 * as portions of the ieee80211 subsystem header files.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA
 *
 * The full GNU General Public License is included in this distribution in the
 * file called LICENSE.
 *
 * Contact Information:
 *  Intel Linux Wireless <linuxwifi@intel.com>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 *****************************************************************************/
#include <linux/etherdevice.h>
#include <linux/ieee80211.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <net/ip6_checksum.h>
#include <net/tso.h>

#include "iwl-debug.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-io.h"
#include "iwl-scd.h"
#include "iwl-op-mode.h"
#include "internal.h"
#include "fw/api/tx.h"

#define IWL_TX_CRC_SIZE 4
#define IWL_TX_DELIMITER_SIZE 4

/*************** DMA-QUEUE-GENERAL-FUNCTIONS  *****
 * DMA services
 *
 * Theory of operation
 *
 * A Tx or Rx queue resides in host DRAM, and consists of a circular buffer
 * of buffer descriptors, each of which points to one or more data buffers for
 * the device to read from or fill.  Driver and device exchange status of each
 * queue via "read" and "write" pointers.  Driver keeps a minimum of 2 empty
 * entries in each circular buffer, to protect against confusing empty and full
 * queue states.
 *
 * The device reads or writes the data in the queues via the device's several
 * DMA/FIFO channels.  Each queue is mapped to a single DMA channel.
 *
 * For a Tx queue, there are low mark and high mark limits.  If, after queuing
 * a packet for Tx, the free space becomes < low mark, the Tx queue is
 * stopped.  When reclaiming packets (on the 'tx done' IRQ), if the free space
 * becomes > high mark, the Tx queue is resumed.
 *
 ***************************************************/

int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q)
{
	unsigned int max;
	unsigned int used;

	/*
	 * To avoid ambiguity between empty and completely full queues, there
	 * should always be less than max_tfd_queue_size elements in the queue.
	 * If q->n_window is smaller than max_tfd_queue_size, there is no need
	 * to reserve any queue entries for this purpose.
	 */
	if (q->n_window < trans->cfg->base_params->max_tfd_queue_size)
		max = q->n_window;
	else
		max = trans->cfg->base_params->max_tfd_queue_size - 1;

	/*
	 * max_tfd_queue_size is a power of 2, so the following is equivalent to
	 * modulo by max_tfd_queue_size and is well defined.
	 */
	used = (q->write_ptr - q->read_ptr) &
		(trans->cfg->base_params->max_tfd_queue_size - 1);

	if (WARN_ON(used > max))
		return 0;

	return max - used;
}
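
/*
 * Illustrative sketch (not part of the driver, compiled out): with a
 * power-of-two queue size, masking the pointer difference gives the
 * occupancy even across wrap-around, and keeping one slot reserved
 * makes "empty" (read == write) distinguishable from "full".  The
 * queue size of 256 below is an arbitrary example, not a hardware
 * parameter.
 */
#if 0
static unsigned int example_queue_used(u32 write_ptr, u32 read_ptr)
{
	const u32 size = 256;	/* example power-of-two queue size */

	/* well defined even after write_ptr has wrapped past read_ptr */
	return (write_ptr - read_ptr) & (size - 1);
}
#endif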

/*
 * iwl_queue_init - Initialize queue's high/low-water and read/write indexes
 */
static int iwl_queue_init(struct iwl_txq *q, int slots_num)
{
	q->n_window = slots_num;

	/* slots_num must be power-of-two size, otherwise
	 * iwl_pcie_get_cmd_index is broken. */
	if (WARN_ON(!is_power_of_2(slots_num)))
		return -EINVAL;

	q->low_mark = q->n_window / 4;
	if (q->low_mark < 4)
		q->low_mark = 4;

	q->high_mark = q->n_window / 8;
	if (q->high_mark < 2)
		q->high_mark = 2;

	q->write_ptr = 0;
	q->read_ptr = 0;

	return 0;
}
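
/*
 * Illustrative sketch (not part of the driver, compiled out): for a
 * 256-slot window the marks above work out to low_mark = 64 and
 * high_mark = 32; for a small 8-slot window the clamps kick in and
 * give low_mark = 4, high_mark = 2.
 */
#if 0
static void example_watermarks(int slots_num, int *low, int *high)
{
	*low = max(slots_num / 4, 4);	/* stop queue below this space */
	*high = max(slots_num / 8, 2);	/* resume queue above this space */
}
#endif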

int iwl_pcie_alloc_dma_ptr(struct iwl_trans *trans,
			   struct iwl_dma_ptr *ptr, size_t size)
{
	if (WARN_ON(ptr->addr))
		return -EINVAL;

	ptr->addr = dma_alloc_coherent(trans->dev, size,
				       &ptr->dma, GFP_KERNEL);
	if (!ptr->addr)
		return -ENOMEM;
	ptr->size = size;
	return 0;
}

void iwl_pcie_free_dma_ptr(struct iwl_trans *trans, struct iwl_dma_ptr *ptr)
{
	if (unlikely(!ptr->addr))
		return;

	dma_free_coherent(trans->dev, ptr->size, ptr->addr, ptr->dma);
	memset(ptr, 0, sizeof(*ptr));
}

static void iwl_pcie_txq_stuck_timer(struct timer_list *t)
{
	struct iwl_txq *txq = from_timer(txq, t, stuck_timer);
	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
	struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie);

	spin_lock(&txq->lock);
	/* check if triggered erroneously */
	if (txq->read_ptr == txq->write_ptr) {
		spin_unlock(&txq->lock);
		return;
	}
	spin_unlock(&txq->lock);

	iwl_trans_pcie_log_scd_error(trans, txq);

	iwl_force_nmi(trans);
}

/*
 * iwl_pcie_txq_update_byte_cnt_tbl - Set up entry in Tx byte-count array
 */
static void iwl_pcie_txq_update_byte_cnt_tbl(struct iwl_trans *trans,
					     struct iwl_txq *txq, u16 byte_cnt,
					     int num_tbs)
{
	struct iwlagn_scd_bc_tbl *scd_bc_tbl;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int write_ptr = txq->write_ptr;
	int txq_id = txq->id;
	u8 sec_ctl = 0;
	u16 len = byte_cnt + IWL_TX_CRC_SIZE + IWL_TX_DELIMITER_SIZE;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[txq->write_ptr].cmd->payload;
	u8 sta_id = tx_cmd->sta_id;

	scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;

	sec_ctl = tx_cmd->sec_ctl;

	switch (sec_ctl & TX_CMD_SEC_MSK) {
	case TX_CMD_SEC_CCM:
		len += IEEE80211_CCMP_MIC_LEN;
		break;
	case TX_CMD_SEC_TKIP:
		len += IEEE80211_TKIP_ICV_LEN;
		break;
	case TX_CMD_SEC_WEP:
		len += IEEE80211_WEP_IV_LEN + IEEE80211_WEP_ICV_LEN;
		break;
	}
	if (trans_pcie->bc_table_dword)
		len = DIV_ROUND_UP(len, 4);

	if (WARN_ON(len > 0xFFF || write_ptr >= TFD_QUEUE_SIZE_MAX))
		return;

	bc_ent = cpu_to_le16(len | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[write_ptr] = bc_ent;

	if (write_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + write_ptr] = bc_ent;
}
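
/*
 * Illustrative sketch (not part of the driver, compiled out): a
 * byte-count table entry as written above packs a 12-bit length and a
 * 4-bit station id into one little-endian 16-bit word; when
 * bc_table_dword is set, the length is expressed in dwords instead of
 * bytes.
 */
#if 0
static __le16 example_pack_bc_ent(u16 len, u8 sta_id, bool bc_table_dword)
{
	if (bc_table_dword)
		len = DIV_ROUND_UP(len, 4);	/* bytes -> dwords */

	/* length in bits 0..11, station id in bits 12..15 */
	return cpu_to_le16((len & 0xFFF) | ((sta_id & 0xF) << 12));
}
#endif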

static void iwl_pcie_txq_inval_byte_cnt_tbl(struct iwl_trans *trans,
					    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwlagn_scd_bc_tbl *scd_bc_tbl = trans_pcie->scd_bc_tbls.addr;
	int txq_id = txq->id;
	int read_ptr = txq->read_ptr;
	u8 sta_id = 0;
	__le16 bc_ent;
	struct iwl_tx_cmd *tx_cmd =
		(void *)txq->entries[read_ptr].cmd->payload;

	WARN_ON(read_ptr >= TFD_QUEUE_SIZE_MAX);

	if (txq_id != trans_pcie->cmd_queue)
		sta_id = tx_cmd->sta_id;

	bc_ent = cpu_to_le16(1 | (sta_id << 12));

	scd_bc_tbl[txq_id].tfd_offset[read_ptr] = bc_ent;

	if (read_ptr < TFD_QUEUE_SIZE_BC_DUP)
		scd_bc_tbl[txq_id].
			tfd_offset[TFD_QUEUE_SIZE_MAX + read_ptr] = bc_ent;
}

/*
 * iwl_pcie_txq_inc_wr_ptr - Send new write index to hardware
 */
static void iwl_pcie_txq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_txq *txq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 reg = 0;
	int txq_id = txq->id;

	lockdep_assert_held(&txq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. NIC is woken up for CMD regardless of shadow outside this function
	 * 3. there is a chance that the NIC is asleep
	 */
	if (!trans->cfg->base_params->shadow_reg_enable &&
	    txq_id != trans_pcie->cmd_queue &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		/*
		 * wake up nic if it's powered down ...
		 * uCode will wake up, and interrupt us again, so next
		 * time we'll skip this part.
		 */
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Tx queue %d requesting wakeup, GP1 = 0x%x\n",
				       txq_id, reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    BIT(trans->cfg->csr->flag_mac_access_req));
			txq->need_update = true;
			return;
		}
	}

	/*
	 * if not in power-save mode, uCode will never sleep when we're
	 * trying to tx (during RFKILL, we're not trying to tx).
	 */
	IWL_DEBUG_TX(trans, "Q:%d WR: 0x%x\n", txq_id, txq->write_ptr);
	if (!txq->block)
		iwl_write32(trans, HBUS_TARG_WRPTR,
			    txq->write_ptr | (txq_id << 8));
}
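
/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * HBUS_TARG_WRPTR value written above carries the write pointer in
 * the low byte and the queue index starting at bit 8, so a single
 * register serves all queues.
 */
#if 0
static u32 example_wrptr_reg_val(u32 write_ptr, int txq_id)
{
	return write_ptr | (txq_id << 8);
}
#endif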

void iwl_pcie_txq_check_wrptrs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->cfg->base_params->num_of_queues; i++) {
		struct iwl_txq *txq = trans_pcie->txq[i];

		if (!test_bit(i, trans_pcie->queue_used))
			continue;

		spin_lock_bh(&txq->lock);
		if (txq->need_update) {
			iwl_pcie_txq_inc_wr_ptr(trans, txq);
			txq->need_update = false;
		}
		spin_unlock_bh(&txq->lock);
	}
}

static inline dma_addr_t iwl_pcie_tfd_tb_get_addr(struct iwl_trans *trans,
						  void *_tfd, u8 idx)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;
		struct iwl_tfh_tb *tb = &tfd->tbs[idx];

		return (dma_addr_t)(le64_to_cpu(tb->addr));
	} else {
		struct iwl_tfd *tfd = _tfd;
		struct iwl_tfd_tb *tb = &tfd->tbs[idx];
		dma_addr_t addr = get_unaligned_le32(&tb->lo);
		dma_addr_t hi_len;

		if (sizeof(dma_addr_t) <= sizeof(u32))
			return addr;

		hi_len = le16_to_cpu(tb->hi_n_len) & 0xF;

		/*
		 * shift by 16 twice to avoid warnings on 32-bit
		 * (where this code never runs anyway due to the
		 * if statement above)
		 */
		return addr | ((hi_len << 16) << 16);
	}
}

static inline void iwl_pcie_tfd_set_tb(struct iwl_trans *trans, void *tfd,
				       u8 idx, dma_addr_t addr, u16 len)
{
	struct iwl_tfd *tfd_fh = (void *)tfd;
	struct iwl_tfd_tb *tb = &tfd_fh->tbs[idx];

	u16 hi_n_len = len << 4;

	put_unaligned_le32(addr, &tb->lo);
	hi_n_len |= iwl_get_dma_hi_addr(addr);

	tb->hi_n_len = cpu_to_le16(hi_n_len);

	tfd_fh->num_tbs = idx + 1;
}
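
/*
 * Illustrative sketch (not part of the driver, compiled out): a legacy
 * TFD buffer entry holds a 36-bit DMA address split into a 32-bit "lo"
 * word plus 4 high bits, which share a 16-bit field with the 12-bit
 * length ("hi_n_len" above: length in bits 4..15, address bits 32..35
 * in bits 0..3).
 */
#if 0
static u16 example_pack_hi_n_len(u64 addr, u16 len)
{
	u16 hi_n_len = len << 4;		/* 12-bit length */

	hi_n_len |= (addr >> 32) & 0xF;		/* address bits 32..35 */
	return hi_n_len;
}

static dma_addr_t example_unpack_addr(u32 lo, u16 hi_n_len)
{
	return (dma_addr_t)lo |
		(((dma_addr_t)(hi_n_len & 0xF) << 16) << 16);
}
#endif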

static inline u8 iwl_pcie_tfd_get_num_tbs(struct iwl_trans *trans, void *_tfd)
{
	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd = _tfd;

		return le16_to_cpu(tfd->num_tbs) & 0x1f;
	} else {
		struct iwl_tfd *tfd = _tfd;

		return tfd->num_tbs & 0x1f;
	}
}

static void iwl_pcie_tfd_unmap(struct iwl_trans *trans,
			       struct iwl_cmd_meta *meta,
			       struct iwl_txq *txq, int index)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i, num_tbs;
	void *tfd = iwl_pcie_get_tfd(trans, txq, index);

	/* Sanity check on number of chunks */
	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	if (num_tbs > trans_pcie->max_tbs) {
		IWL_ERR(trans, "Too many chunks: %i\n", num_tbs);
		/* @todo issue fatal error, it is quite a serious situation */
		return;
	}

	/* first TB is never freed - it's the bidirectional DMA data */

	for (i = 1; i < num_tbs; i++) {
		if (meta->tbs & BIT(i))
			dma_unmap_page(trans->dev,
				       iwl_pcie_tfd_tb_get_addr(trans, tfd, i),
				       iwl_pcie_tfd_tb_get_len(trans, tfd, i),
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(trans->dev,
					 iwl_pcie_tfd_tb_get_addr(trans, tfd,
								  i),
					 iwl_pcie_tfd_tb_get_len(trans, tfd,
								 i),
					 DMA_TO_DEVICE);
	}

	if (trans->cfg->use_tfh) {
		struct iwl_tfh_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	} else {
		struct iwl_tfd *tfd_fh = (void *)tfd;

		tfd_fh->num_tbs = 0;
	}
}

/*
 * iwl_pcie_txq_free_tfd - Free all chunks referenced by TFD [txq->read_ptr]
 * @trans: transport private data
 * @txq: tx queue
 *
 * Does NOT advance any TFD circular buffer read/write indexes
 * Does NOT free the TFD itself (which is within circular buffer)
 */
void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq)
{
	/* rd_ptr is bounded by TFD_QUEUE_SIZE_MAX and
	 * idx is bounded by n_window
	 */
	int rd_ptr = txq->read_ptr;
	int idx = iwl_pcie_get_cmd_index(txq, rd_ptr);

	lockdep_assert_held(&txq->lock);

	/* We have only q->n_window txq->entries, but we use
	 * TFD_QUEUE_SIZE_MAX TFDs
	 */
	iwl_pcie_tfd_unmap(trans, &txq->entries[idx].meta, txq, rd_ptr);

	/* free SKB */
	if (txq->entries) {
		struct sk_buff *skb;

		skb = txq->entries[idx].skb;

		/* Can be called from irqs-disabled context
		 * If skb is not NULL, it means that the whole queue is being
		 * freed and that the queue is not empty - free the skb
		 */
		if (skb) {
			iwl_op_mode_free_skb(trans->op_mode, skb);
			txq->entries[idx].skb = NULL;
		}
	}
}

static int iwl_pcie_txq_build_tfd(struct iwl_trans *trans, struct iwl_txq *txq,
				  dma_addr_t addr, u16 len, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	void *tfd;
	u32 num_tbs;

	tfd = txq->tfds + trans_pcie->tfd_size * txq->write_ptr;

	if (reset)
		memset(tfd, 0, trans_pcie->tfd_size);

	num_tbs = iwl_pcie_tfd_get_num_tbs(trans, tfd);

	/* Each TFD can point to a maximum of max_tbs Tx buffers */
	if (num_tbs >= trans_pcie->max_tbs) {
		IWL_ERR(trans, "Error can not send more than %d chunks\n",
			trans_pcie->max_tbs);
		return -EINVAL;
	}

	if (WARN(addr & ~IWL_TX_DMA_MASK,
		 "Unaligned address = %llx\n", (unsigned long long)addr))
		return -EINVAL;

	iwl_pcie_tfd_set_tb(trans, tfd, num_tbs, addr, len);

	return num_tbs;
}
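
/*
 * Illustrative sketch (not part of the driver, compiled out): callers
 * append buffers one at a time; each call returns the index the entry
 * landed in, so the first call (with reset) installs TB0 and later
 * calls chain further fragments onto the same TFD.
 */
#if 0
static void example_build_two_tb_tfd(struct iwl_trans *trans,
				     struct iwl_txq *txq,
				     dma_addr_t hdr_dma, u16 hdr_len,
				     dma_addr_t payload_dma, u16 payload_len)
{
	/* TB0: reset the TFD and add the header fragment */
	iwl_pcie_txq_build_tfd(trans, txq, hdr_dma, hdr_len, true);
	/* TB1: chain the payload fragment onto the same TFD */
	iwl_pcie_txq_build_tfd(trans, txq, payload_dma, payload_len, false);
}
#endif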

int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq,
		       int slots_num, bool cmd_queue)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t tfd_sz = trans_pcie->tfd_size *
		trans->cfg->base_params->max_tfd_queue_size;
	size_t tb0_buf_sz;
	int i;

	if (WARN_ON(txq->entries || txq->tfds))
		return -EINVAL;

	if (trans->cfg->use_tfh)
		tfd_sz = trans_pcie->tfd_size * slots_num;

	timer_setup(&txq->stuck_timer, iwl_pcie_txq_stuck_timer, 0);
	txq->trans_pcie = trans_pcie;

	txq->n_window = slots_num;

	txq->entries = kcalloc(slots_num,
			       sizeof(struct iwl_pcie_txq_entry),
			       GFP_KERNEL);

	if (!txq->entries)
		goto error;

	if (cmd_queue)
		for (i = 0; i < slots_num; i++) {
			txq->entries[i].cmd =
				kmalloc(sizeof(struct iwl_device_cmd),
					GFP_KERNEL);
			if (!txq->entries[i].cmd)
				goto error;
		}

	/* Circular buffer of transmit frame descriptors (TFDs),
	 * shared with device */
	txq->tfds = dma_alloc_coherent(trans->dev, tfd_sz,
				       &txq->dma_addr, GFP_KERNEL);
	if (!txq->tfds)
		goto error;

	BUILD_BUG_ON(IWL_FIRST_TB_SIZE_ALIGN != sizeof(*txq->first_tb_bufs));

	tb0_buf_sz = sizeof(*txq->first_tb_bufs) * slots_num;

	txq->first_tb_bufs = dma_alloc_coherent(trans->dev, tb0_buf_sz,
					      &txq->first_tb_dma,
					      GFP_KERNEL);
	if (!txq->first_tb_bufs)
		goto err_free_tfds;

	return 0;
err_free_tfds:
	dma_free_coherent(trans->dev, tfd_sz, txq->tfds, txq->dma_addr);
error:
	if (txq->entries && cmd_queue)
		for (i = 0; i < slots_num; i++)
			kfree(txq->entries[i].cmd);
	kfree(txq->entries);
	txq->entries = NULL;

	return -ENOMEM;
}

int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq,
		      int slots_num, bool cmd_queue)
{
	int ret;
	u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size;

	txq->need_update = false;

	/* max_tfd_queue_size must be power-of-two size, otherwise
	 * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. */
	if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1),
		      "Max tfd queue size must be a power of two, but is %d",
		      tfd_queue_max_size))
		return -EINVAL;

	/* Initialize queue's high/low-water marks, and head/tail indexes */
	ret = iwl_queue_init(txq, slots_num);
	if (ret)
		return ret;

	spin_lock_init(&txq->lock);

	if (cmd_queue) {
		static struct lock_class_key iwl_pcie_cmd_queue_lock_class;

		lockdep_set_class(&txq->lock, &iwl_pcie_cmd_queue_lock_class);
	}

	__skb_queue_head_init(&txq->overflow_q);

	return 0;
}

void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie,
			    struct sk_buff *skb)
{
	struct page **page_ptr;

	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);

	if (*page_ptr) {
		__free_page(*page_ptr);
		*page_ptr = NULL;
	}
}

static void iwl_pcie_clear_cmd_in_flight(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = false;
		IWL_DEBUG_RPM(trans, "clear ref_cmd_in_flight - unref\n");
		iwl_trans_unref(trans);
	}

	if (!trans->cfg->base_params->apmg_wake_up_wa)
		return;
	if (WARN_ON(!trans_pcie->cmd_hold_nic_awake))
		return;

	trans_pcie->cmd_hold_nic_awake = false;
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   BIT(trans->cfg->csr->flag_mac_access_req));
}

/*
 * iwl_pcie_txq_unmap - Unmap any remaining DMA mappings and free skb's
 */
static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	spin_lock_bh(&txq->lock);
	while (txq->write_ptr != txq->read_ptr) {
		IWL_DEBUG_TX_REPLY(trans, "Q %d Free %d\n",
				   txq_id, txq->read_ptr);

		if (txq_id != trans_pcie->cmd_queue) {
			struct sk_buff *skb = txq->entries[txq->read_ptr].skb;

			if (WARN_ON_ONCE(!skb))
				continue;

			iwl_pcie_free_tso_page(trans_pcie, skb);
		}
		iwl_pcie_txq_free_tfd(trans, txq);
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (txq->read_ptr == txq->write_ptr) {
			unsigned long flags;

			spin_lock_irqsave(&trans_pcie->reg_lock, flags);
			if (txq_id != trans_pcie->cmd_queue) {
				IWL_DEBUG_RPM(trans, "Q %d - last tx freed\n",
					      txq->id);
				iwl_trans_unref(trans);
			} else {
				iwl_pcie_clear_cmd_in_flight(trans);
			}
			spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		}
	}

	while (!skb_queue_empty(&txq->overflow_q)) {
		struct sk_buff *skb = __skb_dequeue(&txq->overflow_q);

		iwl_op_mode_free_skb(trans->op_mode, skb);
	}

	spin_unlock_bh(&txq->lock);

	/* just in case - this queue may have been stopped */
	iwl_wake_queue(trans, txq);
}

/*
 * iwl_pcie_txq_free - Deallocate DMA queue.
 * @txq: Transmit queue to deallocate.
 *
 * Empty queue by removing and destroying all BD's.
 * Free all buffers.
 * 0-fill, but do not free "txq" descriptor structure.
 */
static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	struct device *dev = trans->dev;
	int i;

	if (WARN_ON(!txq))
		return;

	iwl_pcie_txq_unmap(trans, txq_id);

	/* De-alloc array of command/tx buffers */
	if (txq_id == trans_pcie->cmd_queue)
		for (i = 0; i < txq->n_window; i++) {
			kzfree(txq->entries[i].cmd);
			kzfree(txq->entries[i].free_buf);
		}

	/* De-alloc circular buffer of TFDs */
	if (txq->tfds) {
		dma_free_coherent(dev,
				  trans_pcie->tfd_size *
				  trans->cfg->base_params->max_tfd_queue_size,
				  txq->tfds, txq->dma_addr);
		txq->dma_addr = 0;
		txq->tfds = NULL;

		dma_free_coherent(dev,
				  sizeof(*txq->first_tb_bufs) * txq->n_window,
				  txq->first_tb_bufs, txq->first_tb_dma);
	}

	kfree(txq->entries);
	txq->entries = NULL;

	del_timer_sync(&txq->stuck_timer);

	/* 0-fill queue descriptor structure */
	memset(txq, 0, sizeof(*txq));
}

void iwl_pcie_tx_start(struct iwl_trans *trans, u32 scd_base_addr)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int nq = trans->cfg->base_params->num_of_queues;
	int chan;
	u32 reg_val;
	int clear_dwords = (SCD_TRANS_TBL_OFFSET_QUEUE(nq) -
				SCD_CONTEXT_MEM_LOWER_BOUND) / sizeof(u32);

	/* make sure all queues are not stopped/used */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	trans_pcie->scd_base_addr =
		iwl_read_prph(trans, SCD_SRAM_BASE_ADDR);

	WARN_ON(scd_base_addr != 0 &&
		scd_base_addr != trans_pcie->scd_base_addr);

	/* reset context data, TX status and translation data */
	iwl_trans_write_mem(trans, trans_pcie->scd_base_addr +
				   SCD_CONTEXT_MEM_LOWER_BOUND,
			    NULL, clear_dwords);

	iwl_write_prph(trans, SCD_DRAM_BASE_ADDR,
		       trans_pcie->scd_bc_tbls.dma >> 10);

	/* The chain extension of the SCD doesn't work well. This feature is
	 * enabled by default by the HW, so we need to disable it manually.
	 */
	if (trans->cfg->base_params->scd_chain_ext_wa)
		iwl_write_prph(trans, SCD_CHAINEXT_EN, 0);

	iwl_trans_ac_txq_enable(trans, trans_pcie->cmd_queue,
				trans_pcie->cmd_fifo,
				trans_pcie->cmd_q_wdg_timeout);

	/* Activate all Tx DMA/FIFO channels */
	iwl_scd_activate_fifos(trans);

	/* Enable DMA channel */
	for (chan = 0; chan < FH_TCSR_CHNL_NUM; chan++)
		iwl_write_direct32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(chan),
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
				   FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_ENABLE);

	/* Update FH chicken bits */
	reg_val = iwl_read_direct32(trans, FH_TX_CHICKEN_BITS_REG);
	iwl_write_direct32(trans, FH_TX_CHICKEN_BITS_REG,
			   reg_val | FH_TX_CHICKEN_BITS_SCD_AUTO_RETRY_EN);

	/* Enable L1-Active */
	if (trans->cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_L1_ACT_DIS);
}

void iwl_trans_pcie_tx_reset(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/*
	 * we should never get here in gen2 trans mode; return early to avoid
	 * invalid accesses
	 */
	if (WARN_ON_ONCE(trans->cfg->gen2))
		return;

	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		struct iwl_txq *txq = trans_pcie->txq[txq_id];

		if (trans->cfg->use_tfh)
			iwl_write_direct64(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr);
		else
			iwl_write_direct32(trans,
					   FH_MEM_CBBC_QUEUE(trans, txq_id),
					   txq->dma_addr >> 8);
		iwl_pcie_txq_unmap(trans, txq_id);
		txq->read_ptr = 0;
		txq->write_ptr = 0;
	}

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	/*
	 * Send 0 as the scd_base_addr since the device may have been reset
	 * while we were in WoWLAN, in which case SCD_SRAM_BASE_ADDR will
	 * contain garbage.
	 */
	iwl_pcie_tx_start(trans, 0);
}

static void iwl_pcie_tx_stop_fh(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned long flags;
	int ch, ret;
	u32 mask = 0;

	spin_lock(&trans_pcie->irq_lock);

	if (!iwl_trans_grab_nic_access(trans, &flags))
		goto out;

	/* Stop each Tx DMA channel */
	for (ch = 0; ch < FH_TCSR_CHNL_NUM; ch++) {
		iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(ch), 0x0);
		mask |= FH_TSSR_TX_STATUS_REG_MSK_CHNL_IDLE(ch);
	}

	/* Wait for DMA channels to be idle */
	ret = iwl_poll_bit(trans, FH_TSSR_TX_STATUS_REG, mask, mask, 5000);
	if (ret < 0)
		IWL_ERR(trans,
			"Failing on timeout while stopping DMA channel %d [0x%08x]\n",
			ch, iwl_read32(trans, FH_TSSR_TX_STATUS_REG));

	iwl_trans_release_nic_access(trans, &flags);

out:
	spin_unlock(&trans_pcie->irq_lock);
}

/*
 * iwl_pcie_tx_stop - Stop all Tx DMA channels
 */
int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int txq_id;

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Turn off all Tx DMA channels */
	iwl_pcie_tx_stop_fh(trans);

	/*
	 * This function can be called before the op_mode disabled the
	 * queues. This happens when we have an rfkill interrupt.
	 * Since we stop Tx altogether - mark the queues as stopped.
	 */
	memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* This can happen: start_hw, stop_device */
	if (!trans_pcie->txq_memory)
		return 0;

	/* Unmap DMA from host system and free skb's */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++)
		iwl_pcie_txq_unmap(trans, txq_id);

	return 0;
}

/*
 * iwl_pcie_tx_free - Free TXQ Context
 *
 * Destroy all TX DMA queues and structures
 */
void iwl_pcie_tx_free(struct iwl_trans *trans)
{
	int txq_id;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));

	/* Tx queues */
	if (trans_pcie->txq_memory) {
		for (txq_id = 0;
		     txq_id < trans->cfg->base_params->num_of_queues;
		     txq_id++) {
			iwl_pcie_txq_free(trans, txq_id);
			trans_pcie->txq[txq_id] = NULL;
		}
	}

	kfree(trans_pcie->txq_memory);
	trans_pcie->txq_memory = NULL;

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->kw);

	iwl_pcie_free_dma_ptr(trans, &trans_pcie->scd_bc_tbls);
}

/*
 * iwl_pcie_tx_alloc - allocate TX context
 * Allocate all Tx DMA structures and initialize them
 */
static int iwl_pcie_tx_alloc(struct iwl_trans *trans)
{
	int ret;
	int txq_id, slots_num;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 bc_tbls_size = trans->cfg->base_params->num_of_queues;

	bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ?
		sizeof(struct iwl_gen3_bc_tbl) :
		sizeof(struct iwlagn_scd_bc_tbl);

	/* It is not allowed to alloc twice, so warn when this happens.
	 * We cannot rely on the previous allocation, so free and fail */
	if (WARN_ON(trans_pcie->txq_memory)) {
		ret = -EINVAL;
		goto error;
	}

	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls,
				     bc_tbls_size);
	if (ret) {
		IWL_ERR(trans, "Scheduler BC Table allocation failed\n");
		goto error;
	}

	/* Alloc keep-warm buffer */
	ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->kw, IWL_KW_SIZE);
	if (ret) {
		IWL_ERR(trans, "Keep Warm allocation failed\n");
		goto error;
	}

	trans_pcie->txq_memory = kcalloc(trans->cfg->base_params->num_of_queues,
					 sizeof(struct iwl_txq), GFP_KERNEL);
	if (!trans_pcie->txq_memory) {
		IWL_ERR(trans, "Not enough memory for txq\n");
		ret = -ENOMEM;
		goto error;
	}

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		trans_pcie->txq[txq_id] = &trans_pcie->txq_memory[txq_id];
		ret = iwl_pcie_txq_alloc(trans, trans_pcie->txq[txq_id],
					 slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue alloc failed\n", txq_id);
			goto error;
		}
		trans_pcie->txq[txq_id]->id = txq_id;
	}

	return 0;

error:
	iwl_pcie_tx_free(trans);

	return ret;
}

int iwl_pcie_tx_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;
	int txq_id, slots_num;
	bool alloc = false;

	if (!trans_pcie->txq_memory) {
		ret = iwl_pcie_tx_alloc(trans);
		if (ret)
			goto error;
		alloc = true;
	}

	spin_lock(&trans_pcie->irq_lock);

	/* Turn off all Tx DMA fifos */
	iwl_scd_deactivate_fifos(trans);

	/* Tell NIC where to find the "keep warm" buffer */
	iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
			   trans_pcie->kw.dma >> 4);

	spin_unlock(&trans_pcie->irq_lock);

	/* Alloc and init all Tx queues, including the command queue (#4/#9) */
	for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
	     txq_id++) {
		bool cmd_queue = (txq_id == trans_pcie->cmd_queue);

		slots_num = cmd_queue ? TFD_CMD_SLOTS : TFD_TX_CMD_SLOTS;
		ret = iwl_pcie_txq_init(trans, trans_pcie->txq[txq_id],
					slots_num, cmd_queue);
		if (ret) {
			IWL_ERR(trans, "Tx %d queue init failed\n", txq_id);
			goto error;
		}

		/*
		 * Tell nic where to find circular buffer of TFDs for a
		 * given Tx queue, and enable the DMA channel used for that
		 * queue.
		 * Circular buffer (TFD queue in DRAM) physical base address
		 */
		iwl_write_direct32(trans, FH_MEM_CBBC_QUEUE(trans, txq_id),
				   trans_pcie->txq[txq_id]->dma_addr >> 8);
	}

	iwl_set_bits_prph(trans, SCD_GP_CTRL, SCD_GP_CTRL_AUTO_ACTIVE_MODE);
	if (trans->cfg->base_params->num_of_queues > 20)
		iwl_set_bits_prph(trans, SCD_GP_CTRL,
				  SCD_GP_CTRL_ENABLE_31_QUEUES);

	return 0;
error:
	/* Upon error, free only if we allocated something */
	if (alloc)
		iwl_pcie_tx_free(trans);
	return ret;
}

static inline void iwl_pcie_txq_progress(struct iwl_txq *txq)
{
	lockdep_assert_held(&txq->lock);

	if (!txq->wd_timeout)
		return;

	/*
	 * station is asleep and we send data - that must
	 * be uAPSD or PS-Poll. Don't rearm the timer.
	 */
	if (txq->frozen)
		return;

	/*
	 * if empty delete timer, otherwise move timer forward
	 * since we're making progress on this queue
	 */
	if (txq->read_ptr == txq->write_ptr)
		del_timer(&txq->stuck_timer);
	else
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);
}

/* Frees buffers until index _not_ inclusive */
void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn,
			    struct sk_buff_head *skbs)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int tfd_num = iwl_pcie_get_cmd_index(txq, ssn);
	int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr);
	int last_to_free;

	/* This function is not meant to release the cmd queue */
	if (WARN_ON(txq_id == trans_pcie->cmd_queue))
		return;

	spin_lock_bh(&txq->lock);

	if (!test_bit(txq_id, trans_pcie->queue_used)) {
		IWL_DEBUG_TX_QUEUES(trans, "Q %d inactive - ignoring idx %d\n",
				    txq_id, ssn);
		goto out;
	}

	if (read_ptr == tfd_num)
		goto out;

	IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n",
			   txq_id, txq->read_ptr, tfd_num, ssn);

	/* Since we free until index _not_ inclusive, the one before index is
	 * the last we will free. This one must be used */
	last_to_free = iwl_queue_dec_wrap(trans, tfd_num);

	if (!iwl_queue_used(txq, last_to_free)) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, last_to_free,
			trans->cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		goto out;
	}

	if (WARN_ON(!skb_queue_empty(skbs)))
		goto out;

	for (;
	     read_ptr != tfd_num;
	     txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr),
	     read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) {
		struct sk_buff *skb = txq->entries[read_ptr].skb;

		if (WARN_ON_ONCE(!skb))
			continue;

		iwl_pcie_free_tso_page(trans_pcie, skb);

		__skb_queue_tail(skbs, skb);

		txq->entries[read_ptr].skb = NULL;

		if (!trans->cfg->use_tfh)
			iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq);

		iwl_pcie_txq_free_tfd(trans, txq);
	}

	iwl_pcie_txq_progress(txq);

	if (iwl_queue_space(trans, txq) > txq->low_mark &&
	    test_bit(txq_id, trans_pcie->queue_stopped)) {
		struct sk_buff_head overflow_skbs;

		__skb_queue_head_init(&overflow_skbs);
		skb_queue_splice_init(&txq->overflow_q, &overflow_skbs);

		/*
		 * This is tricky: we are in the reclaim path, which is non
		 * re-entrant, so no one will try to access the txq data
		 * from that path. We stopped tx, so we can't have tx as well.
		 * Bottom line, we can unlock and re-lock later.
		 */
		spin_unlock_bh(&txq->lock);

		while (!skb_queue_empty(&overflow_skbs)) {
			struct sk_buff *skb = __skb_dequeue(&overflow_skbs);
			struct iwl_device_cmd *dev_cmd_ptr;

			dev_cmd_ptr = *(void **)((u8 *)skb->cb +
						 trans_pcie->dev_cmd_offs);

			/*
			 * Note that we can very well be overflowing again.
			 * In that case, iwl_queue_space will be small again
			 * and we won't wake mac80211's queue.
			 */
			iwl_trans_tx(trans, skb, dev_cmd_ptr, txq_id);
		}
		spin_lock_bh(&txq->lock);

		if (iwl_queue_space(trans, txq) > txq->low_mark)
			iwl_wake_queue(trans, txq);
	}

	if (txq->read_ptr == txq->write_ptr) {
		IWL_DEBUG_RPM(trans, "Q %d - last tx reclaimed\n", txq->id);
		iwl_trans_unref(trans);
	}

out:
	spin_unlock_bh(&txq->lock);
}
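
/*
 * Illustrative sketch (not part of the driver, compiled out): reclaim
 * frees entries up to but not including the target index, so the last
 * entry actually freed is the one just before it in the ring (with
 * wrap-around), which is what iwl_queue_dec_wrap() computes above.
 */
#if 0
static u32 example_last_to_free(u32 tfd_num)
{
	const u32 size = 256;	/* example power-of-two queue size */

	return (tfd_num - 1) & (size - 1);
}
#endif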

static int iwl_pcie_set_cmd_in_flight(struct iwl_trans *trans,
				      const struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	const struct iwl_cfg *cfg = trans->cfg;
	int ret;

	lockdep_assert_held(&trans_pcie->reg_lock);

	if (!(cmd->flags & CMD_SEND_IN_IDLE) &&
	    !trans_pcie->ref_cmd_in_flight) {
		trans_pcie->ref_cmd_in_flight = true;
		IWL_DEBUG_RPM(trans, "set ref_cmd_in_flight - ref\n");
		iwl_trans_ref(trans);
	}

	/*
	 * wake up the NIC to make sure that the firmware will see the host
	 * command - we will let the NIC sleep once all the host commands
	 * returned. This needs to be done only on NICs that have
	 * apmg_wake_up_wa set.
	 */
	if (cfg->base_params->apmg_wake_up_wa &&
	    !trans_pcie->cmd_hold_nic_awake) {
		__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
					 BIT(cfg->csr->flag_mac_access_req));

		ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
				   BIT(cfg->csr->flag_val_mac_access_en),
				   (BIT(cfg->csr->flag_mac_clock_ready) |
				    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
				   15000);
		if (ret < 0) {
			__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					BIT(cfg->csr->flag_mac_access_req));
			IWL_ERR(trans, "Failed to wake NIC for hcmd\n");
			return -EIO;
		}
		trans_pcie->cmd_hold_nic_awake = true;
	}

	return 0;
}

/*
 * iwl_pcie_cmdq_reclaim - Reclaim TX command queue entries already Tx'd
 *
 * When FW advances 'R' index, all entries between old and new 'R' index
 * need to be reclaimed. As a result, some free space forms.  If there is
 * enough free space (> low mark), wake the stack that feeds us.
 */
static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	unsigned long flags;
	int nfreed = 0;
	u16 r;

	lockdep_assert_held(&txq->lock);

	idx = iwl_pcie_get_cmd_index(txq, idx);
	r = iwl_pcie_get_cmd_index(txq, txq->read_ptr);

	if (idx >= trans->cfg->base_params->max_tfd_queue_size ||
	    (!iwl_queue_used(txq, idx))) {
		IWL_ERR(trans,
			"%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n",
			__func__, txq_id, idx,
			trans->cfg->base_params->max_tfd_queue_size,
			txq->write_ptr, txq->read_ptr);
		return;
	}

	for (idx = iwl_queue_inc_wrap(trans, idx); r != idx;
	     r = iwl_queue_inc_wrap(trans, r)) {
		txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr);

		if (nfreed++ > 0) {
			IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n",
				idx, txq->write_ptr, r);
			iwl_force_nmi(trans);
		}
	}

	if (txq->read_ptr == txq->write_ptr) {
		spin_lock_irqsave(&trans_pcie->reg_lock, flags);
		iwl_pcie_clear_cmd_in_flight(trans);
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
	}

	iwl_pcie_txq_progress(txq);
}

static int iwl_pcie_txq_set_ratid_map(struct iwl_trans *trans, u16 ra_tid,
				 u16 txq_id)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 tbl_dw_addr;
	u32 tbl_dw;
	u16 scd_q2ratid;

	scd_q2ratid = ra_tid & SCD_QUEUE_RA_TID_MAP_RATID_MSK;

	tbl_dw_addr = trans_pcie->scd_base_addr +
			SCD_TRANS_TBL_OFFSET_QUEUE(txq_id);

	tbl_dw = iwl_trans_read_mem32(trans, tbl_dw_addr);

	if (txq_id & 0x1)
		tbl_dw = (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	else
		tbl_dw = scd_q2ratid | (tbl_dw & 0xFFFF0000);

	iwl_trans_write_mem32(trans, tbl_dw_addr, tbl_dw);

	return 0;
}

/* Receiver address (actually, Rx station's index into station table),
 * combined with Traffic ID (QOS priority), in format used by Tx Scheduler */
#define BUILD_RAxTID(sta_id, tid)	(((sta_id) << 4) + (tid))
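
/*
 * Illustrative sketch (not part of the driver, compiled out): the
 * RA/TID value packs the station index into bits 4+ and the TID into
 * the low 4 bits; the translation table then stores two such 16-bit
 * entries per dword, even queues in the low half, odd queues in the
 * high half, as iwl_pcie_txq_set_ratid_map() does above.
 */
#if 0
static u32 example_merge_ratid_dword(u32 tbl_dw, u16 scd_q2ratid, u16 txq_id)
{
	if (txq_id & 0x1)	/* odd queue -> upper 16 bits */
		return (scd_q2ratid << 16) | (tbl_dw & 0x0000FFFF);
	/* even queue -> lower 16 bits */
	return scd_q2ratid | (tbl_dw & 0xFFFF0000);
}
#endif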

bool iwl_trans_pcie_txq_enable(struct iwl_trans *trans, int txq_id, u16 ssn,
			       const struct iwl_trans_txq_scd_cfg *cfg,
			       unsigned int wdg_timeout)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];
	int fifo = -1;
	bool scd_bug = false;

	if (test_and_set_bit(txq_id, trans_pcie->queue_used))
		WARN_ONCE(1, "queue %d already used - expect issues", txq_id);

	txq->wd_timeout = msecs_to_jiffies(wdg_timeout);

	if (cfg) {
		fifo = cfg->fifo;

		/* Disable the scheduler prior to configuring the cmd queue */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, 0);

		/* Stop this Tx queue before configuring it */
		iwl_scd_txq_set_inactive(trans, txq_id);

		/* Set this queue as a chain-building queue unless it is CMD */
		if (txq_id != trans_pcie->cmd_queue)
			iwl_scd_txq_set_chain(trans, txq_id);

		if (cfg->aggregate) {
			u16 ra_tid = BUILD_RAxTID(cfg->sta_id, cfg->tid);

			/* Map receiver-address / traffic-ID to this queue */
			iwl_pcie_txq_set_ratid_map(trans, ra_tid, txq_id);

			/* enable aggregations for the queue */
			iwl_scd_txq_enable_agg(trans, txq_id);
			txq->ampdu = true;
		} else {
			/*
			 * disable aggregations for the queue, this will also
			 * make the ra_tid mapping configuration irrelevant
			 * since it is now a non-AGG queue.
			 */
			iwl_scd_txq_disable_agg(trans, txq_id);

			ssn = txq->read_ptr;
		}
	} else {
		/*
		 * If we need to move the SCD write pointer by steps of
		 * 0x40, 0x80 or 0xc0, it gets stuck. Avoid this and let
		 * the op_mode know by returning true later.
		 * Do this only in case cfg is NULL since this trick can
		 * be done only if we have DQA enabled which is true for mvm
		 * only. And mvm never sets a cfg pointer.
		 * This is really ugly, but this is the easiest way out for
		 * this sad hardware issue.
		 * This bug has been fixed on devices 9000 and up.
		 */
		scd_bug = !trans->cfg->mq_rx_supported &&
			!((ssn - txq->write_ptr) & 0x3f) &&
			(ssn != txq->write_ptr);
		if (scd_bug)
			ssn++;
	}

	/* Place first TFD at index corresponding to start sequence number.
	 * Assumes that ssn_idx is valid (!= 0xFFF) */
	txq->read_ptr = (ssn & 0xff);
	txq->write_ptr = (ssn & 0xff);
	iwl_write_direct32(trans, HBUS_TARG_WRPTR,
			   (ssn & 0xff) | (txq_id << 8));

	if (cfg) {
		u8 frame_limit = cfg->frame_limit;

		iwl_write_prph(trans, SCD_QUEUE_RDPTR(txq_id), ssn);

		/* Set up Tx window size and frame limit for this queue */
		iwl_trans_write_mem32(trans, trans_pcie->scd_base_addr +
				SCD_CONTEXT_QUEUE_OFFSET(txq_id), 0);
		iwl_trans_write_mem32(trans,
			trans_pcie->scd_base_addr +
			SCD_CONTEXT_QUEUE_OFFSET(txq_id) + sizeof(u32),
			SCD_QUEUE_CTX_REG2_VAL(WIN_SIZE, frame_limit) |
			SCD_QUEUE_CTX_REG2_VAL(FRAME_LIMIT, frame_limit));

		/* Set up status area in SRAM, map to Tx DMA/FIFO, activate */
		iwl_write_prph(trans, SCD_QUEUE_STATUS_BITS(txq_id),
			       (1 << SCD_QUEUE_STTS_REG_POS_ACTIVE) |
			       (cfg->fifo << SCD_QUEUE_STTS_REG_POS_TXF) |
			       (1 << SCD_QUEUE_STTS_REG_POS_WSL) |
			       SCD_QUEUE_STTS_REG_MSK);

		/* enable the scheduler for this queue (only) */
		if (txq_id == trans_pcie->cmd_queue &&
		    trans_pcie->scd_set_active)
			iwl_scd_enable_set_active(trans, BIT(txq_id));

		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d on FIFO %d WrPtr: %d\n",
				    txq_id, fifo, ssn & 0xff);
	} else {
		IWL_DEBUG_TX_QUEUES(trans,
				    "Activate queue %d WrPtr: %d\n",
				    txq_id, ssn & 0xff);
	}

	return scd_bug;
}
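
/*
 * Illustrative sketch (not part of the driver, compiled out): the SCD
 * write-pointer bug above only bites when the pointer would move by a
 * non-zero multiple of 0x40; masking the pointer delta with 0x3f
 * detects exactly that case, and bumping ssn by one sidesteps it.
 */
#if 0
static bool example_scd_bug_hit(u16 ssn, u16 write_ptr)
{
	u16 delta = ssn - write_ptr;

	return delta != 0 && !(delta & 0x3f);	/* 0x40, 0x80, 0xc0, ... */
}
#endif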

void iwl_trans_pcie_txq_set_shared_mode(struct iwl_trans *trans, u32 txq_id,
					bool shared_mode)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[txq_id];

	txq->ampdu = !shared_mode;
}

void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id,
				bool configure_scd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 stts_addr = trans_pcie->scd_base_addr +
			SCD_TX_STTS_QUEUE_OFFSET(txq_id);
	static const u32 zero_val[4] = {};

	trans_pcie->txq[txq_id]->frozen_expiry_remainder = 0;
	trans_pcie->txq[txq_id]->frozen = false;

	/*
	 * Upon HW Rfkill - we stop the device, and then stop the queues
	 * in the op_mode. Just for the sake of the simplicity of the op_mode,
	 * allow the op_mode to call txq_disable after it already called
	 * stop_device.
	 */
	if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
		WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
			  "queue %d not used", txq_id);
		return;
	}

	if (configure_scd) {
		iwl_scd_txq_set_inactive(trans, txq_id);

		iwl_trans_write_mem(trans, stts_addr, (void *)zero_val,
				    ARRAY_SIZE(zero_val));
	}

	iwl_pcie_txq_unmap(trans, txq_id);
	trans_pcie->txq[txq_id]->ampdu = false;

	IWL_DEBUG_TX_QUEUES(trans, "Deactivate queue %d\n", txq_id);
}

/*************** HOST COMMAND QUEUE FUNCTIONS   *****/

/*
 * iwl_pcie_enqueue_hcmd - enqueue a uCode command
 * @trans: transport private data
 * @cmd: a pointer to the ucode command structure
 *
 * The function returns < 0 values to indicate the operation
 * failed. On success, it returns the index (>= 0) of the command in the
 * command queue.
 */
static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
				 struct iwl_host_cmd *cmd)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
	struct iwl_device_cmd *out_cmd;
	struct iwl_cmd_meta *out_meta;
	unsigned long flags;
	void *dup_buf = NULL;
	dma_addr_t phys_addr;
	int idx;
	u16 copy_size, cmd_size, tb0_size;
	bool had_nocopy = false;
	u8 group_id = iwl_cmd_groupid(cmd->id);
	int i, ret;
	u32 cmd_pos;
	const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
	u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];

	if (WARN(!trans->wide_cmd_header &&
		 group_id > IWL_ALWAYS_LONG_GROUP,
		 "unsupported wide command %#x\n", cmd->id))
		return -EINVAL;

	if (group_id != 0) {
		copy_size = sizeof(struct iwl_cmd_header_wide);
		cmd_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		copy_size = sizeof(struct iwl_cmd_header);
		cmd_size = sizeof(struct iwl_cmd_header);
	}

	/* need one for the header if the first is NOCOPY */
	BUILD_BUG_ON(IWL_MAX_CMD_TBS_PER_TFD > IWL_NUM_OF_TBS - 1);

	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		cmddata[i] = cmd->data[i];
		cmdlen[i] = cmd->len[i];

		if (!cmd->len[i])
			continue;

		/* need at least IWL_FIRST_TB_SIZE copied */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			int copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmdlen[i])
				copy = cmdlen[i];
			cmdlen[i] -= copy;
			cmddata[i] += copy;
			copy_size += copy;
		}

		if (cmd->dataflags[i] & IWL_HCMD_DFL_NOCOPY) {
			had_nocopy = true;
			if (WARN_ON(cmd->dataflags[i] & IWL_HCMD_DFL_DUP)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
		} else if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP) {
			/*
			 * This is also a chunk that isn't copied
			 * to the static buffer so set had_nocopy.
			 */
			had_nocopy = true;

			/* only allowed once */
			if (WARN_ON(dup_buf)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}

			dup_buf = kmemdup(cmddata[i], cmdlen[i],
					  GFP_ATOMIC);
			if (!dup_buf)
				return -ENOMEM;
		} else {
			/* NOCOPY must not be followed by normal! */
			if (WARN_ON(had_nocopy)) {
				idx = -EINVAL;
				goto free_dup_buf;
			}
			copy_size += cmdlen[i];
		}
		cmd_size += cmd->len[i];
	}

	/*
	 * If any of the command structures end up being larger than
	 * the TFD_MAX_PAYLOAD_SIZE and they aren't dynamically
	 * allocated into separate TFDs, then we will need to
	 * increase the size of the buffers.
	 */
	if (WARN(copy_size > TFD_MAX_PAYLOAD_SIZE,
		 "Command %s (%#x) is too large (%d bytes)\n",
		 iwl_get_cmd_string(trans, cmd->id),
		 cmd->id, copy_size)) {
		idx = -EINVAL;
		goto free_dup_buf;
	}

	spin_lock_bh(&txq->lock);

	if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) {
		spin_unlock_bh(&txq->lock);

		IWL_ERR(trans, "No space in command queue\n");
		iwl_op_mode_cmd_queue_full(trans->op_mode);
		idx = -ENOSPC;
		goto free_dup_buf;
	}

	idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr);
	out_cmd = txq->entries[idx].cmd;
	out_meta = &txq->entries[idx].meta;

	memset(out_meta, 0, sizeof(*out_meta));	/* re-initialize to NULL */
	if (cmd->flags & CMD_WANT_SKB)
		out_meta->source = cmd;

	/* set up the header */
	if (group_id != 0) {
		out_cmd->hdr_wide.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr_wide.group_id = group_id;
		out_cmd->hdr_wide.version = iwl_cmd_version(cmd->id);
		out_cmd->hdr_wide.length =
			cpu_to_le16(cmd_size -
				    sizeof(struct iwl_cmd_header_wide));
		out_cmd->hdr_wide.reserved = 0;
		out_cmd->hdr_wide.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(txq->write_ptr));

		cmd_pos = sizeof(struct iwl_cmd_header_wide);
		copy_size = sizeof(struct iwl_cmd_header_wide);
	} else {
		out_cmd->hdr.cmd = iwl_cmd_opcode(cmd->id);
		out_cmd->hdr.sequence =
			cpu_to_le16(QUEUE_TO_SEQ(trans_pcie->cmd_queue) |
						 INDEX_TO_SEQ(txq->write_ptr));
		out_cmd->hdr.group_id = 0;

		cmd_pos = sizeof(struct iwl_cmd_header);
		copy_size = sizeof(struct iwl_cmd_header);
	}

	/* and copy the data that needs to be copied */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		int copy;

		if (!cmd->len[i])
			continue;

		/* copy everything if not nocopy/dup */
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP))) {
			copy = cmd->len[i];

			memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
			cmd_pos += copy;
			copy_size += copy;
			continue;
		}

		/*
		 * Otherwise we need at least IWL_FIRST_TB_SIZE copied
		 * in total (for bi-directional DMA), but copy up to what
		 * we can fit into the payload for debug dump purposes.
		 */
		copy = min_t(int, TFD_MAX_PAYLOAD_SIZE - cmd_pos, cmd->len[i]);

		memcpy((u8 *)out_cmd + cmd_pos, cmd->data[i], copy);
		cmd_pos += copy;

		/* However, treat copy_size the proper way, we need it below */
		if (copy_size < IWL_FIRST_TB_SIZE) {
			copy = IWL_FIRST_TB_SIZE - copy_size;

			if (copy > cmd->len[i])
				copy = cmd->len[i];
			copy_size += copy;
		}
	}

	IWL_DEBUG_HC(trans,
		     "Sending command %s (%.2x.%.2x), seq: 0x%04X, %d bytes at %d[%d]:%d\n",
		     iwl_get_cmd_string(trans, cmd->id),
		     group_id, out_cmd->hdr.cmd,
		     le16_to_cpu(out_cmd->hdr.sequence),
		     cmd_size, txq->write_ptr, idx, trans_pcie->cmd_queue);

	/* start the TFD with the minimum copy bytes */
	tb0_size = min_t(int, copy_size, IWL_FIRST_TB_SIZE);
	memcpy(&txq->first_tb_bufs[idx], &out_cmd->hdr, tb0_size);
	iwl_pcie_txq_build_tfd(trans, txq,
			       iwl_pcie_get_first_tb_dma(txq, idx),
			       tb0_size, true);

	/* map first command fragment, if any remains */
	if (copy_size > tb0_size) {
		phys_addr = dma_map_single(trans->dev,
					   ((u8 *)&out_cmd->hdr) + tb0_size,
					   copy_size - tb0_size,
					   DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr,
				       copy_size - tb0_size, false);
	}

	/* map the remaining (adjusted) nocopy/dup fragments */
	for (i = 0; i < IWL_MAX_CMD_TBS_PER_TFD; i++) {
		const void *data = cmddata[i];

		if (!cmdlen[i])
			continue;
		if (!(cmd->dataflags[i] & (IWL_HCMD_DFL_NOCOPY |
					   IWL_HCMD_DFL_DUP)))
			continue;
		if (cmd->dataflags[i] & IWL_HCMD_DFL_DUP)
			data = dup_buf;
		phys_addr = dma_map_single(trans->dev, (void *)data,
					   cmdlen[i], DMA_TO_DEVICE);
		if (dma_mapping_error(trans->dev, phys_addr)) {
			iwl_pcie_tfd_unmap(trans, out_meta, txq,
					   txq->write_ptr);
			idx = -ENOMEM;
			goto out;
		}

		iwl_pcie_txq_build_tfd(trans, txq, phys_addr, cmdlen[i], false);
	}

	BUILD_BUG_ON(IWL_TFH_NUM_TBS > sizeof(out_meta->tbs) * BITS_PER_BYTE);
	out_meta->flags = cmd->flags;
	if (WARN_ON_ONCE(txq->entries[idx].free_buf))
		kzfree(txq->entries[idx].free_buf);
	txq->entries[idx].free_buf = dup_buf;

	trace_iwlwifi_dev_hcmd(trans->dev, cmd, cmd_size, &out_cmd->hdr_wide);

	/* start timer if queue currently empty */
	if (txq->read_ptr == txq->write_ptr && txq->wd_timeout)
		mod_timer(&txq->stuck_timer, jiffies + txq->wd_timeout);

	spin_lock_irqsave(&trans_pcie->reg_lock, flags);
	ret = iwl_pcie_set_cmd_in_flight(trans, cmd);
	if (ret < 0) {
		idx = ret;
		spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
		goto out;
	}

	/* Increment and update queue's write index */
	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
	iwl_pcie_txq_inc_wr_ptr(trans, txq);

	spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);

 out:
	spin_unlock_bh(&txq->lock);
 free_dup_buf:
	if (idx < 0)
		kfree(dup_buf);
	return idx;
}
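
/*
 * Illustrative sketch (not part of the driver, compiled out): a host
 * command whose first chunk is marked NOCOPY still has its leading
 * IWL_FIRST_TB_SIZE bytes copied into the static first-TB buffer, so
 * the loop above trims those bytes off the NOCOPY fragment before it
 * is mapped. The command id and payload below are made-up examples.
 */
#if 0
static int example_send_nocopy_hcmd(struct iwl_trans *trans,
				    void *big_buf, u16 big_len)
{
	struct iwl_host_cmd cmd = {
		.id = 0x77,			/* made-up command id */
		.data = { big_buf, },
		.len = { big_len, },
		.dataflags = { IWL_HCMD_DFL_NOCOPY, },
	};

	return iwl_trans_send_cmd(trans, &cmd);
}
#endif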
1744 
1745 /*
1746  * iwl_pcie_hcmd_complete - Pull unused buffers off the queue and reclaim them
1747  * @rxb: Rx buffer to reclaim
1748  */
1749 void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
1750 			    struct iwl_rx_cmd_buffer *rxb)
1751 {
1752 	struct iwl_rx_packet *pkt = rxb_addr(rxb);
1753 	u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1754 	u8 group_id;
1755 	u32 cmd_id;
1756 	int txq_id = SEQ_TO_QUEUE(sequence);
1757 	int index = SEQ_TO_INDEX(sequence);
1758 	int cmd_index;
1759 	struct iwl_device_cmd *cmd;
1760 	struct iwl_cmd_meta *meta;
1761 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1762 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1763 
1764 	/* If a Tx command is being handled and it isn't in the actual
1765 	 * command queue, then a command routing bug has been introduced
1766 	 * in the queue management code. */
1767 	if (WARN(txq_id != trans_pcie->cmd_queue,
1768 		 "wrong command queue %d (should be %d), sequence 0x%X readp=%d writep=%d\n",
1769 		 txq_id, trans_pcie->cmd_queue, sequence, txq->read_ptr,
1770 		 txq->write_ptr)) {
1771 		iwl_print_hex_error(trans, pkt, 32);
1772 		return;
1773 	}
1774 
1775 	spin_lock_bh(&txq->lock);
1776 
1777 	cmd_index = iwl_pcie_get_cmd_index(txq, index);
1778 	cmd = txq->entries[cmd_index].cmd;
1779 	meta = &txq->entries[cmd_index].meta;
1780 	group_id = cmd->hdr.group_id;
1781 	cmd_id = iwl_cmd_id(cmd->hdr.cmd, group_id, 0);
1782 
1783 	iwl_pcie_tfd_unmap(trans, meta, txq, index);
1784 
1785 	/* Input error checking is done when commands are added to the queue. */
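	/* if the caller wants the response packet (CMD_WANT_SKB), steal the
	 * page from the RX buffer so it outlives this handler; the caller
	 * releases it via iwl_free_resp() */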
1786 	if (meta->flags & CMD_WANT_SKB) {
1787 		struct page *p = rxb_steal_page(rxb);
1788 
1789 		meta->source->resp_pkt = pkt;
1790 		meta->source->_rx_page_addr = (unsigned long)page_address(p);
1791 		meta->source->_rx_page_order = trans_pcie->rx_page_order;
1792 	}
1793 
1794 	if (meta->flags & CMD_WANT_ASYNC_CALLBACK)
1795 		iwl_op_mode_async_cb(trans->op_mode, cmd);
1796 
1797 	iwl_pcie_cmdq_reclaim(trans, txq_id, index);
1798 
1799 	if (!(meta->flags & CMD_ASYNC)) {
1800 		if (!test_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status)) {
1801 			IWL_WARN(trans,
1802 				 "HCMD_ACTIVE already clear for command %s\n",
1803 				 iwl_get_cmd_string(trans, cmd_id));
1804 		}
1805 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1806 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1807 			       iwl_get_cmd_string(trans, cmd_id));
1808 		wake_up(&trans_pcie->wait_command_queue);
1809 	}
1810 
1811 	if (meta->flags & CMD_MAKE_TRANS_IDLE) {
1812 		IWL_DEBUG_INFO(trans, "complete %s - mark trans as idle\n",
1813 			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
1814 		set_bit(STATUS_TRANS_IDLE, &trans->status);
1815 		wake_up(&trans_pcie->d0i3_waitq);
1816 	}
1817 
1818 	if (meta->flags & CMD_WAKE_UP_TRANS) {
1819 		IWL_DEBUG_INFO(trans, "complete %s - clear trans idle flag\n",
1820 			       iwl_get_cmd_string(trans, cmd->hdr.cmd));
1821 		clear_bit(STATUS_TRANS_IDLE, &trans->status);
1822 		wake_up(&trans_pcie->d0i3_waitq);
1823 	}
1824 
1825 	meta->flags = 0;
1826 
1827 	spin_unlock_bh(&txq->lock);
1828 }
1829 
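/* how long to wait for a synchronous host command to complete: 2 seconds */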
1830 #define HOST_COMPLETE_TIMEOUT	(2 * HZ)
1831 
1832 static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
1833 				    struct iwl_host_cmd *cmd)
1834 {
1835 	int ret;
1836 
1837 	/* An asynchronous command cannot expect an SKB to be set. */
1838 	if (WARN_ON(cmd->flags & CMD_WANT_SKB))
1839 		return -EINVAL;
1840 
1841 	ret = iwl_pcie_enqueue_hcmd(trans, cmd);
1842 	if (ret < 0) {
1843 		IWL_ERR(trans,
1844 			"Error sending %s: enqueue_hcmd failed: %d\n",
1845 			iwl_get_cmd_string(trans, cmd->id), ret);
1846 		return ret;
1847 	}
1848 	return 0;
1849 }
1850 
1851 static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
1852 				   struct iwl_host_cmd *cmd)
1853 {
1854 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1855 	struct iwl_txq *txq = trans_pcie->txq[trans_pcie->cmd_queue];
1856 	int cmd_idx;
1857 	int ret;
1858 
1859 	IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
1860 		       iwl_get_cmd_string(trans, cmd->id));
1861 
1862 	if (WARN(test_and_set_bit(STATUS_SYNC_HCMD_ACTIVE,
1863 				  &trans->status),
1864 		 "Command %s: a command is already active!\n",
1865 		 iwl_get_cmd_string(trans, cmd->id)))
1866 		return -EIO;
1867 
1868 	IWL_DEBUG_INFO(trans, "Setting HCMD_ACTIVE for command %s\n",
1869 		       iwl_get_cmd_string(trans, cmd->id));
1870 
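	/* if the device is runtime-suspended (D0i3), wait for it to become
	 * active again before queueing the command */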
1871 	if (pm_runtime_suspended(&trans_pcie->pci_dev->dev)) {
1872 		ret = wait_event_timeout(trans_pcie->d0i3_waitq,
1873 				 pm_runtime_active(&trans_pcie->pci_dev->dev),
1874 				 msecs_to_jiffies(IWL_TRANS_IDLE_TIMEOUT));
1875 		if (!ret) {
1876 			IWL_ERR(trans, "Timeout exiting D0i3 before hcmd\n");
1877 			return -ETIMEDOUT;
1878 		}
1879 	}
1880 
1881 	cmd_idx = iwl_pcie_enqueue_hcmd(trans, cmd);
1882 	if (cmd_idx < 0) {
1883 		ret = cmd_idx;
1884 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1885 		IWL_ERR(trans,
1886 			"Error sending %s: enqueue_hcmd failed: %d\n",
1887 			iwl_get_cmd_string(trans, cmd->id), ret);
1888 		return ret;
1889 	}
1890 
1891 	ret = wait_event_timeout(trans_pcie->wait_command_queue,
1892 				 !test_bit(STATUS_SYNC_HCMD_ACTIVE,
1893 					   &trans->status),
1894 				 HOST_COMPLETE_TIMEOUT);
1895 	if (!ret) {
1896 		IWL_ERR(trans, "Error sending %s: time out after %dms.\n",
1897 			iwl_get_cmd_string(trans, cmd->id),
1898 			jiffies_to_msecs(HOST_COMPLETE_TIMEOUT));
1899 
1900 		IWL_ERR(trans, "Current CMD queue read_ptr %d write_ptr %d\n",
1901 			txq->read_ptr, txq->write_ptr);
1902 
1903 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1904 		IWL_DEBUG_INFO(trans, "Clearing HCMD_ACTIVE for command %s\n",
1905 			       iwl_get_cmd_string(trans, cmd->id));
1906 		ret = -ETIMEDOUT;
1907 
1908 		iwl_force_nmi(trans);
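		/* force the firmware to assert via an NMI so it can dump
		 * useful debug state, then report a firmware error */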
1909 		iwl_trans_fw_error(trans);
1910 
1911 		goto cancel;
1912 	}
1913 
1914 	if (test_bit(STATUS_FW_ERROR, &trans->status)) {
1915 		iwl_trans_dump_regs(trans);
1916 		IWL_ERR(trans, "FW error in SYNC CMD %s\n",
1917 			iwl_get_cmd_string(trans, cmd->id));
1918 		dump_stack();
1919 		ret = -EIO;
1920 		goto cancel;
1921 	}
1922 
1923 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1924 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1925 		IWL_DEBUG_RF_KILL(trans, "RFKILL in SYNC CMD... no rsp\n");
1926 		ret = -ERFKILL;
1927 		goto cancel;
1928 	}
1929 
1930 	if ((cmd->flags & CMD_WANT_SKB) && !cmd->resp_pkt) {
1931 		IWL_ERR(trans, "Error: Response NULL in '%s'\n",
1932 			iwl_get_cmd_string(trans, cmd->id));
1933 		ret = -EIO;
1934 		goto cancel;
1935 	}
1936 
1937 	return 0;
1938 
1939 cancel:
1940 	if (cmd->flags & CMD_WANT_SKB) {
1941 		/*
1942 		 * Cancel the CMD_WANT_SKB flag for the cmd in the
1943 		 * TX cmd queue. Otherwise, if the response comes
1944 		 * in later, the completion path could write through
1945 		 * a stale pointer (cmd->meta.source).
1946 		 */
1947 		txq->entries[cmd_idx].meta.flags &= ~CMD_WANT_SKB;
1948 	}
1949 
1950 	if (cmd->resp_pkt) {
1951 		iwl_free_resp(cmd);
1952 		cmd->resp_pkt = NULL;
1953 	}
1954 
1955 	return ret;
1956 }
1957 
1958 int iwl_trans_pcie_send_hcmd(struct iwl_trans *trans, struct iwl_host_cmd *cmd)
1959 {
1960 	if (!(cmd->flags & CMD_SEND_IN_RFKILL) &&
1961 	    test_bit(STATUS_RFKILL_OPMODE, &trans->status)) {
1962 		IWL_DEBUG_RF_KILL(trans, "Dropping CMD 0x%x: RF KILL\n",
1963 				  cmd->id);
1964 		return -ERFKILL;
1965 	}
1966 
1967 	if (cmd->flags & CMD_ASYNC)
1968 		return iwl_pcie_send_hcmd_async(trans, cmd);
1969 
1970 	/* We still can fail on RFKILL that can be asserted while we wait */
1971 	return iwl_pcie_send_hcmd_sync(trans, cmd);
1972 }
1973 
1974 static int iwl_fill_data_tbs(struct iwl_trans *trans, struct sk_buff *skb,
1975 			     struct iwl_txq *txq, u8 hdr_len,
1976 			     struct iwl_cmd_meta *out_meta,
1977 			     struct iwl_device_cmd *dev_cmd, u16 tb1_len)
1978 {
1979 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1980 	u16 tb2_len;
1981 	int i;
1982 
1983 	/*
1984 	 * Set up TFD's third entry to point directly to remainder
1985 	 * of skb's head, if any
1986 	 */
1987 	tb2_len = skb_headlen(skb) - hdr_len;
1988 
1989 	if (tb2_len > 0) {
1990 		dma_addr_t tb2_phys = dma_map_single(trans->dev,
1991 						     skb->data + hdr_len,
1992 						     tb2_len, DMA_TO_DEVICE);
1993 		if (unlikely(dma_mapping_error(trans->dev, tb2_phys))) {
1994 			iwl_pcie_tfd_unmap(trans, out_meta, txq,
1995 					   txq->write_ptr);
1996 			return -EINVAL;
1997 		}
1998 		iwl_pcie_txq_build_tfd(trans, txq, tb2_phys, tb2_len, false);
1999 	}
2000 
2001 	/* set up the remaining entries to point to the data */
2002 	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2003 		const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2004 		dma_addr_t tb_phys;
2005 		int tb_idx;
2006 
2007 		if (!skb_frag_size(frag))
2008 			continue;
2009 
2010 		tb_phys = skb_frag_dma_map(trans->dev, frag, 0,
2011 					   skb_frag_size(frag), DMA_TO_DEVICE);
2012 
2013 		if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
2014 			iwl_pcie_tfd_unmap(trans, out_meta, txq,
2015 					   txq->write_ptr);
2016 			return -EINVAL;
2017 		}
2018 		tb_idx = iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2019 						skb_frag_size(frag), false);
2020 
2021 		out_meta->tbs |= BIT(tb_idx);
2022 	}
2023 
2024 	trace_iwlwifi_dev_tx(trans->dev, skb,
2025 			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2026 			     trans_pcie->tfd_size,
2027 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len,
2028 			     hdr_len);
2029 	trace_iwlwifi_dev_tx_data(trans->dev, skb, hdr_len);
2030 	return 0;
2031 }
2032 
2033 #ifdef CONFIG_INET
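/*
 * Return the per-CPU page used for building TSO/A-MSDU subframe headers,
 * allocating a fresh page when the current one can't hold @len more bytes.
 */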
2034 struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len)
2035 {
2036 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2037 	struct iwl_tso_hdr_page *p = this_cpu_ptr(trans_pcie->tso_hdr_page);
2038 
2039 	if (!p->page)
2040 		goto alloc;
2041 
2042 	/* enough room on this page */
2043 	if (p->pos + len < (u8 *)page_address(p->page) + PAGE_SIZE)
2044 		return p;
2045 
2046 	/* We don't have enough room on this page, get a new one. */
2047 	__free_page(p->page);
2048 
2049 alloc:
2050 	p->page = alloc_page(GFP_ATOMIC);
2051 	if (!p->page)
2052 		return NULL;
2053 	p->pos = page_address(p->page);
2054 	return p;
2055 }
2056 
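/*
 * Seed tcph->check with the TCP pseudo-header checksum (and, for IPv4,
 * refresh the IP header checksum) so that the SW checksum path only has
 * to fold in the payload checksum afterwards.
 */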
2057 static void iwl_compute_pseudo_hdr_csum(void *iph, struct tcphdr *tcph,
2058 					bool ipv6, unsigned int len)
2059 {
2060 	if (ipv6) {
2061 		struct ipv6hdr *iphv6 = iph;
2062 
2063 		tcph->check = ~csum_ipv6_magic(&iphv6->saddr, &iphv6->daddr,
2064 					       len + tcph->doff * 4,
2065 					       IPPROTO_TCP, 0);
2066 	} else {
2067 		struct iphdr *iphv4 = iph;
2068 
2069 		ip_send_check(iphv4);
2070 		tcph->check = ~csum_tcpudp_magic(iphv4->saddr, iphv4->daddr,
2071 						 len + tcph->doff * 4,
2072 						 IPPROTO_TCP, 0);
2073 	}
2074 }
2075 
2076 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2077 				   struct iwl_txq *txq, u8 hdr_len,
2078 				   struct iwl_cmd_meta *out_meta,
2079 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2080 {
2081 	struct iwl_tx_cmd *tx_cmd = (void *)dev_cmd->payload;
2082 	struct iwl_trans_pcie *trans_pcie = txq->trans_pcie;
2083 	struct ieee80211_hdr *hdr = (void *)skb->data;
2084 	unsigned int snap_ip_tcp_hdrlen, ip_hdrlen, total_len, hdr_room;
2085 	unsigned int mss = skb_shinfo(skb)->gso_size;
2086 	u16 length, iv_len, amsdu_pad;
2087 	u8 *start_hdr;
2088 	struct iwl_tso_hdr_page *hdr_page;
2089 	struct page **page_ptr;
2090 	int ret;
2091 	struct tso_t tso;
2092 
2093 	/* if the packet is protected, then it must be CCMP or GCMP */
2094 	BUILD_BUG_ON(IEEE80211_CCMP_HDR_LEN != IEEE80211_GCMP_HDR_LEN);
2095 	iv_len = ieee80211_has_protected(hdr->frame_control) ?
2096 		IEEE80211_CCMP_HDR_LEN : 0;
2097 
2098 	trace_iwlwifi_dev_tx(trans->dev, skb,
2099 			     iwl_pcie_get_tfd(trans, txq, txq->write_ptr),
2100 			     trans_pcie->tfd_size,
2101 			     &dev_cmd->hdr, IWL_FIRST_TB_SIZE + tb1_len, 0);
2102 
2103 	ip_hdrlen = skb_transport_header(skb) - skb_network_header(skb);
2104 	snap_ip_tcp_hdrlen = 8 + ip_hdrlen + tcp_hdrlen(skb);
2105 	total_len = skb->len - snap_ip_tcp_hdrlen - hdr_len - iv_len;
2106 	amsdu_pad = 0;
2107 
2108 	/* total amount of header we may need for this A-MSDU */
2109 	hdr_room = DIV_ROUND_UP(total_len, mss) *
2110 		(3 + snap_ip_tcp_hdrlen + sizeof(struct ethhdr)) + iv_len;
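	/* per subframe: up to 3 bytes of alignment pad, the subframe
	 * (ethernet-style) header, and the SNAP/IP/TCP headers; plus one
	 * copy of the IV up front */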
2111 
2112 	/* Our device supports at most 9 segments, so the headers fit in one page */
2113 	hdr_page = get_page_hdr(trans, hdr_room);
2114 	if (!hdr_page)
2115 		return -ENOMEM;
2116 
2117 	get_page(hdr_page->page);
2118 	start_hdr = hdr_page->pos;
2119 	page_ptr = (void *)((u8 *)skb->cb + trans_pcie->page_offs);
2120 	*page_ptr = hdr_page->page;
2121 	memcpy(hdr_page->pos, skb->data + hdr_len, iv_len);
2122 	hdr_page->pos += iv_len;
2123 
2124 	/*
2125 	 * Pull the ieee80211 header + IV so we can use the TSO core;
2126 	 * we will restore them for the tx_status flow.
2127 	 */
2128 	skb_pull(skb, hdr_len + iv_len);
2129 
2130 	/*
2131 	 * Subtract the length of all the headers that are not carried
2132 	 * in the MPDU by themselves, but that we duplicate into each
2133 	 * of the MSDUs inside the A-MSDU.
2134 	 */
2135 	le16_add_cpu(&tx_cmd->len, -snap_ip_tcp_hdrlen);
2136 
2137 	tso_start(skb, &tso);
2138 
2139 	while (total_len) {
2140 		/* this is the data left for this subframe */
2141 		unsigned int data_left =
2142 			min_t(unsigned int, mss, total_len);
2143 		struct sk_buff *csum_skb = NULL;
2144 		unsigned int hdr_tb_len;
2145 		dma_addr_t hdr_tb_phys;
2146 		struct tcphdr *tcph;
2147 		u8 *iph, *subf_hdrs_start = hdr_page->pos;
2148 
2149 		total_len -= data_left;
2150 
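		/* close the previous subframe with its alignment pad, then
		 * compute the pad the current subframe will need */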
2151 		memset(hdr_page->pos, 0, amsdu_pad);
2152 		hdr_page->pos += amsdu_pad;
2153 		amsdu_pad = (4 - (sizeof(struct ethhdr) + snap_ip_tcp_hdrlen +
2154 				  data_left)) & 0x3;
2155 		ether_addr_copy(hdr_page->pos, ieee80211_get_DA(hdr));
2156 		hdr_page->pos += ETH_ALEN;
2157 		ether_addr_copy(hdr_page->pos, ieee80211_get_SA(hdr));
2158 		hdr_page->pos += ETH_ALEN;
2159 
2160 		length = snap_ip_tcp_hdrlen + data_left;
2161 		*((__be16 *)hdr_page->pos) = cpu_to_be16(length);
2162 		hdr_page->pos += sizeof(length);
2163 
2164 		/*
2165 		 * This will copy the SNAP as well, which will be considered
2166 		 * part of the MAC header.
2167 		 */
2168 		tso_build_hdr(skb, hdr_page->pos, &tso, data_left, !total_len);
2169 		iph = hdr_page->pos + 8;
2170 		tcph = (void *)(iph + ip_hdrlen);
2171 
2172 		/* For testing on current hardware only */
2173 		if (trans_pcie->sw_csum_tx) {
2174 			csum_skb = alloc_skb(data_left + tcp_hdrlen(skb),
2175 					     GFP_ATOMIC);
2176 			if (!csum_skb) {
2177 				ret = -ENOMEM;
2178 				goto out_unmap;
2179 			}
2180 
2181 			iwl_compute_pseudo_hdr_csum(iph, tcph,
2182 						    skb->protocol ==
2183 							htons(ETH_P_IPV6),
2184 						    data_left);
2185 
2186 			skb_put_data(csum_skb, tcph, tcp_hdrlen(skb));
2187 			skb_reset_transport_header(csum_skb);
2188 			csum_skb->csum_start =
2189 				(unsigned char *)tcp_hdr(csum_skb) -
2190 						 csum_skb->head;
2191 		}
2192 
2193 		hdr_page->pos += snap_ip_tcp_hdrlen;
2194 
2195 		hdr_tb_len = hdr_page->pos - start_hdr;
2196 		hdr_tb_phys = dma_map_single(trans->dev, start_hdr,
2197 					     hdr_tb_len, DMA_TO_DEVICE);
2198 		if (unlikely(dma_mapping_error(trans->dev, hdr_tb_phys))) {
2199 			dev_kfree_skb(csum_skb);
2200 			ret = -EINVAL;
2201 			goto out_unmap;
2202 		}
2203 		iwl_pcie_txq_build_tfd(trans, txq, hdr_tb_phys,
2204 				       hdr_tb_len, false);
2205 		trace_iwlwifi_dev_tx_tso_chunk(trans->dev, start_hdr,
2206 					       hdr_tb_len);
2207 		/* add this subframe's headers' length to the tx_cmd */
2208 		le16_add_cpu(&tx_cmd->len, hdr_page->pos - subf_hdrs_start);
2209 
2210 		/* prepare the start_hdr for the next subframe */
2211 		start_hdr = hdr_page->pos;
2212 
2213 		/* put the payload */
2214 		while (data_left) {
2215 			unsigned int size = min_t(unsigned int, tso.size,
2216 						  data_left);
2217 			dma_addr_t tb_phys;
2218 
2219 			if (trans_pcie->sw_csum_tx)
2220 				skb_put_data(csum_skb, tso.data, size);
2221 
2222 			tb_phys = dma_map_single(trans->dev, tso.data,
2223 						 size, DMA_TO_DEVICE);
2224 			if (unlikely(dma_mapping_error(trans->dev, tb_phys))) {
2225 				dev_kfree_skb(csum_skb);
2226 				ret = -EINVAL;
2227 				goto out_unmap;
2228 			}
2229 
2230 			iwl_pcie_txq_build_tfd(trans, txq, tb_phys,
2231 					       size, false);
2232 			trace_iwlwifi_dev_tx_tso_chunk(trans->dev, tso.data,
2233 						       size);
2234 
2235 			data_left -= size;
2236 			tso_build_data(skb, &tso, size);
2237 		}
2238 
2239 		/* For testing on early hardware only */
2240 		if (trans_pcie->sw_csum_tx) {
2241 			__wsum csum;
2242 
2243 			csum = skb_checksum(csum_skb,
2244 					    skb_checksum_start_offset(csum_skb),
2245 					    csum_skb->len -
2246 					    skb_checksum_start_offset(csum_skb),
2247 					    0);
2248 			dev_kfree_skb(csum_skb);
2249 			dma_sync_single_for_cpu(trans->dev, hdr_tb_phys,
2250 						hdr_tb_len, DMA_TO_DEVICE);
2251 			tcph->check = csum_fold(csum);
2252 			dma_sync_single_for_device(trans->dev, hdr_tb_phys,
2253 						   hdr_tb_len, DMA_TO_DEVICE);
2254 		}
2255 	}
2256 
2257 	/* re-add the WiFi header and IV */
2258 	skb_push(skb, hdr_len + iv_len);
2259 
2260 	return 0;
2261 
2262 out_unmap:
2263 	iwl_pcie_tfd_unmap(trans, out_meta, txq, txq->write_ptr);
2264 	return ret;
2265 }
2266 #else /* CONFIG_INET */
2267 static int iwl_fill_data_tbs_amsdu(struct iwl_trans *trans, struct sk_buff *skb,
2268 				   struct iwl_txq *txq, u8 hdr_len,
2269 				   struct iwl_cmd_meta *out_meta,
2270 				   struct iwl_device_cmd *dev_cmd, u16 tb1_len)
2271 {
2272 	/* No A-MSDU without CONFIG_INET */
2273 	WARN_ON(1);
2274 
2275 	return -1;
2276 }
2277 #endif /* CONFIG_INET */
2278 
2279 int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb,
2280 		      struct iwl_device_cmd *dev_cmd, int txq_id)
2281 {
2282 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2283 	struct ieee80211_hdr *hdr;
2284 	struct iwl_tx_cmd *tx_cmd = (struct iwl_tx_cmd *)dev_cmd->payload;
2285 	struct iwl_cmd_meta *out_meta;
2286 	struct iwl_txq *txq;
2287 	dma_addr_t tb0_phys, tb1_phys, scratch_phys;
2288 	void *tb1_addr;
2289 	void *tfd;
2290 	u16 len, tb1_len;
2291 	bool wait_write_ptr;
2292 	__le16 fc;
2293 	u8 hdr_len;
2294 	u16 wifi_seq;
2295 	bool amsdu;
2296 
2297 	txq = trans_pcie->txq[txq_id];
2298 
2299 	if (WARN_ONCE(!test_bit(txq_id, trans_pcie->queue_used),
2300 		      "TX on unused queue %d\n", txq_id))
2301 		return -EINVAL;
2302 
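	/* in SW checksum mode the HW won't checksum for us, so resolve a
	 * pending CHECKSUM_PARTIAL here in the driver */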
2303 	if (unlikely(trans_pcie->sw_csum_tx &&
2304 		     skb->ip_summed == CHECKSUM_PARTIAL)) {
2305 		int offs = skb_checksum_start_offset(skb);
2306 		int csum_offs = offs + skb->csum_offset;
2307 		__wsum csum;
2308 
2309 		if (skb_ensure_writable(skb, csum_offs + sizeof(__sum16)))
2310 			return -1;
2311 
2312 		csum = skb_checksum(skb, offs, skb->len - offs, 0);
2313 		*(__sum16 *)(skb->data + csum_offs) = csum_fold(csum);
2314 
2315 		skb->ip_summed = CHECKSUM_UNNECESSARY;
2316 	}
2317 
2318 	if (skb_is_nonlinear(skb) &&
2319 	    skb_shinfo(skb)->nr_frags > IWL_PCIE_MAX_FRAGS(trans_pcie) &&
2320 	    __skb_linearize(skb))
2321 		return -ENOMEM;
2322 
2323 	/* mac80211 always puts the full header into the SKB's head,
2324 	 * so there's no need to check if it's readable there
2325 	 */
2326 	hdr = (struct ieee80211_hdr *)skb->data;
2327 	fc = hdr->frame_control;
2328 	hdr_len = ieee80211_hdrlen(fc);
2329 
2330 	spin_lock(&txq->lock);
2331 
2332 	if (iwl_queue_space(trans, txq) < txq->high_mark) {
2333 		iwl_stop_queue(trans, txq);
2334 
2335 		/* don't put the packet on the ring if there is no room */
2336 		if (unlikely(iwl_queue_space(trans, txq) < 3)) {
2337 			struct iwl_device_cmd **dev_cmd_ptr;
2338 
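			/* stash the device command in skb->cb so the frame
			 * can be resubmitted from the overflow queue once
			 * space frees up again */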
2339 			dev_cmd_ptr = (void *)((u8 *)skb->cb +
2340 					       trans_pcie->dev_cmd_offs);
2341 
2342 			*dev_cmd_ptr = dev_cmd;
2343 			__skb_queue_tail(&txq->overflow_q, skb);
2344 
2345 			spin_unlock(&txq->lock);
2346 			return 0;
2347 		}
2348 	}
2349 
2350 	/* In AGG mode, the index in the ring must correspond to the WiFi
2351 	 * sequence number. This is a HW requirement that helps the SCD
2352 	 * (scheduler) parse the block-ack (BA).
2353 	 * Check here that the packets are in the right place on the ring.
2354 	 */
2355 	wifi_seq = IEEE80211_SEQ_TO_SN(le16_to_cpu(hdr->seq_ctrl));
2356 	WARN_ONCE(txq->ampdu &&
2357 		  (wifi_seq & 0xff) != txq->write_ptr,
2358 		  "Q: %d WiFi Seq %d tfdNum %d",
2359 		  txq_id, wifi_seq, txq->write_ptr);
2360 
2361 	/* Set up driver data for this TFD */
2362 	txq->entries[txq->write_ptr].skb = skb;
2363 	txq->entries[txq->write_ptr].cmd = dev_cmd;
2364 
2365 	dev_cmd->hdr.sequence =
2366 		cpu_to_le16((u16)(QUEUE_TO_SEQ(txq_id) |
2367 			    INDEX_TO_SEQ(txq->write_ptr)));
2368 
2369 	tb0_phys = iwl_pcie_get_first_tb_dma(txq, txq->write_ptr);
2370 	scratch_phys = tb0_phys + sizeof(struct iwl_cmd_header) +
2371 		       offsetof(struct iwl_tx_cmd, scratch);
2372 
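	/* tell the device where the TX command's scratch area lives, as
	 * LSB and MSB halves of the DMA address */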
2373 	tx_cmd->dram_lsb_ptr = cpu_to_le32(scratch_phys);
2374 	tx_cmd->dram_msb_ptr = iwl_get_dma_hi_addr(scratch_phys);
2375 
2376 	/* Set up first empty entry in queue's array of Tx/cmd buffers */
2377 	out_meta = &txq->entries[txq->write_ptr].meta;
2378 	out_meta->flags = 0;
2379 
2380 	/*
2381 	 * The second TB (tb1) points to the remainder of the TX command
2382 	 * and the 802.11 header - dword aligned size
2383 	 * (This calculation modifies the TX command, so do it before the
2384 	 * setup of the first TB)
2385 	 */
2386 	len = sizeof(struct iwl_tx_cmd) + sizeof(struct iwl_cmd_header) +
2387 	      hdr_len - IWL_FIRST_TB_SIZE;
2388 	/* do not align A-MSDU to dword as the subframe header aligns it */
2389 	amsdu = ieee80211_is_data_qos(fc) &&
2390 		(*ieee80211_get_qos_ctl(hdr) &
2391 		 IEEE80211_QOS_CTL_A_MSDU_PRESENT);
2392 	if (trans_pcie->sw_csum_tx || !amsdu) {
2393 		tb1_len = ALIGN(len, 4);
2394 		/* Tell NIC about any 2-byte padding after MAC header */
2395 		if (tb1_len != len)
2396 			tx_cmd->tx_flags |= cpu_to_le32(TX_CMD_FLG_MH_PAD);
2397 	} else {
2398 		tb1_len = len;
2399 	}
2400 
2401 	/*
2402 	 * The first TB points to bi-directional DMA data; we'll
2403 	 * memcpy the data into it later.
2404 	 */
2405 	iwl_pcie_txq_build_tfd(trans, txq, tb0_phys,
2406 			       IWL_FIRST_TB_SIZE, true);
2407 
2408 	/* there must be data left over for TB1 or this code must be changed */
2409 	BUILD_BUG_ON(sizeof(struct iwl_tx_cmd) < IWL_FIRST_TB_SIZE);
2410 
2411 	/* map the data for TB1 */
2412 	tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE;
2413 	tb1_phys = dma_map_single(trans->dev, tb1_addr, tb1_len, DMA_TO_DEVICE);
2414 	if (unlikely(dma_mapping_error(trans->dev, tb1_phys)))
2415 		goto out_err;
2416 	iwl_pcie_txq_build_tfd(trans, txq, tb1_phys, tb1_len, false);
2417 
2418 	/*
2419 	 * If gso_size wasn't set, don't give the frame "amsdu treatment"
2420 	 * (adding subframes, etc.).
2421 	 * This can happen in some testing flows when the amsdu was already
2422 	 * pre-built, and we just need to send the resulting skb.
2423 	 */
2424 	if (amsdu && skb_shinfo(skb)->gso_size) {
2425 		if (unlikely(iwl_fill_data_tbs_amsdu(trans, skb, txq, hdr_len,
2426 						     out_meta, dev_cmd,
2427 						     tb1_len)))
2428 			goto out_err;
2429 	} else if (unlikely(iwl_fill_data_tbs(trans, skb, txq, hdr_len,
2430 				       out_meta, dev_cmd, tb1_len))) {
2431 		goto out_err;
2432 	}
2433 
2434 	/* building the A-MSDU might have changed this data, so memcpy it now */
2435 	memcpy(&txq->first_tb_bufs[txq->write_ptr], &dev_cmd->hdr,
2436 	       IWL_FIRST_TB_SIZE);
2437 
2438 	tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr);
2439 	/* Set up entry for this TFD in Tx byte-count array */
2440 	iwl_pcie_txq_update_byte_cnt_tbl(trans, txq, le16_to_cpu(tx_cmd->len),
2441 					 iwl_pcie_tfd_get_num_tbs(trans, tfd));
2442 
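	/* for fragmented frames, defer the write-pointer update so the
	 * device doesn't see the frame until the last fragment is queued */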
2443 	wait_write_ptr = ieee80211_has_morefrags(fc);
2444 
2445 	/* start timer if queue currently empty */
2446 	if (txq->read_ptr == txq->write_ptr) {
2447 		if (txq->wd_timeout) {
2448 			/*
2449 			 * If the TXQ is active, set the timer; if not, save
2450 			 * the timeout in frozen_expiry_remainder so that the
2451 			 * timer will be armed with the right value when the
2452 			 * station wakes up.
2453 			 */
2454 			if (!txq->frozen)
2455 				mod_timer(&txq->stuck_timer,
2456 					  jiffies + txq->wd_timeout);
2457 			else
2458 				txq->frozen_expiry_remainder = txq->wd_timeout;
2459 		}
2460 		IWL_DEBUG_RPM(trans, "Q: %d first tx - take ref\n", txq->id);
2461 		iwl_trans_ref(trans);
2462 	}
2463 
2464 	/* Tell device the write index *just past* this latest filled TFD */
2465 	txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr);
2466 	if (!wait_write_ptr)
2467 		iwl_pcie_txq_inc_wr_ptr(trans, txq);
2468 
2469 	/*
2470 	 * At this point the frame is "transmitted" successfully
2471 	 * and we will get a TX status notification eventually.
2472 	 */
2473 	spin_unlock(&txq->lock);
2474 	return 0;
2475 out_err:
2476 	spin_unlock(&txq->lock);
2477 	return -1;
2478 }
2479