xref: /freebsd/sys/contrib/dev/iwlwifi/pcie/rx.c (revision bfcc09ddd422c95a1a2e4e794b63ee54c4902398)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2021 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "iwl-context-info-gen3.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which points to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer -
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When another 6 RBDs are used - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum missing RBDs per allocation request (a request is posted with 2
 *   empty RBDs; there is no guarantee when the other 6 RBDs are supplied).
 *   The queues supply the recycling of the rest of the RBDs.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_pcie_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_GEN3,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->trans_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					   RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->trans_cfg->base_params->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

	rxq->write_actual = round_down(rxq->write, 8);
	if (trans->trans_cfg->mq_rx_supported)
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
	else
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
}

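/*
 * iwl_pcie_rxq_check_wrptr - write out any deferred write pointer updates
 *
 * Goes over all RX queues and, for each queue that was marked as needing an
 * update while the NIC was asleep, writes the pending write pointer to the
 * hardware and clears the flag.
 */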
static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock_bh(&rxq->lock);
	}
}

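/*
 * iwl_pcie_restock_bd - write one receive buffer descriptor to the hardware
 *
 * Fills the RBD at the queue's write index with the buffer's DMA address and
 * virtual RB ID, using the extended transfer descriptor layout on AX210 and
 * later devices.
 */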
static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_PCI_RW(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->trans_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int rbsize = iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL;
			else
				get_page(page);
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock_bh(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			return;
		}
		spin_unlock_bh(&rxq->lock);

		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock_bh(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_bh(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_bh(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock_bh(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_bh(&rxq->lock);
	}
}

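/*
 * iwl_pcie_free_rbs_pool - free all pages still held in the RX buffer pool
 *
 * Unmaps and frees the page of every pool entry that still owns one.
 */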
void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (!trans_pcie->rx_pool)
		return;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock_bh(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock_bh(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock_bh(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock_bh(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock_bh(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock_bh(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

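/*
 * iwl_pcie_rx_allocator_work - work item wrapper around iwl_pcie_rx_allocator
 */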
void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

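/*
 * iwl_pcie_free_bd_size - return the size of one free buffer descriptor
 *
 * The size depends on whether the device uses the extended transfer
 * descriptor, 64-bit descriptors (multi-queue) or 32-bit descriptors.
 */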
static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td)
{
	struct iwl_rx_transfer_desc *rx_td;

	if (use_rx_td)
		return sizeof(*rx_td);
	else
		return trans->trans_cfg->mq_rx_supported ? sizeof(__le64) :
			sizeof(__le32);
}

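/*
 * iwl_pcie_free_rxq_dma - free a queue's DMA-coherent descriptor memory
 *
 * Releases the free and used BD rings; the RB status memory is shared across
 * queues, so only the per-queue pointers are cleared here.
 */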
static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	int free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  (use_rx_td ? sizeof(*rxq->cd) :
				   sizeof(__le32)) * rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;
}

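/*
 * iwl_pcie_alloc_rxq_dma - allocate a queue's DMA-coherent descriptor memory
 *
 * Allocates the RBD ring (and, on multi-queue devices, the used BD ring) and
 * points the queue's RB status at its slot in the shared status area.
 */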
static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;
	bool use_rx_td = (trans->trans_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);
	size_t rb_stts_size = use_rx_td ? sizeof(__le16) :
			      sizeof(struct iwl_rb_status);

	spin_lock_init(&rxq->lock);
	if (trans->trans_cfg->mq_rx_supported)
		rxq->queue_size = trans->cfg->num_rbds;
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans, use_rx_td);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->trans_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  (use_rx_td ? sizeof(*rxq->cd) : sizeof(__le32)) * rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

	rxq->rb_stts = (void *)((u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size);
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	return 0;

err:
	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}

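/*
 * iwl_pcie_rx_alloc - allocate all software and DMA resources for RX
 *
 * Allocates the RX queues, the RB pool, the global RB lookup table and the
 * shared RB status area, then sets up the per-queue DMA memory.
 */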
static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;
	size_t rb_stts_size = trans->trans_cfg->device_family >=
				IWL_DEVICE_FAMILY_AX210 ?
			      sizeof(__le16) : sizeof(struct iwl_rb_status);

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
				      sizeof(trans_pcie->rx_pool[0]),
				      GFP_KERNEL);
	trans_pcie->global_table =
		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			sizeof(trans_pcie->global_table[0]),
			GFP_KERNEL);
	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
	    !trans_pcie->global_table) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->num_rx_queues,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->num_rx_queues; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->num_rx_queues,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rx_pool);
	trans_pcie->rx_pool = NULL;
	kfree(trans_pcie->global_table);
	trans_pcie->global_table = NULL;
	kfree(trans_pcie->rxq);
	trans_pcie->rxq = NULL;

	return ret;
}

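/*
 * iwl_pcie_rx_hw_init - configure the legacy (single queue) RX DMA hardware
 */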
static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans_pcie->rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

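/*
 * iwl_pcie_rx_mq_hw_init - configure the multi-queue RX hardware (RFH)
 *
 * Programs the free/used BD rings and status addresses of every RX queue,
 * resets the hardware indexes and enables the queues.
 */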
886*bfcc09ddSBjoern A. Zeeb static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
887*bfcc09ddSBjoern A. Zeeb {
888*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
889*bfcc09ddSBjoern A. Zeeb 	u32 rb_size, enabled = 0;
890*bfcc09ddSBjoern A. Zeeb 	int i;
891*bfcc09ddSBjoern A. Zeeb 
892*bfcc09ddSBjoern A. Zeeb 	switch (trans_pcie->rx_buf_size) {
893*bfcc09ddSBjoern A. Zeeb 	case IWL_AMSDU_2K:
894*bfcc09ddSBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
895*bfcc09ddSBjoern A. Zeeb 		break;
896*bfcc09ddSBjoern A. Zeeb 	case IWL_AMSDU_4K:
897*bfcc09ddSBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
898*bfcc09ddSBjoern A. Zeeb 		break;
899*bfcc09ddSBjoern A. Zeeb 	case IWL_AMSDU_8K:
900*bfcc09ddSBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
901*bfcc09ddSBjoern A. Zeeb 		break;
902*bfcc09ddSBjoern A. Zeeb 	case IWL_AMSDU_12K:
903*bfcc09ddSBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
904*bfcc09ddSBjoern A. Zeeb 		break;
905*bfcc09ddSBjoern A. Zeeb 	default:
906*bfcc09ddSBjoern A. Zeeb 		WARN_ON(1);
907*bfcc09ddSBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
908*bfcc09ddSBjoern A. Zeeb 	}
909*bfcc09ddSBjoern A. Zeeb 
910*bfcc09ddSBjoern A. Zeeb 	if (!iwl_trans_grab_nic_access(trans))
911*bfcc09ddSBjoern A. Zeeb 		return;
912*bfcc09ddSBjoern A. Zeeb 
913*bfcc09ddSBjoern A. Zeeb 	/* Stop Rx DMA */
914*bfcc09ddSBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
915*bfcc09ddSBjoern A. Zeeb 	/* disable free amd used rx queue operation */
916*bfcc09ddSBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
917*bfcc09ddSBjoern A. Zeeb 
918*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < trans->num_rx_queues; i++) {
919*bfcc09ddSBjoern A. Zeeb 		/* Tell device where to find RBD free table in DRAM */
920*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
921*bfcc09ddSBjoern A. Zeeb 					 RFH_Q_FRBDCB_BA_LSB(i),
922*bfcc09ddSBjoern A. Zeeb 					 trans_pcie->rxq[i].bd_dma);
923*bfcc09ddSBjoern A. Zeeb 		/* Tell device where to find RBD used table in DRAM */
924*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
925*bfcc09ddSBjoern A. Zeeb 					 RFH_Q_URBDCB_BA_LSB(i),
926*bfcc09ddSBjoern A. Zeeb 					 trans_pcie->rxq[i].used_bd_dma);
927*bfcc09ddSBjoern A. Zeeb 		/* Tell device where in DRAM to update its Rx status */
928*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
929*bfcc09ddSBjoern A. Zeeb 					 RFH_Q_URBD_STTS_WPTR_LSB(i),
930*bfcc09ddSBjoern A. Zeeb 					 trans_pcie->rxq[i].rb_stts_dma);
931*bfcc09ddSBjoern A. Zeeb 		/* Reset device indice tables */
932*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
933*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
934*bfcc09ddSBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
935*bfcc09ddSBjoern A. Zeeb 
936*bfcc09ddSBjoern A. Zeeb 		enabled |= BIT(i) | BIT(i + 16);
937*bfcc09ddSBjoern A. Zeeb 	}
938*bfcc09ddSBjoern A. Zeeb 
939*bfcc09ddSBjoern A. Zeeb 	/*
940*bfcc09ddSBjoern A. Zeeb 	 * Enable Rx DMA
941*bfcc09ddSBjoern A. Zeeb 	 * Rx buffer size 4 or 8k or 12k
942*bfcc09ddSBjoern A. Zeeb 	 * Min RB size 4 or 8
943*bfcc09ddSBjoern A. Zeeb 	 * Drop frames that exceed RB size
944*bfcc09ddSBjoern A. Zeeb 	 * 512 RBDs
945*bfcc09ddSBjoern A. Zeeb 	 */
946*bfcc09ddSBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
947*bfcc09ddSBjoern A. Zeeb 			       RFH_DMA_EN_ENABLE_VAL | rb_size |
948*bfcc09ddSBjoern A. Zeeb 			       RFH_RXF_DMA_MIN_RB_4_8 |
949*bfcc09ddSBjoern A. Zeeb 			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
950*bfcc09ddSBjoern A. Zeeb 			       RFH_RXF_DMA_RBDCB_SIZE_512);
951*bfcc09ddSBjoern A. Zeeb 
952*bfcc09ddSBjoern A. Zeeb 	/*
953*bfcc09ddSBjoern A. Zeeb 	 * Activate DMA snooping.
954*bfcc09ddSBjoern A. Zeeb 	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
955*bfcc09ddSBjoern A. Zeeb 	 * Default queue is 0
956*bfcc09ddSBjoern A. Zeeb 	 */
957*bfcc09ddSBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
958*bfcc09ddSBjoern A. Zeeb 			       RFH_GEN_CFG_RFH_DMA_SNOOP |
959*bfcc09ddSBjoern A. Zeeb 			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
960*bfcc09ddSBjoern A. Zeeb 			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
961*bfcc09ddSBjoern A. Zeeb 			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
962*bfcc09ddSBjoern A. Zeeb 					       trans->trans_cfg->integrated ?
963*bfcc09ddSBjoern A. Zeeb 					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
964*bfcc09ddSBjoern A. Zeeb 					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
965*bfcc09ddSBjoern A. Zeeb 	/* Enable the relevant rx queues */
966*bfcc09ddSBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
967*bfcc09ddSBjoern A. Zeeb 
968*bfcc09ddSBjoern A. Zeeb 	iwl_trans_release_nic_access(trans);
969*bfcc09ddSBjoern A. Zeeb 
970*bfcc09ddSBjoern A. Zeeb 	/* Set interrupt coalescing timer to default (2048 usecs) */
971*bfcc09ddSBjoern A. Zeeb 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
972*bfcc09ddSBjoern A. Zeeb }
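/*
 * For illustration: the "enabled" mask built above pairs a low bit and a high
 * bit per queue, which presumably activates the queue's free and used RBD
 * rings when written to RFH_RXF_RXQ_ACTIVE (the same register is cleared to 0
 * earlier to disable them).  For example, with two RX queues:
 *
 *	enabled = BIT(0) | BIT(16)	(queue 0)
 *		| BIT(1) | BIT(17)	(queue 1)
 *		= 0x00030003
 */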
973*bfcc09ddSBjoern A. Zeeb 
974*bfcc09ddSBjoern A. Zeeb void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
975*bfcc09ddSBjoern A. Zeeb {
976*bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&rxq->lock);
977*bfcc09ddSBjoern A. Zeeb 
978*bfcc09ddSBjoern A. Zeeb 	INIT_LIST_HEAD(&rxq->rx_free);
979*bfcc09ddSBjoern A. Zeeb 	INIT_LIST_HEAD(&rxq->rx_used);
980*bfcc09ddSBjoern A. Zeeb 	rxq->free_count = 0;
981*bfcc09ddSBjoern A. Zeeb 	rxq->used_count = 0;
982*bfcc09ddSBjoern A. Zeeb }
983*bfcc09ddSBjoern A. Zeeb 
984*bfcc09ddSBjoern A. Zeeb static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
985*bfcc09ddSBjoern A. Zeeb 
986*bfcc09ddSBjoern A. Zeeb static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
987*bfcc09ddSBjoern A. Zeeb {
988*bfcc09ddSBjoern A. Zeeb 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
989*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie;
990*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans;
991*bfcc09ddSBjoern A. Zeeb 	int ret;
992*bfcc09ddSBjoern A. Zeeb 
993*bfcc09ddSBjoern A. Zeeb 	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
994*bfcc09ddSBjoern A. Zeeb 	trans = trans_pcie->trans;
995*bfcc09ddSBjoern A. Zeeb 
996*bfcc09ddSBjoern A. Zeeb 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
997*bfcc09ddSBjoern A. Zeeb 
998*bfcc09ddSBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
999*bfcc09ddSBjoern A. Zeeb 		      rxq->id, ret, budget);
1000*bfcc09ddSBjoern A. Zeeb 
1001*bfcc09ddSBjoern A. Zeeb 	if (ret < budget) {
1002*bfcc09ddSBjoern A. Zeeb 		spin_lock(&trans_pcie->irq_lock);
1003*bfcc09ddSBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1004*bfcc09ddSBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
1005*bfcc09ddSBjoern A. Zeeb 		spin_unlock(&trans_pcie->irq_lock);
1006*bfcc09ddSBjoern A. Zeeb 
1007*bfcc09ddSBjoern A. Zeeb 		napi_complete_done(&rxq->napi, ret);
1008*bfcc09ddSBjoern A. Zeeb 	}
1009*bfcc09ddSBjoern A. Zeeb 
1010*bfcc09ddSBjoern A. Zeeb 	return ret;
1011*bfcc09ddSBjoern A. Zeeb }
1012*bfcc09ddSBjoern A. Zeeb 
1013*bfcc09ddSBjoern A. Zeeb static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
1014*bfcc09ddSBjoern A. Zeeb {
1015*bfcc09ddSBjoern A. Zeeb 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1016*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie;
1017*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans;
1018*bfcc09ddSBjoern A. Zeeb 	int ret;
1019*bfcc09ddSBjoern A. Zeeb 
1020*bfcc09ddSBjoern A. Zeeb 	trans_pcie = container_of(napi->dev, struct iwl_trans_pcie, napi_dev);
1021*bfcc09ddSBjoern A. Zeeb 	trans = trans_pcie->trans;
1022*bfcc09ddSBjoern A. Zeeb 
1023*bfcc09ddSBjoern A. Zeeb 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1024*bfcc09ddSBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1025*bfcc09ddSBjoern A. Zeeb 		      budget);
1026*bfcc09ddSBjoern A. Zeeb 
1027*bfcc09ddSBjoern A. Zeeb 	if (ret < budget) {
1028*bfcc09ddSBjoern A. Zeeb 		int irq_line = rxq->id;
1029*bfcc09ddSBjoern A. Zeeb 
1030*bfcc09ddSBjoern A. Zeeb 		/* FIRST_RSS is shared with line 0 */
1031*bfcc09ddSBjoern A. Zeeb 		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1032*bfcc09ddSBjoern A. Zeeb 		    rxq->id == 1)
1033*bfcc09ddSBjoern A. Zeeb 			irq_line = 0;
1034*bfcc09ddSBjoern A. Zeeb 
1035*bfcc09ddSBjoern A. Zeeb 		spin_lock(&trans_pcie->irq_lock);
1036*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, irq_line);
1037*bfcc09ddSBjoern A. Zeeb 		spin_unlock(&trans_pcie->irq_lock);
1038*bfcc09ddSBjoern A. Zeeb 
1039*bfcc09ddSBjoern A. Zeeb 		napi_complete_done(&rxq->napi, ret);
1040*bfcc09ddSBjoern A. Zeeb 	}
1041*bfcc09ddSBjoern A. Zeeb 
1042*bfcc09ddSBjoern A. Zeeb 	return ret;
1043*bfcc09ddSBjoern A. Zeeb }
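/*
 * A sketch of the NAPI contract both poll functions above follow (assuming
 * the usual napi_complete_done() semantics): when fewer packets than the
 * budget were handled, the queue is considered drained, so the poll function
 * re-arms the interrupt (re-enable for INTA, clear the MSI-X line here) and
 * completes NAPI; when the budget was exhausted, it returns without
 * completing and the core schedules the poll again.  For example:
 *
 *	budget = 64, handled = 10  ->  clear IRQ line, napi_complete_done()
 *	budget = 64, handled = 64  ->  stay scheduled, polled again
 *
 * The FIRST_RSS special case above maps queue 1 back onto IRQ line 0, since
 * those two share a vector in that configuration.
 */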
1044*bfcc09ddSBjoern A. Zeeb 
1045*bfcc09ddSBjoern A. Zeeb static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1046*bfcc09ddSBjoern A. Zeeb {
1047*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1048*bfcc09ddSBjoern A. Zeeb 	struct iwl_rxq *def_rxq;
1049*bfcc09ddSBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1050*bfcc09ddSBjoern A. Zeeb 	int i, err, queue_size, allocator_pool_size, num_alloc;
1051*bfcc09ddSBjoern A. Zeeb 
1052*bfcc09ddSBjoern A. Zeeb 	if (!trans_pcie->rxq) {
1053*bfcc09ddSBjoern A. Zeeb 		err = iwl_pcie_rx_alloc(trans);
1054*bfcc09ddSBjoern A. Zeeb 		if (err)
1055*bfcc09ddSBjoern A. Zeeb 			return err;
1056*bfcc09ddSBjoern A. Zeeb 	}
1057*bfcc09ddSBjoern A. Zeeb 	def_rxq = trans_pcie->rxq;
1058*bfcc09ddSBjoern A. Zeeb 
1059*bfcc09ddSBjoern A. Zeeb 	cancel_work_sync(&rba->rx_alloc);
1060*bfcc09ddSBjoern A. Zeeb 
1061*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&rba->lock);
1062*bfcc09ddSBjoern A. Zeeb 	atomic_set(&rba->req_pending, 0);
1063*bfcc09ddSBjoern A. Zeeb 	atomic_set(&rba->req_ready, 0);
1064*bfcc09ddSBjoern A. Zeeb 	INIT_LIST_HEAD(&rba->rbd_allocated);
1065*bfcc09ddSBjoern A. Zeeb 	INIT_LIST_HEAD(&rba->rbd_empty);
1066*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&rba->lock);
1067*bfcc09ddSBjoern A. Zeeb 
1068*bfcc09ddSBjoern A. Zeeb 	/* free all first - we overwrite everything here */
1069*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_free_rbs_pool(trans);
1070*bfcc09ddSBjoern A. Zeeb 
1071*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < RX_QUEUE_SIZE; i++)
1072*bfcc09ddSBjoern A. Zeeb 		def_rxq->queue[i] = NULL;
1073*bfcc09ddSBjoern A. Zeeb 
1074*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < trans->num_rx_queues; i++) {
1075*bfcc09ddSBjoern A. Zeeb 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1076*bfcc09ddSBjoern A. Zeeb 
1077*bfcc09ddSBjoern A. Zeeb 		spin_lock_bh(&rxq->lock);
1078*bfcc09ddSBjoern A. Zeeb 		/*
1079*bfcc09ddSBjoern A. Zeeb 		 * Set read write pointer to reflect that we have processed
1080*bfcc09ddSBjoern A. Zeeb 		 * and used all buffers, but have not restocked the Rx queue
1081*bfcc09ddSBjoern A. Zeeb 		 * with fresh buffers
1082*bfcc09ddSBjoern A. Zeeb 		 */
1083*bfcc09ddSBjoern A. Zeeb 		rxq->read = 0;
1084*bfcc09ddSBjoern A. Zeeb 		rxq->write = 0;
1085*bfcc09ddSBjoern A. Zeeb 		rxq->write_actual = 0;
1086*bfcc09ddSBjoern A. Zeeb 		memset(rxq->rb_stts, 0,
1087*bfcc09ddSBjoern A. Zeeb 		       (trans->trans_cfg->device_family >=
1088*bfcc09ddSBjoern A. Zeeb 			IWL_DEVICE_FAMILY_AX210) ?
1089*bfcc09ddSBjoern A. Zeeb 		       sizeof(__le16) : sizeof(struct iwl_rb_status));
1090*bfcc09ddSBjoern A. Zeeb 
1091*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rx_init_rxb_lists(rxq);
1092*bfcc09ddSBjoern A. Zeeb 
1093*bfcc09ddSBjoern A. Zeeb 		spin_unlock_bh(&rxq->lock);
1094*bfcc09ddSBjoern A. Zeeb 
1095*bfcc09ddSBjoern A. Zeeb 		if (!rxq->napi.poll) {
1096*bfcc09ddSBjoern A. Zeeb 			int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
1097*bfcc09ddSBjoern A. Zeeb 
1098*bfcc09ddSBjoern A. Zeeb 			if (trans_pcie->msix_enabled)
1099*bfcc09ddSBjoern A. Zeeb 				poll = iwl_pcie_napi_poll_msix;
1100*bfcc09ddSBjoern A. Zeeb 
1101*bfcc09ddSBjoern A. Zeeb 			netif_napi_add(&trans_pcie->napi_dev, &rxq->napi,
1102*bfcc09ddSBjoern A. Zeeb 				       poll, NAPI_POLL_WEIGHT);
1103*bfcc09ddSBjoern A. Zeeb 			napi_enable(&rxq->napi);
1104*bfcc09ddSBjoern A. Zeeb 		}
1105*bfcc09ddSBjoern A. Zeeb 
1106*bfcc09ddSBjoern A. Zeeb 	}
1107*bfcc09ddSBjoern A. Zeeb 
1108*bfcc09ddSBjoern A. Zeeb 	/* move the pool to the default queue and allocator ownerships */
1109*bfcc09ddSBjoern A. Zeeb 	queue_size = trans->trans_cfg->mq_rx_supported ?
1110*bfcc09ddSBjoern A. Zeeb 			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1111*bfcc09ddSBjoern A. Zeeb 	allocator_pool_size = trans->num_rx_queues *
1112*bfcc09ddSBjoern A. Zeeb 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1113*bfcc09ddSBjoern A. Zeeb 	num_alloc = queue_size + allocator_pool_size;
1114*bfcc09ddSBjoern A. Zeeb 
1115*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < num_alloc; i++) {
1116*bfcc09ddSBjoern A. Zeeb 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1117*bfcc09ddSBjoern A. Zeeb 
1118*bfcc09ddSBjoern A. Zeeb 		if (i < allocator_pool_size)
1119*bfcc09ddSBjoern A. Zeeb 			list_add(&rxb->list, &rba->rbd_empty);
1120*bfcc09ddSBjoern A. Zeeb 		else
1121*bfcc09ddSBjoern A. Zeeb 			list_add(&rxb->list, &def_rxq->rx_used);
1122*bfcc09ddSBjoern A. Zeeb 		trans_pcie->global_table[i] = rxb;
1123*bfcc09ddSBjoern A. Zeeb 		rxb->vid = (u16)(i + 1);
1124*bfcc09ddSBjoern A. Zeeb 		rxb->invalid = true;
1125*bfcc09ddSBjoern A. Zeeb 	}
1126*bfcc09ddSBjoern A. Zeeb 
1127*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1128*bfcc09ddSBjoern A. Zeeb 
1129*bfcc09ddSBjoern A. Zeeb 	return 0;
1130*bfcc09ddSBjoern A. Zeeb }
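/*
 * A worked example of the pool split above, assuming RX_CLAIM_REQ_ALLOC == 8
 * and RX_POST_REQ_ALLOC == 2 (consistent with the "2 RBDs ... another 6 from
 * pool" comment further down) and a single RX queue with
 * trans_pcie->num_rx_bufs == 512 on multi-queue hardware:
 *
 *	queue_size          = 512 - 1     = 511
 *	allocator_pool_size = 1 * (8 - 2) = 6
 *	num_alloc           = 511 + 6     = 517
 *
 * so the first 6 buffers seed the allocator's rbd_empty list and the
 * remaining 511 start out on the default queue's rx_used list, each with a
 * 1-based vid matching its slot in global_table.
 */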
1131*bfcc09ddSBjoern A. Zeeb 
1132*bfcc09ddSBjoern A. Zeeb int iwl_pcie_rx_init(struct iwl_trans *trans)
1133*bfcc09ddSBjoern A. Zeeb {
1134*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1135*bfcc09ddSBjoern A. Zeeb 	int ret = _iwl_pcie_rx_init(trans);
1136*bfcc09ddSBjoern A. Zeeb 
1137*bfcc09ddSBjoern A. Zeeb 	if (ret)
1138*bfcc09ddSBjoern A. Zeeb 		return ret;
1139*bfcc09ddSBjoern A. Zeeb 
1140*bfcc09ddSBjoern A. Zeeb 	if (trans->trans_cfg->mq_rx_supported)
1141*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rx_mq_hw_init(trans);
1142*bfcc09ddSBjoern A. Zeeb 	else
1143*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1144*bfcc09ddSBjoern A. Zeeb 
1145*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1146*bfcc09ddSBjoern A. Zeeb 
1147*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->rxq->lock);
1148*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1149*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->rxq->lock);
1150*bfcc09ddSBjoern A. Zeeb 
1151*bfcc09ddSBjoern A. Zeeb 	return 0;
1152*bfcc09ddSBjoern A. Zeeb }
1153*bfcc09ddSBjoern A. Zeeb 
1154*bfcc09ddSBjoern A. Zeeb int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1155*bfcc09ddSBjoern A. Zeeb {
1156*bfcc09ddSBjoern A. Zeeb 	/* Set interrupt coalescing timer to default (2048 usecs) */
1157*bfcc09ddSBjoern A. Zeeb 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1158*bfcc09ddSBjoern A. Zeeb 
1159*bfcc09ddSBjoern A. Zeeb 	/*
1160*bfcc09ddSBjoern A. Zeeb 	 * We don't configure the RFH.
1161*bfcc09ddSBjoern A. Zeeb 	 * Restock will be done at the alive notification, once the firmware has configured the RFH.
1162*bfcc09ddSBjoern A. Zeeb 	 */
1163*bfcc09ddSBjoern A. Zeeb 	return _iwl_pcie_rx_init(trans);
1164*bfcc09ddSBjoern A. Zeeb }
1165*bfcc09ddSBjoern A. Zeeb 
1166*bfcc09ddSBjoern A. Zeeb void iwl_pcie_rx_free(struct iwl_trans *trans)
1167*bfcc09ddSBjoern A. Zeeb {
1168*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1169*bfcc09ddSBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1170*bfcc09ddSBjoern A. Zeeb 	int i;
1171*bfcc09ddSBjoern A. Zeeb 	size_t rb_stts_size = trans->trans_cfg->device_family >=
1172*bfcc09ddSBjoern A. Zeeb 				IWL_DEVICE_FAMILY_AX210 ?
1173*bfcc09ddSBjoern A. Zeeb 			      sizeof(__le16) : sizeof(struct iwl_rb_status);
1174*bfcc09ddSBjoern A. Zeeb 
1175*bfcc09ddSBjoern A. Zeeb 	/*
1176*bfcc09ddSBjoern A. Zeeb 	 * if rxq is NULL, it means that nothing has been allocated,
1177*bfcc09ddSBjoern A. Zeeb 	 * exit now
1178*bfcc09ddSBjoern A. Zeeb 	 */
1179*bfcc09ddSBjoern A. Zeeb 	if (!trans_pcie->rxq) {
1180*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1181*bfcc09ddSBjoern A. Zeeb 		return;
1182*bfcc09ddSBjoern A. Zeeb 	}
1183*bfcc09ddSBjoern A. Zeeb 
1184*bfcc09ddSBjoern A. Zeeb 	cancel_work_sync(&rba->rx_alloc);
1185*bfcc09ddSBjoern A. Zeeb 
1186*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_free_rbs_pool(trans);
1187*bfcc09ddSBjoern A. Zeeb 
1188*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->base_rb_stts) {
1189*bfcc09ddSBjoern A. Zeeb 		dma_free_coherent(trans->dev,
1190*bfcc09ddSBjoern A. Zeeb 				  rb_stts_size * trans->num_rx_queues,
1191*bfcc09ddSBjoern A. Zeeb 				  trans_pcie->base_rb_stts,
1192*bfcc09ddSBjoern A. Zeeb 				  trans_pcie->base_rb_stts_dma);
1193*bfcc09ddSBjoern A. Zeeb 		trans_pcie->base_rb_stts = NULL;
1194*bfcc09ddSBjoern A. Zeeb 		trans_pcie->base_rb_stts_dma = 0;
1195*bfcc09ddSBjoern A. Zeeb 	}
1196*bfcc09ddSBjoern A. Zeeb 
1197*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < trans->num_rx_queues; i++) {
1198*bfcc09ddSBjoern A. Zeeb 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1199*bfcc09ddSBjoern A. Zeeb 
1200*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_free_rxq_dma(trans, rxq);
1201*bfcc09ddSBjoern A. Zeeb 
1202*bfcc09ddSBjoern A. Zeeb 		if (rxq->napi.poll) {
1203*bfcc09ddSBjoern A. Zeeb 			napi_disable(&rxq->napi);
1204*bfcc09ddSBjoern A. Zeeb 			netif_napi_del(&rxq->napi);
1205*bfcc09ddSBjoern A. Zeeb 		}
1206*bfcc09ddSBjoern A. Zeeb 	}
1207*bfcc09ddSBjoern A. Zeeb 	kfree(trans_pcie->rx_pool);
1208*bfcc09ddSBjoern A. Zeeb 	kfree(trans_pcie->global_table);
1209*bfcc09ddSBjoern A. Zeeb 	kfree(trans_pcie->rxq);
1210*bfcc09ddSBjoern A. Zeeb 
1211*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->alloc_page)
1212*bfcc09ddSBjoern A. Zeeb 		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1213*bfcc09ddSBjoern A. Zeeb }
1214*bfcc09ddSBjoern A. Zeeb 
1215*bfcc09ddSBjoern A. Zeeb static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1216*bfcc09ddSBjoern A. Zeeb 					  struct iwl_rb_allocator *rba)
1217*bfcc09ddSBjoern A. Zeeb {
1218*bfcc09ddSBjoern A. Zeeb 	spin_lock(&rba->lock);
1219*bfcc09ddSBjoern A. Zeeb 	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1220*bfcc09ddSBjoern A. Zeeb 	spin_unlock(&rba->lock);
1221*bfcc09ddSBjoern A. Zeeb }
1222*bfcc09ddSBjoern A. Zeeb 
1223*bfcc09ddSBjoern A. Zeeb /*
1224*bfcc09ddSBjoern A. Zeeb  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1225*bfcc09ddSBjoern A. Zeeb  *
1226*bfcc09ddSBjoern A. Zeeb  * Called when a RBD can be reused. The RBD is transferred to the allocator.
1227*bfcc09ddSBjoern A. Zeeb  * When there are 2 empty RBDs - a request for allocation is posted
1228*bfcc09ddSBjoern A. Zeeb  */
1229*bfcc09ddSBjoern A. Zeeb static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1230*bfcc09ddSBjoern A. Zeeb 				  struct iwl_rx_mem_buffer *rxb,
1231*bfcc09ddSBjoern A. Zeeb 				  struct iwl_rxq *rxq, bool emergency)
1232*bfcc09ddSBjoern A. Zeeb {
1233*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1234*bfcc09ddSBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1235*bfcc09ddSBjoern A. Zeeb 
1236*bfcc09ddSBjoern A. Zeeb 	/* Move the RBD to the used list; it will be moved to the allocator in
1237*bfcc09ddSBjoern A. Zeeb 	 * batches before claiming or posting a request */
1238*bfcc09ddSBjoern A. Zeeb 	list_add_tail(&rxb->list, &rxq->rx_used);
1239*bfcc09ddSBjoern A. Zeeb 
1240*bfcc09ddSBjoern A. Zeeb 	if (unlikely(emergency))
1241*bfcc09ddSBjoern A. Zeeb 		return;
1242*bfcc09ddSBjoern A. Zeeb 
1243*bfcc09ddSBjoern A. Zeeb 	/* Count the allocator owned RBDs */
1244*bfcc09ddSBjoern A. Zeeb 	rxq->used_count++;
1245*bfcc09ddSBjoern A. Zeeb 
1246*bfcc09ddSBjoern A. Zeeb 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers,
1247*bfcc09ddSBjoern A. Zeeb 	 * issue a request to the allocator. The modulo with RX_CLAIM_REQ_ALLOC
1248*bfcc09ddSBjoern A. Zeeb 	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC buffers
1249*bfcc09ddSBjoern A. Zeeb 	 * earlier but still need to post another request.
1250*bfcc09ddSBjoern A. Zeeb 	 */
1251*bfcc09ddSBjoern A. Zeeb 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1252*bfcc09ddSBjoern A. Zeeb 		/* Move the 2 RBDs to the allocator ownership.  The allocator has
1253*bfcc09ddSBjoern A. Zeeb 		 * another 6 from the pool for the request completion. */
1254*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rx_move_to_allocator(rxq, rba);
1255*bfcc09ddSBjoern A. Zeeb 
1256*bfcc09ddSBjoern A. Zeeb 		atomic_inc(&rba->req_pending);
1257*bfcc09ddSBjoern A. Zeeb 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1258*bfcc09ddSBjoern A. Zeeb 	}
1259*bfcc09ddSBjoern A. Zeeb }
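/*
 * A small worked example of the condition above, assuming
 * RX_POST_REQ_ALLOC == 2 and RX_CLAIM_REQ_ALLOC == 8 (the values the comments
 * in this function suggest): a request is posted when used_count reaches
 * 2, 10, 18, ... i.e. two past every multiple of 8.  If a previous claim of 8
 * buffers never happened, used_count keeps growing and the modulo still
 * triggers the next request at the following "+2" point instead of waiting
 * for an exact multiple.
 */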
1260*bfcc09ddSBjoern A. Zeeb 
1261*bfcc09ddSBjoern A. Zeeb static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1262*bfcc09ddSBjoern A. Zeeb 				struct iwl_rxq *rxq,
1263*bfcc09ddSBjoern A. Zeeb 				struct iwl_rx_mem_buffer *rxb,
1264*bfcc09ddSBjoern A. Zeeb 				bool emergency,
1265*bfcc09ddSBjoern A. Zeeb 				int i)
1266*bfcc09ddSBjoern A. Zeeb {
1267*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1268*bfcc09ddSBjoern A. Zeeb 	struct iwl_txq *txq = trans->txqs.txq[trans->txqs.cmd.q_id];
1269*bfcc09ddSBjoern A. Zeeb 	bool page_stolen = false;
1270*bfcc09ddSBjoern A. Zeeb 	int max_len = trans_pcie->rx_buf_bytes;
1271*bfcc09ddSBjoern A. Zeeb 	u32 offset = 0;
1272*bfcc09ddSBjoern A. Zeeb 
1273*bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(!rxb))
1274*bfcc09ddSBjoern A. Zeeb 		return;
1275*bfcc09ddSBjoern A. Zeeb 
1276*bfcc09ddSBjoern A. Zeeb 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1277*bfcc09ddSBjoern A. Zeeb 
1278*bfcc09ddSBjoern A. Zeeb 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1279*bfcc09ddSBjoern A. Zeeb 		struct iwl_rx_packet *pkt;
1280*bfcc09ddSBjoern A. Zeeb 		bool reclaim;
1281*bfcc09ddSBjoern A. Zeeb 		int len;
1282*bfcc09ddSBjoern A. Zeeb 		struct iwl_rx_cmd_buffer rxcb = {
1283*bfcc09ddSBjoern A. Zeeb 			._offset = rxb->offset + offset,
1284*bfcc09ddSBjoern A. Zeeb 			._rx_page_order = trans_pcie->rx_page_order,
1285*bfcc09ddSBjoern A. Zeeb 			._page = rxb->page,
1286*bfcc09ddSBjoern A. Zeeb 			._page_stolen = false,
1287*bfcc09ddSBjoern A. Zeeb 			.truesize = max_len,
1288*bfcc09ddSBjoern A. Zeeb 		};
1289*bfcc09ddSBjoern A. Zeeb 
1290*bfcc09ddSBjoern A. Zeeb 		pkt = rxb_addr(&rxcb);
1291*bfcc09ddSBjoern A. Zeeb 
1292*bfcc09ddSBjoern A. Zeeb 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1293*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_RX(trans,
1294*bfcc09ddSBjoern A. Zeeb 				     "Q %d: RB end marker at offset %d\n",
1295*bfcc09ddSBjoern A. Zeeb 				     rxq->id, offset);
1296*bfcc09ddSBjoern A. Zeeb 			break;
1297*bfcc09ddSBjoern A. Zeeb 		}
1298*bfcc09ddSBjoern A. Zeeb 
1299*bfcc09ddSBjoern A. Zeeb 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1300*bfcc09ddSBjoern A. Zeeb 			FH_RSCSR_RXQ_POS != rxq->id,
1301*bfcc09ddSBjoern A. Zeeb 		     "frame on invalid queue - is on %d and indicates %d\n",
1302*bfcc09ddSBjoern A. Zeeb 		     rxq->id,
1303*bfcc09ddSBjoern A. Zeeb 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1304*bfcc09ddSBjoern A. Zeeb 			FH_RSCSR_RXQ_POS);
1305*bfcc09ddSBjoern A. Zeeb 
1306*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_RX(trans,
1307*bfcc09ddSBjoern A. Zeeb 			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1308*bfcc09ddSBjoern A. Zeeb 			     rxq->id, offset,
1309*bfcc09ddSBjoern A. Zeeb 			     iwl_get_cmd_string(trans,
1310*bfcc09ddSBjoern A. Zeeb 						iwl_cmd_id(pkt->hdr.cmd,
1311*bfcc09ddSBjoern A. Zeeb 							   pkt->hdr.group_id,
1312*bfcc09ddSBjoern A. Zeeb 							   0)),
1313*bfcc09ddSBjoern A. Zeeb 			     pkt->hdr.group_id, pkt->hdr.cmd,
1314*bfcc09ddSBjoern A. Zeeb 			     le16_to_cpu(pkt->hdr.sequence));
1315*bfcc09ddSBjoern A. Zeeb 
1316*bfcc09ddSBjoern A. Zeeb 		len = iwl_rx_packet_len(pkt);
1317*bfcc09ddSBjoern A. Zeeb 		len += sizeof(u32); /* account for status word */
1318*bfcc09ddSBjoern A. Zeeb 
1319*bfcc09ddSBjoern A. Zeeb 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1320*bfcc09ddSBjoern A. Zeeb 
1321*bfcc09ddSBjoern A. Zeeb 		/* check that what the device tells us made sense */
1322*bfcc09ddSBjoern A. Zeeb 		if (offset > max_len)
1323*bfcc09ddSBjoern A. Zeeb 			break;
1324*bfcc09ddSBjoern A. Zeeb 
1325*bfcc09ddSBjoern A. Zeeb 		trace_iwlwifi_dev_rx(trans->dev, trans, pkt, len);
1326*bfcc09ddSBjoern A. Zeeb 		trace_iwlwifi_dev_rx_data(trans->dev, trans, pkt, len);
1327*bfcc09ddSBjoern A. Zeeb 
1328*bfcc09ddSBjoern A. Zeeb 		/* Reclaim a command buffer only if this packet is a response
1329*bfcc09ddSBjoern A. Zeeb 		 *   to a (driver-originated) command.
1330*bfcc09ddSBjoern A. Zeeb 		 * If the packet (e.g. Rx frame) originated from uCode,
1331*bfcc09ddSBjoern A. Zeeb 		 *   there is no command buffer to reclaim.
1332*bfcc09ddSBjoern A. Zeeb 		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1333*bfcc09ddSBjoern A. Zeeb 		 *   but apparently a few don't get set; catch them here. */
1334*bfcc09ddSBjoern A. Zeeb 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1335*bfcc09ddSBjoern A. Zeeb 		if (reclaim && !pkt->hdr.group_id) {
1336*bfcc09ddSBjoern A. Zeeb 			int i;
1337*bfcc09ddSBjoern A. Zeeb 
1338*bfcc09ddSBjoern A. Zeeb 			for (i = 0; i < trans_pcie->n_no_reclaim_cmds; i++) {
1339*bfcc09ddSBjoern A. Zeeb 				if (trans_pcie->no_reclaim_cmds[i] ==
1340*bfcc09ddSBjoern A. Zeeb 							pkt->hdr.cmd) {
1341*bfcc09ddSBjoern A. Zeeb 					reclaim = false;
1342*bfcc09ddSBjoern A. Zeeb 					break;
1343*bfcc09ddSBjoern A. Zeeb 				}
1344*bfcc09ddSBjoern A. Zeeb 			}
1345*bfcc09ddSBjoern A. Zeeb 		}
1346*bfcc09ddSBjoern A. Zeeb 
1347*bfcc09ddSBjoern A. Zeeb 		if (rxq->id == trans_pcie->def_rx_queue)
1348*bfcc09ddSBjoern A. Zeeb 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1349*bfcc09ddSBjoern A. Zeeb 				       &rxcb);
1350*bfcc09ddSBjoern A. Zeeb 		else
1351*bfcc09ddSBjoern A. Zeeb 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1352*bfcc09ddSBjoern A. Zeeb 					   &rxcb, rxq->id);
1353*bfcc09ddSBjoern A. Zeeb 
1354*bfcc09ddSBjoern A. Zeeb 		/*
1355*bfcc09ddSBjoern A. Zeeb 		 * After here, we should always check rxcb._page_stolen,
1356*bfcc09ddSBjoern A. Zeeb 		 * if it is true then one of the handlers took the page.
1357*bfcc09ddSBjoern A. Zeeb 		 */
1358*bfcc09ddSBjoern A. Zeeb 
1359*bfcc09ddSBjoern A. Zeeb 		if (reclaim) {
1360*bfcc09ddSBjoern A. Zeeb 			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1361*bfcc09ddSBjoern A. Zeeb 			int index = SEQ_TO_INDEX(sequence);
1362*bfcc09ddSBjoern A. Zeeb 			int cmd_index = iwl_txq_get_cmd_index(txq, index);
1363*bfcc09ddSBjoern A. Zeeb 
1364*bfcc09ddSBjoern A. Zeeb 			kfree_sensitive(txq->entries[cmd_index].free_buf);
1365*bfcc09ddSBjoern A. Zeeb 			txq->entries[cmd_index].free_buf = NULL;
1366*bfcc09ddSBjoern A. Zeeb 
1367*bfcc09ddSBjoern A. Zeeb 			/* Invoke any callbacks, transfer the buffer to caller,
1368*bfcc09ddSBjoern A. Zeeb 			 * and fire off the (possibly) blocking
1369*bfcc09ddSBjoern A. Zeeb 			 * iwl_trans_send_cmd()
1370*bfcc09ddSBjoern A. Zeeb 			 * as we reclaim the driver command queue */
1371*bfcc09ddSBjoern A. Zeeb 			if (!rxcb._page_stolen)
1372*bfcc09ddSBjoern A. Zeeb 				iwl_pcie_hcmd_complete(trans, &rxcb);
1373*bfcc09ddSBjoern A. Zeeb 			else
1374*bfcc09ddSBjoern A. Zeeb 				IWL_WARN(trans, "Claim null rxb?\n");
1375*bfcc09ddSBjoern A. Zeeb 		}
1376*bfcc09ddSBjoern A. Zeeb 
1377*bfcc09ddSBjoern A. Zeeb 		page_stolen |= rxcb._page_stolen;
1378*bfcc09ddSBjoern A. Zeeb 		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1379*bfcc09ddSBjoern A. Zeeb 			break;
1380*bfcc09ddSBjoern A. Zeeb 	}
1381*bfcc09ddSBjoern A. Zeeb 
1382*bfcc09ddSBjoern A. Zeeb 	/* page was stolen from us -- free our reference */
1383*bfcc09ddSBjoern A. Zeeb 	if (page_stolen) {
1384*bfcc09ddSBjoern A. Zeeb 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1385*bfcc09ddSBjoern A. Zeeb 		rxb->page = NULL;
1386*bfcc09ddSBjoern A. Zeeb 	}
1387*bfcc09ddSBjoern A. Zeeb 
1388*bfcc09ddSBjoern A. Zeeb 	/* Reuse the page if possible. For notification packets and
1389*bfcc09ddSBjoern A. Zeeb 	 * SKBs that fail to Rx correctly, add them back into the
1390*bfcc09ddSBjoern A. Zeeb 	 * rx_free list for reuse later. */
1391*bfcc09ddSBjoern A. Zeeb 	if (rxb->page != NULL) {
1392*bfcc09ddSBjoern A. Zeeb 		rxb->page_dma =
1393*bfcc09ddSBjoern A. Zeeb 			dma_map_page(trans->dev, rxb->page, rxb->offset,
1394*bfcc09ddSBjoern A. Zeeb 				     trans_pcie->rx_buf_bytes,
1395*bfcc09ddSBjoern A. Zeeb 				     DMA_FROM_DEVICE);
1396*bfcc09ddSBjoern A. Zeeb 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1397*bfcc09ddSBjoern A. Zeeb 			/*
1398*bfcc09ddSBjoern A. Zeeb 			 * free the page(s) as well to not break
1399*bfcc09ddSBjoern A. Zeeb 			 * the invariant that the items on the used
1400*bfcc09ddSBjoern A. Zeeb 			 * list have no page(s)
1401*bfcc09ddSBjoern A. Zeeb 			 */
1402*bfcc09ddSBjoern A. Zeeb 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1403*bfcc09ddSBjoern A. Zeeb 			rxb->page = NULL;
1404*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1405*bfcc09ddSBjoern A. Zeeb 		} else {
1406*bfcc09ddSBjoern A. Zeeb 			list_add_tail(&rxb->list, &rxq->rx_free);
1407*bfcc09ddSBjoern A. Zeeb 			rxq->free_count++;
1408*bfcc09ddSBjoern A. Zeeb 		}
1409*bfcc09ddSBjoern A. Zeeb 	} else
1410*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1411*bfcc09ddSBjoern A. Zeeb }
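/*
 * A sketch of how the loop above walks packets within one receive buffer,
 * assuming FH_RSCSR_FRAME_ALIGN is 64 bytes (an assumption; the exact value
 * comes from the FH definitions):
 *
 *	len    = iwl_rx_packet_len(pkt) + sizeof(u32);	e.g. 100
 *	offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);	e.g. offset += 128
 *
 * so each packet starts on an aligned boundary, the walk stops at the
 * FH_RSCSR_FRAME_INVALID end marker or when offset would run past max_len,
 * and on AX210+ only a single packet per buffer is processed.
 */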
1412*bfcc09ddSBjoern A. Zeeb 
1413*bfcc09ddSBjoern A. Zeeb static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1414*bfcc09ddSBjoern A. Zeeb 						  struct iwl_rxq *rxq, int i,
1415*bfcc09ddSBjoern A. Zeeb 						  bool *join)
1416*bfcc09ddSBjoern A. Zeeb {
1417*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1418*bfcc09ddSBjoern A. Zeeb 	struct iwl_rx_mem_buffer *rxb;
1419*bfcc09ddSBjoern A. Zeeb 	u16 vid;
1420*bfcc09ddSBjoern A. Zeeb 
1421*bfcc09ddSBjoern A. Zeeb 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1422*bfcc09ddSBjoern A. Zeeb 
1423*bfcc09ddSBjoern A. Zeeb 	if (!trans->trans_cfg->mq_rx_supported) {
1424*bfcc09ddSBjoern A. Zeeb 		rxb = rxq->queue[i];
1425*bfcc09ddSBjoern A. Zeeb 		rxq->queue[i] = NULL;
1426*bfcc09ddSBjoern A. Zeeb 		return rxb;
1427*bfcc09ddSBjoern A. Zeeb 	}
1428*bfcc09ddSBjoern A. Zeeb 
1429*bfcc09ddSBjoern A. Zeeb 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1430*bfcc09ddSBjoern A. Zeeb 		vid = le16_to_cpu(rxq->cd[i].rbid);
1431*bfcc09ddSBjoern A. Zeeb 		*join = rxq->cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1432*bfcc09ddSBjoern A. Zeeb 	} else {
1433*bfcc09ddSBjoern A. Zeeb 		vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; /* 12-bit VID */
1434*bfcc09ddSBjoern A. Zeeb 	}
1435*bfcc09ddSBjoern A. Zeeb 
1436*bfcc09ddSBjoern A. Zeeb 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1437*bfcc09ddSBjoern A. Zeeb 		goto out_err;
1438*bfcc09ddSBjoern A. Zeeb 
1439*bfcc09ddSBjoern A. Zeeb 	rxb = trans_pcie->global_table[vid - 1];
1440*bfcc09ddSBjoern A. Zeeb 	if (rxb->invalid)
1441*bfcc09ddSBjoern A. Zeeb 		goto out_err;
1442*bfcc09ddSBjoern A. Zeeb 
1443*bfcc09ddSBjoern A. Zeeb 	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1444*bfcc09ddSBjoern A. Zeeb 
1445*bfcc09ddSBjoern A. Zeeb 	rxb->invalid = true;
1446*bfcc09ddSBjoern A. Zeeb 
1447*bfcc09ddSBjoern A. Zeeb 	return rxb;
1448*bfcc09ddSBjoern A. Zeeb 
1449*bfcc09ddSBjoern A. Zeeb out_err:
1450*bfcc09ddSBjoern A. Zeeb 	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1451*bfcc09ddSBjoern A. Zeeb 	iwl_force_nmi(trans);
1452*bfcc09ddSBjoern A. Zeeb 	return NULL;
1453*bfcc09ddSBjoern A. Zeeb }
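/*
 * The VID convention used above, as set up in _iwl_pcie_rx_init(): every
 * buffer in the pool gets vid = index + 1, so a hardware-reported VID of 0 is
 * never valid and the lookup is global_table[vid - 1].  On pre-AX210
 * multi-queue devices only the low 12 bits of the 32-bit descriptor carry the
 * VID, hence the 0x0FFF mask; anything outside 1..pool_size, or a buffer
 * already marked invalid, is treated as a hardware error and triggers an NMI.
 */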
1454*bfcc09ddSBjoern A. Zeeb 
1455*bfcc09ddSBjoern A. Zeeb /*
1456*bfcc09ddSBjoern A. Zeeb  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1457*bfcc09ddSBjoern A. Zeeb  */
1458*bfcc09ddSBjoern A. Zeeb static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1459*bfcc09ddSBjoern A. Zeeb {
1460*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1461*bfcc09ddSBjoern A. Zeeb 	struct iwl_rxq *rxq;
1462*bfcc09ddSBjoern A. Zeeb 	u32 r, i, count = 0, handled = 0;
1463*bfcc09ddSBjoern A. Zeeb 	bool emergency = false;
1464*bfcc09ddSBjoern A. Zeeb 
1465*bfcc09ddSBjoern A. Zeeb 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1466*bfcc09ddSBjoern A. Zeeb 		return budget;
1467*bfcc09ddSBjoern A. Zeeb 
1468*bfcc09ddSBjoern A. Zeeb 	rxq = &trans_pcie->rxq[queue];
1469*bfcc09ddSBjoern A. Zeeb 
1470*bfcc09ddSBjoern A. Zeeb restart:
1471*bfcc09ddSBjoern A. Zeeb 	spin_lock(&rxq->lock);
1472*bfcc09ddSBjoern A. Zeeb 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1473*bfcc09ddSBjoern A. Zeeb 	 * buffer that the driver may process (last buffer filled by ucode). */
1474*bfcc09ddSBjoern A. Zeeb 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
1475*bfcc09ddSBjoern A. Zeeb 	i = rxq->read;
1476*bfcc09ddSBjoern A. Zeeb 
1477*bfcc09ddSBjoern A. Zeeb 	/* W/A 9000 device step A0 wrap-around bug */
1478*bfcc09ddSBjoern A. Zeeb 	r &= (rxq->queue_size - 1);
1479*bfcc09ddSBjoern A. Zeeb 
1480*bfcc09ddSBjoern A. Zeeb 	/* Rx interrupt, but nothing sent from uCode */
1481*bfcc09ddSBjoern A. Zeeb 	if (i == r)
1482*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1483*bfcc09ddSBjoern A. Zeeb 
1484*bfcc09ddSBjoern A. Zeeb 	while (i != r && ++handled < budget) {
1485*bfcc09ddSBjoern A. Zeeb 		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1486*bfcc09ddSBjoern A. Zeeb 		struct iwl_rx_mem_buffer *rxb;
1487*bfcc09ddSBjoern A. Zeeb 		/* number of RBDs still waiting for page allocation */
1488*bfcc09ddSBjoern A. Zeeb 		u32 rb_pending_alloc =
1489*bfcc09ddSBjoern A. Zeeb 			atomic_read(&trans_pcie->rba.req_pending) *
1490*bfcc09ddSBjoern A. Zeeb 			RX_CLAIM_REQ_ALLOC;
1491*bfcc09ddSBjoern A. Zeeb 		bool join = false;
1492*bfcc09ddSBjoern A. Zeeb 
1493*bfcc09ddSBjoern A. Zeeb 		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1494*bfcc09ddSBjoern A. Zeeb 			     !emergency)) {
1495*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1496*bfcc09ddSBjoern A. Zeeb 			emergency = true;
1497*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_TPT(trans,
1498*bfcc09ddSBjoern A. Zeeb 				      "RX path is in emergency. Pending allocations %d\n",
1499*bfcc09ddSBjoern A. Zeeb 				      rb_pending_alloc);
1500*bfcc09ddSBjoern A. Zeeb 		}
1501*bfcc09ddSBjoern A. Zeeb 
1502*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1503*bfcc09ddSBjoern A. Zeeb 
1504*bfcc09ddSBjoern A. Zeeb 		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1505*bfcc09ddSBjoern A. Zeeb 		if (!rxb)
1506*bfcc09ddSBjoern A. Zeeb 			goto out;
1507*bfcc09ddSBjoern A. Zeeb 
1508*bfcc09ddSBjoern A. Zeeb 		if (unlikely(join || rxq->next_rb_is_fragment)) {
1509*bfcc09ddSBjoern A. Zeeb 			rxq->next_rb_is_fragment = join;
1510*bfcc09ddSBjoern A. Zeeb 			/*
1511*bfcc09ddSBjoern A. Zeeb 			 * We can only get a multi-RB in the following cases:
1512*bfcc09ddSBjoern A. Zeeb 			 *  - a firmware issue, sending a notification that is too big
1513*bfcc09ddSBjoern A. Zeeb 			 *  - sniffer mode with a large A-MSDU
1514*bfcc09ddSBjoern A. Zeeb 			 *  - large MTU frames (>2k)
1515*bfcc09ddSBjoern A. Zeeb 			 * since the multi-RB functionality is limited to newer
1516*bfcc09ddSBjoern A. Zeeb 			 * hardware that cannot put multiple entries into a
1517*bfcc09ddSBjoern A. Zeeb 			 * single RB.
1518*bfcc09ddSBjoern A. Zeeb 			 *
1519*bfcc09ddSBjoern A. Zeeb 			 * Right now, the higher layers aren't set up to deal
1520*bfcc09ddSBjoern A. Zeeb 			 * with that, so discard all of these.
1521*bfcc09ddSBjoern A. Zeeb 			 */
1522*bfcc09ddSBjoern A. Zeeb 			list_add_tail(&rxb->list, &rxq->rx_free);
1523*bfcc09ddSBjoern A. Zeeb 			rxq->free_count++;
1524*bfcc09ddSBjoern A. Zeeb 		} else {
1525*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1526*bfcc09ddSBjoern A. Zeeb 		}
1527*bfcc09ddSBjoern A. Zeeb 
1528*bfcc09ddSBjoern A. Zeeb 		i = (i + 1) & (rxq->queue_size - 1);
1529*bfcc09ddSBjoern A. Zeeb 
1530*bfcc09ddSBjoern A. Zeeb 		/*
1531*bfcc09ddSBjoern A. Zeeb 		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1532*bfcc09ddSBjoern A. Zeeb 		 * try to claim the pre-allocated buffers from the allocator.
1533*bfcc09ddSBjoern A. Zeeb 		 * If not ready - will try to reclaim next time.
1534*bfcc09ddSBjoern A. Zeeb 		 * There is no need to reschedule work - allocator exits only
1535*bfcc09ddSBjoern A. Zeeb 		 * on success
1536*bfcc09ddSBjoern A. Zeeb 		 */
1537*bfcc09ddSBjoern A. Zeeb 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1538*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rx_allocator_get(trans, rxq);
1539*bfcc09ddSBjoern A. Zeeb 
1540*bfcc09ddSBjoern A. Zeeb 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1541*bfcc09ddSBjoern A. Zeeb 			/* Add the remaining empty RBDs for allocator use */
1542*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1543*bfcc09ddSBjoern A. Zeeb 		} else if (emergency) {
1544*bfcc09ddSBjoern A. Zeeb 			count++;
1545*bfcc09ddSBjoern A. Zeeb 			if (count == 8) {
1546*bfcc09ddSBjoern A. Zeeb 				count = 0;
1547*bfcc09ddSBjoern A. Zeeb 				if (rb_pending_alloc < rxq->queue_size / 3) {
1548*bfcc09ddSBjoern A. Zeeb 					IWL_DEBUG_TPT(trans,
1549*bfcc09ddSBjoern A. Zeeb 						      "RX path exited emergency. Pending allocations %d\n",
1550*bfcc09ddSBjoern A. Zeeb 						      rb_pending_alloc);
1551*bfcc09ddSBjoern A. Zeeb 					emergency = false;
1552*bfcc09ddSBjoern A. Zeeb 				}
1553*bfcc09ddSBjoern A. Zeeb 
1554*bfcc09ddSBjoern A. Zeeb 				rxq->read = i;
1555*bfcc09ddSBjoern A. Zeeb 				spin_unlock(&rxq->lock);
1556*bfcc09ddSBjoern A. Zeeb 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1557*bfcc09ddSBjoern A. Zeeb 				iwl_pcie_rxq_restock(trans, rxq);
1558*bfcc09ddSBjoern A. Zeeb 				goto restart;
1559*bfcc09ddSBjoern A. Zeeb 			}
1560*bfcc09ddSBjoern A. Zeeb 		}
1561*bfcc09ddSBjoern A. Zeeb 	}
1562*bfcc09ddSBjoern A. Zeeb out:
1563*bfcc09ddSBjoern A. Zeeb 	/* Backtrack one entry */
1564*bfcc09ddSBjoern A. Zeeb 	rxq->read = i;
1565*bfcc09ddSBjoern A. Zeeb 	spin_unlock(&rxq->lock);
1566*bfcc09ddSBjoern A. Zeeb 
1567*bfcc09ddSBjoern A. Zeeb 	/*
1568*bfcc09ddSBjoern A. Zeeb 	 * handle a case where in emergency there are some unallocated RBDs.
1569*bfcc09ddSBjoern A. Zeeb 	 * those RBDs are in the used list, but are not tracked by the queue's
1570*bfcc09ddSBjoern A. Zeeb 	 * used_count which counts allocator owned RBDs.
1571*bfcc09ddSBjoern A. Zeeb 	 * unallocated emergency RBDs must be allocated on exit, otherwise
1572*bfcc09ddSBjoern A. Zeeb 	 * when called again the function may not be in emergency mode and
1573*bfcc09ddSBjoern A. Zeeb 	 * they will be handed to the allocator with no tracking in the RBD
1574*bfcc09ddSBjoern A. Zeeb 	 * allocator counters, which will lead to them never being claimed back
1575*bfcc09ddSBjoern A. Zeeb 	 * by the queue.
1576*bfcc09ddSBjoern A. Zeeb 	 * by allocating them here, they are now in the queue free list, and
1577*bfcc09ddSBjoern A. Zeeb 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1578*bfcc09ddSBjoern A. Zeeb 	 */
1579*bfcc09ddSBjoern A. Zeeb 	if (unlikely(emergency && count))
1580*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1581*bfcc09ddSBjoern A. Zeeb 
1582*bfcc09ddSBjoern A. Zeeb 	iwl_pcie_rxq_restock(trans, rxq);
1583*bfcc09ddSBjoern A. Zeeb 
1584*bfcc09ddSBjoern A. Zeeb 	return handled;
1585*bfcc09ddSBjoern A. Zeeb }
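/*
 * Note on the index arithmetic above: both the wrap of the hardware read
 * pointer (r &= rxq->queue_size - 1) and the advance
 * (i = (i + 1) & (rxq->queue_size - 1)) rely on queue_size being a power of
 * two, so the AND acts as a cheap modulo.  For example, with
 * queue_size == 512, index 511 + 1 wraps to 0:
 *
 *	(511 + 1) & (512 - 1) == 512 & 511 == 0
 */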
1586*bfcc09ddSBjoern A. Zeeb 
1587*bfcc09ddSBjoern A. Zeeb static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1588*bfcc09ddSBjoern A. Zeeb {
1589*bfcc09ddSBjoern A. Zeeb 	u8 queue = entry->entry;
1590*bfcc09ddSBjoern A. Zeeb 	struct msix_entry *entries = entry - queue;
1591*bfcc09ddSBjoern A. Zeeb 
1592*bfcc09ddSBjoern A. Zeeb 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1593*bfcc09ddSBjoern A. Zeeb }
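/*
 * The pointer arithmetic above works because entry->entry is the index of
 * this msix_entry within the trans_pcie->msix_entries[] array, so
 * "entry - queue" points back at msix_entries[0] and container_of() then
 * recovers the enclosing iwl_trans_pcie.  For example, if entry is
 * &trans_pcie->msix_entries[3], then queue == 3 and entries ==
 * &trans_pcie->msix_entries[0].
 */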
1594*bfcc09ddSBjoern A. Zeeb 
1595*bfcc09ddSBjoern A. Zeeb /*
1596*bfcc09ddSBjoern A. Zeeb  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1597*bfcc09ddSBjoern A. Zeeb  * This interrupt handler should be used with RSS queues only.
1598*bfcc09ddSBjoern A. Zeeb  */
1599*bfcc09ddSBjoern A. Zeeb irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1600*bfcc09ddSBjoern A. Zeeb {
1601*bfcc09ddSBjoern A. Zeeb 	struct msix_entry *entry = dev_id;
1602*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1603*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans = trans_pcie->trans;
1604*bfcc09ddSBjoern A. Zeeb 	struct iwl_rxq *rxq = &trans_pcie->rxq[entry->entry];
1605*bfcc09ddSBjoern A. Zeeb 
1606*bfcc09ddSBjoern A. Zeeb 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1607*bfcc09ddSBjoern A. Zeeb 
1608*bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(entry->entry >= trans->num_rx_queues))
1609*bfcc09ddSBjoern A. Zeeb 		return IRQ_NONE;
1610*bfcc09ddSBjoern A. Zeeb 
1611*bfcc09ddSBjoern A. Zeeb 	if (WARN_ONCE(!rxq,
1612*bfcc09ddSBjoern A. Zeeb 		      "[%d] Got MSI-X interrupt before we have Rx queues",
1613*bfcc09ddSBjoern A. Zeeb 		      entry->entry))
1614*bfcc09ddSBjoern A. Zeeb 		return IRQ_NONE;
1615*bfcc09ddSBjoern A. Zeeb 
1616*bfcc09ddSBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1617*bfcc09ddSBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1618*bfcc09ddSBjoern A. Zeeb 
1619*bfcc09ddSBjoern A. Zeeb 	local_bh_disable();
1620*bfcc09ddSBjoern A. Zeeb 	if (napi_schedule_prep(&rxq->napi))
1621*bfcc09ddSBjoern A. Zeeb 		__napi_schedule(&rxq->napi);
1622*bfcc09ddSBjoern A. Zeeb 	else
1623*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, entry->entry);
1624*bfcc09ddSBjoern A. Zeeb 	local_bh_enable();
1625*bfcc09ddSBjoern A. Zeeb 
1626*bfcc09ddSBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
1627*bfcc09ddSBjoern A. Zeeb 
1628*bfcc09ddSBjoern A. Zeeb 	return IRQ_HANDLED;
1629*bfcc09ddSBjoern A. Zeeb }
1630*bfcc09ddSBjoern A. Zeeb 
1631*bfcc09ddSBjoern A. Zeeb /*
1632*bfcc09ddSBjoern A. Zeeb  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1633*bfcc09ddSBjoern A. Zeeb  */
1634*bfcc09ddSBjoern A. Zeeb static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1635*bfcc09ddSBjoern A. Zeeb {
1636*bfcc09ddSBjoern A. Zeeb 	int i;
1637*bfcc09ddSBjoern A. Zeeb 
1638*bfcc09ddSBjoern A. Zeeb 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1639*bfcc09ddSBjoern A. Zeeb 	if (trans->cfg->internal_wimax_coex &&
1640*bfcc09ddSBjoern A. Zeeb 	    !trans->cfg->apmg_not_supported &&
1641*bfcc09ddSBjoern A. Zeeb 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1642*bfcc09ddSBjoern A. Zeeb 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1643*bfcc09ddSBjoern A. Zeeb 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1644*bfcc09ddSBjoern A. Zeeb 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1645*bfcc09ddSBjoern A. Zeeb 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1646*bfcc09ddSBjoern A. Zeeb 		iwl_op_mode_wimax_active(trans->op_mode);
1647*bfcc09ddSBjoern A. Zeeb 		wake_up(&trans->wait_command_queue);
1648*bfcc09ddSBjoern A. Zeeb 		return;
1649*bfcc09ddSBjoern A. Zeeb 	}
1650*bfcc09ddSBjoern A. Zeeb 
1651*bfcc09ddSBjoern A. Zeeb 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
1652*bfcc09ddSBjoern A. Zeeb 		if (!trans->txqs.txq[i])
1653*bfcc09ddSBjoern A. Zeeb 			continue;
1654*bfcc09ddSBjoern A. Zeeb 		del_timer(&trans->txqs.txq[i]->stuck_timer);
1655*bfcc09ddSBjoern A. Zeeb 	}
1656*bfcc09ddSBjoern A. Zeeb 
1657*bfcc09ddSBjoern A. Zeeb 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1658*bfcc09ddSBjoern A. Zeeb 	 * before we wake up the command caller, to ensure a proper cleanup. */
1659*bfcc09ddSBjoern A. Zeeb 	iwl_trans_fw_error(trans, false);
1660*bfcc09ddSBjoern A. Zeeb 
1661*bfcc09ddSBjoern A. Zeeb 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1662*bfcc09ddSBjoern A. Zeeb 	wake_up(&trans->wait_command_queue);
1663*bfcc09ddSBjoern A. Zeeb }
1664*bfcc09ddSBjoern A. Zeeb 
1665*bfcc09ddSBjoern A. Zeeb static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1666*bfcc09ddSBjoern A. Zeeb {
1667*bfcc09ddSBjoern A. Zeeb 	u32 inta;
1668*bfcc09ddSBjoern A. Zeeb 
1669*bfcc09ddSBjoern A. Zeeb 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1670*bfcc09ddSBjoern A. Zeeb 
1671*bfcc09ddSBjoern A. Zeeb 	trace_iwlwifi_dev_irq(trans->dev);
1672*bfcc09ddSBjoern A. Zeeb 
1673*bfcc09ddSBjoern A. Zeeb 	/* Discover which interrupts are active/pending */
1674*bfcc09ddSBjoern A. Zeeb 	inta = iwl_read32(trans, CSR_INT);
1675*bfcc09ddSBjoern A. Zeeb 
1676*bfcc09ddSBjoern A. Zeeb 	/* the thread will service interrupts and re-enable them */
1677*bfcc09ddSBjoern A. Zeeb 	return inta;
1678*bfcc09ddSBjoern A. Zeeb }
1679*bfcc09ddSBjoern A. Zeeb 
1680*bfcc09ddSBjoern A. Zeeb /* a device (PCI-E) page is 4096 bytes long */
1681*bfcc09ddSBjoern A. Zeeb #define ICT_SHIFT	12
1682*bfcc09ddSBjoern A. Zeeb #define ICT_SIZE	(1 << ICT_SHIFT)
1683*bfcc09ddSBjoern A. Zeeb #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
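/*
 * With the values above, the ICT table occupies one 4096-byte device page
 * (ICT_SIZE == 1 << 12) and holds ICT_COUNT == 4096 / sizeof(u32) == 1024
 * entries, which is why the index below wraps with "& (ICT_COUNT - 1)".
 */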
1684*bfcc09ddSBjoern A. Zeeb 
1685*bfcc09ddSBjoern A. Zeeb /* Interrupt handler using the ICT table. With this, the driver stops using
1686*bfcc09ddSBjoern A. Zeeb  * the INTA register to get the device's interrupts, since reading that
1687*bfcc09ddSBjoern A. Zeeb  * register is expensive. Instead, the device writes interrupts into the ICT
1688*bfcc09ddSBjoern A. Zeeb  * DRAM table, increments the index and then fires an interrupt to the driver.
1689*bfcc09ddSBjoern A. Zeeb  * The driver ORs all ICT table entries from the current index up to the first
1690*bfcc09ddSBjoern A. Zeeb  * entry with a 0 value; the result is the set of interrupts to service. The
1691*bfcc09ddSBjoern A. Zeeb  * driver then sets the entries back to 0 and updates the index.
1692*bfcc09ddSBjoern A. Zeeb  */
1693*bfcc09ddSBjoern A. Zeeb static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1694*bfcc09ddSBjoern A. Zeeb {
1695*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1696*bfcc09ddSBjoern A. Zeeb 	u32 inta;
1697*bfcc09ddSBjoern A. Zeeb 	u32 val = 0;
1698*bfcc09ddSBjoern A. Zeeb 	u32 read;
1699*bfcc09ddSBjoern A. Zeeb 
1700*bfcc09ddSBjoern A. Zeeb 	trace_iwlwifi_dev_irq(trans->dev);
1701*bfcc09ddSBjoern A. Zeeb 
1702*bfcc09ddSBjoern A. Zeeb 	/* Ignore interrupt if there's nothing in NIC to service.
1703*bfcc09ddSBjoern A. Zeeb 	 * This may be due to IRQ shared with another device,
1704*bfcc09ddSBjoern A. Zeeb 	 * or due to sporadic interrupts thrown from our NIC. */
1705*bfcc09ddSBjoern A. Zeeb 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1706*bfcc09ddSBjoern A. Zeeb 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1707*bfcc09ddSBjoern A. Zeeb 	if (!read)
1708*bfcc09ddSBjoern A. Zeeb 		return 0;
1709*bfcc09ddSBjoern A. Zeeb 
1710*bfcc09ddSBjoern A. Zeeb 	/*
1711*bfcc09ddSBjoern A. Zeeb 	 * Collect all entries up to the first 0, starting from ict_index;
1712*bfcc09ddSBjoern A. Zeeb 	 * note we already read at ict_index.
1713*bfcc09ddSBjoern A. Zeeb 	 */
1714*bfcc09ddSBjoern A. Zeeb 	do {
1715*bfcc09ddSBjoern A. Zeeb 		val |= read;
1716*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1717*bfcc09ddSBjoern A. Zeeb 				trans_pcie->ict_index, read);
1718*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1719*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ict_index =
1720*bfcc09ddSBjoern A. Zeeb 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1721*bfcc09ddSBjoern A. Zeeb 
1722*bfcc09ddSBjoern A. Zeeb 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1723*bfcc09ddSBjoern A. Zeeb 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1724*bfcc09ddSBjoern A. Zeeb 					   read);
1725*bfcc09ddSBjoern A. Zeeb 	} while (read);
1726*bfcc09ddSBjoern A. Zeeb 
1727*bfcc09ddSBjoern A. Zeeb 	/* We should not get this value, just ignore it. */
1728*bfcc09ddSBjoern A. Zeeb 	if (val == 0xffffffff)
1729*bfcc09ddSBjoern A. Zeeb 		val = 0;
1730*bfcc09ddSBjoern A. Zeeb 
1731*bfcc09ddSBjoern A. Zeeb 	/*
1732*bfcc09ddSBjoern A. Zeeb 	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1733*bfcc09ddSBjoern A. Zeeb 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1734*bfcc09ddSBjoern A. Zeeb 	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1735*bfcc09ddSBjoern A. Zeeb 	 * so we use them to decide on the real state of the Rx bit.
1736*bfcc09ddSBjoern A. Zeeb 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1737*bfcc09ddSBjoern A. Zeeb 	 */
1738*bfcc09ddSBjoern A. Zeeb 	if (val & 0xC0000)
1739*bfcc09ddSBjoern A. Zeeb 		val |= 0x8000;
1740*bfcc09ddSBjoern A. Zeeb 
1741*bfcc09ddSBjoern A. Zeeb 	inta = (0xff & val) | ((0xff00 & val) << 16);
1742*bfcc09ddSBjoern A. Zeeb 	return inta;
1743*bfcc09ddSBjoern A. Zeeb }
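/*
 * A worked example of the final re-spreading above, on the assumption (which
 * the w/a comment's "bit 15 before shifting it to 31" supports) that the
 * 16-bit ICT value packs CSR_INT bits 0-7 in its low byte and bits 24-31 in
 * its high byte:
 *
 *	val  = 0x8042
 *	inta = (0xff & 0x8042) | ((0xff00 & 0x8042) << 16)
 *	     = 0x42 | 0x80000000
 *	     = 0x80000042
 */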
1744*bfcc09ddSBjoern A. Zeeb 
1745*bfcc09ddSBjoern A. Zeeb void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans)
1746*bfcc09ddSBjoern A. Zeeb {
1747*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1748*bfcc09ddSBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1749*bfcc09ddSBjoern A. Zeeb 	bool hw_rfkill, prev, report;
1750*bfcc09ddSBjoern A. Zeeb 
1751*bfcc09ddSBjoern A. Zeeb 	mutex_lock(&trans_pcie->mutex);
1752*bfcc09ddSBjoern A. Zeeb 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1753*bfcc09ddSBjoern A. Zeeb 	hw_rfkill = iwl_is_rfkill_set(trans);
1754*bfcc09ddSBjoern A. Zeeb 	if (hw_rfkill) {
1755*bfcc09ddSBjoern A. Zeeb 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1756*bfcc09ddSBjoern A. Zeeb 		set_bit(STATUS_RFKILL_HW, &trans->status);
1757*bfcc09ddSBjoern A. Zeeb 	}
1758*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->opmode_down)
1759*bfcc09ddSBjoern A. Zeeb 		report = hw_rfkill;
1760*bfcc09ddSBjoern A. Zeeb 	else
1761*bfcc09ddSBjoern A. Zeeb 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1762*bfcc09ddSBjoern A. Zeeb 
1763*bfcc09ddSBjoern A. Zeeb 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1764*bfcc09ddSBjoern A. Zeeb 		 hw_rfkill ? "disable radio" : "enable radio");
1765*bfcc09ddSBjoern A. Zeeb 
1766*bfcc09ddSBjoern A. Zeeb 	isr_stats->rfkill++;
1767*bfcc09ddSBjoern A. Zeeb 
1768*bfcc09ddSBjoern A. Zeeb 	if (prev != report)
1769*bfcc09ddSBjoern A. Zeeb 		iwl_trans_pcie_rf_kill(trans, report);
1770*bfcc09ddSBjoern A. Zeeb 	mutex_unlock(&trans_pcie->mutex);
1771*bfcc09ddSBjoern A. Zeeb 
1772*bfcc09ddSBjoern A. Zeeb 	if (hw_rfkill) {
1773*bfcc09ddSBjoern A. Zeeb 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1774*bfcc09ddSBjoern A. Zeeb 				       &trans->status))
1775*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_RF_KILL(trans,
1776*bfcc09ddSBjoern A. Zeeb 					  "Rfkill while SYNC HCMD in flight\n");
1777*bfcc09ddSBjoern A. Zeeb 		wake_up(&trans->wait_command_queue);
1778*bfcc09ddSBjoern A. Zeeb 	} else {
1779*bfcc09ddSBjoern A. Zeeb 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1780*bfcc09ddSBjoern A. Zeeb 		if (trans_pcie->opmode_down)
1781*bfcc09ddSBjoern A. Zeeb 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1782*bfcc09ddSBjoern A. Zeeb 	}
1783*bfcc09ddSBjoern A. Zeeb }
1784*bfcc09ddSBjoern A. Zeeb 
1785*bfcc09ddSBjoern A. Zeeb irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1786*bfcc09ddSBjoern A. Zeeb {
1787*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans = dev_id;
1788*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1789*bfcc09ddSBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1790*bfcc09ddSBjoern A. Zeeb 	u32 inta = 0;
1791*bfcc09ddSBjoern A. Zeeb 	u32 handled = 0;
1792*bfcc09ddSBjoern A. Zeeb 	bool polling = false;
1793*bfcc09ddSBjoern A. Zeeb 
1794*bfcc09ddSBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1795*bfcc09ddSBjoern A. Zeeb 
1796*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
1797*bfcc09ddSBjoern A. Zeeb 
1798*bfcc09ddSBjoern A. Zeeb 	/* dram interrupt table not set yet,
1799*bfcc09ddSBjoern A. Zeeb 	 * use legacy interrupt.
1800*bfcc09ddSBjoern A. Zeeb 	 */
1801*bfcc09ddSBjoern A. Zeeb 	if (likely(trans_pcie->use_ict))
1802*bfcc09ddSBjoern A. Zeeb 		inta = iwl_pcie_int_cause_ict(trans);
1803*bfcc09ddSBjoern A. Zeeb 	else
1804*bfcc09ddSBjoern A. Zeeb 		inta = iwl_pcie_int_cause_non_ict(trans);
1805*bfcc09ddSBjoern A. Zeeb 
1806*bfcc09ddSBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
1807*bfcc09ddSBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1808*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
1809*bfcc09ddSBjoern A. Zeeb 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1810*bfcc09ddSBjoern A. Zeeb 			      inta, trans_pcie->inta_mask,
1811*bfcc09ddSBjoern A. Zeeb 			      iwl_read32(trans, CSR_INT_MASK),
1812*bfcc09ddSBjoern A. Zeeb 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1813*bfcc09ddSBjoern A. Zeeb 		if (inta & (~trans_pcie->inta_mask))
1814*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
1815*bfcc09ddSBjoern A. Zeeb 				      "We got a masked interrupt (0x%08x)\n",
1816*bfcc09ddSBjoern A. Zeeb 				      inta & (~trans_pcie->inta_mask));
1817*bfcc09ddSBjoern A. Zeeb 	}
1818*bfcc09ddSBjoern A. Zeeb #endif
1819*bfcc09ddSBjoern A. Zeeb 
1820*bfcc09ddSBjoern A. Zeeb 	inta &= trans_pcie->inta_mask;
1821*bfcc09ddSBjoern A. Zeeb 
1822*bfcc09ddSBjoern A. Zeeb 	/*
1823*bfcc09ddSBjoern A. Zeeb 	 * Ignore interrupt if there's nothing in NIC to service.
1824*bfcc09ddSBjoern A. Zeeb 	 * This may be due to IRQ shared with another device,
1825*bfcc09ddSBjoern A. Zeeb 	 * or due to sporadic interrupts thrown from our NIC.
1826*bfcc09ddSBjoern A. Zeeb 	 */
1827*bfcc09ddSBjoern A. Zeeb 	if (unlikely(!inta)) {
1828*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1829*bfcc09ddSBjoern A. Zeeb 		/*
1830*bfcc09ddSBjoern A. Zeeb 		 * Re-enable interrupts here since we don't
1831*bfcc09ddSBjoern A. Zeeb 		 * have anything to service
1832*bfcc09ddSBjoern A. Zeeb 		 */
1833*bfcc09ddSBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1834*bfcc09ddSBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
1835*bfcc09ddSBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
1836*bfcc09ddSBjoern A. Zeeb 		lock_map_release(&trans->sync_cmd_lockdep_map);
1837*bfcc09ddSBjoern A. Zeeb 		return IRQ_NONE;
1838*bfcc09ddSBjoern A. Zeeb 	}
1839*bfcc09ddSBjoern A. Zeeb 
1840*bfcc09ddSBjoern A. Zeeb 	if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
1841*bfcc09ddSBjoern A. Zeeb 		/*
1842*bfcc09ddSBjoern A. Zeeb 		 * Hardware disappeared. It might have
1843*bfcc09ddSBjoern A. Zeeb 		 * already raised an interrupt.
1844*bfcc09ddSBjoern A. Zeeb 		 */
1845*bfcc09ddSBjoern A. Zeeb 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1846*bfcc09ddSBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
1847*bfcc09ddSBjoern A. Zeeb 		goto out;
1848*bfcc09ddSBjoern A. Zeeb 	}
1849*bfcc09ddSBjoern A. Zeeb 
1850*bfcc09ddSBjoern A. Zeeb 	/* Ack/clear/reset pending uCode interrupts.
1851*bfcc09ddSBjoern A. Zeeb 	 * Note:  Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
1852*bfcc09ddSBjoern A. Zeeb 	 */
1853*bfcc09ddSBjoern A. Zeeb 	/* There is a hardware bug in the interrupt mask function that some
1854*bfcc09ddSBjoern A. Zeeb 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1855*bfcc09ddSBjoern A. Zeeb 	 * they are disabled in the CSR_INT_MASK register. Furthermore the
1856*bfcc09ddSBjoern A. Zeeb 	 * ICT interrupt handling mechanism has another bug that might cause
1857*bfcc09ddSBjoern A. Zeeb 	 * these unmasked interrupts to fail to be detected. We work around the
1858*bfcc09ddSBjoern A. Zeeb 	 * hardware bugs here by ACKing all the possible interrupts so that
1859*bfcc09ddSBjoern A. Zeeb 	 * interrupt coalescing can still be achieved.
1860*bfcc09ddSBjoern A. Zeeb 	 */
1861*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1862*bfcc09ddSBjoern A. Zeeb 
1863*bfcc09ddSBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
1864*bfcc09ddSBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR))
1865*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1866*bfcc09ddSBjoern A. Zeeb 			      inta, iwl_read32(trans, CSR_INT_MASK));
1867*bfcc09ddSBjoern A. Zeeb #endif
1868*bfcc09ddSBjoern A. Zeeb 
1869*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
1870*bfcc09ddSBjoern A. Zeeb 
1871*bfcc09ddSBjoern A. Zeeb 	/* Now service all interrupt bits discovered above. */
1872*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_HW_ERR) {
1873*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1874*bfcc09ddSBjoern A. Zeeb 
1875*bfcc09ddSBjoern A. Zeeb 		/* Tell the device to stop sending interrupts */
1876*bfcc09ddSBjoern A. Zeeb 		iwl_disable_interrupts(trans);
1877*bfcc09ddSBjoern A. Zeeb 
1878*bfcc09ddSBjoern A. Zeeb 		isr_stats->hw++;
1879*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_irq_handle_error(trans);
1880*bfcc09ddSBjoern A. Zeeb 
1881*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_HW_ERR;
1882*bfcc09ddSBjoern A. Zeeb 
1883*bfcc09ddSBjoern A. Zeeb 		goto out;
1884*bfcc09ddSBjoern A. Zeeb 	}
1885*bfcc09ddSBjoern A. Zeeb 
1886*bfcc09ddSBjoern A. Zeeb 	/* NIC fires this, but we don't use it, redundant with WAKEUP */
1887*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_SCD) {
1888*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
1889*bfcc09ddSBjoern A. Zeeb 			      "Scheduler finished to transmit the frame/frames.\n");
1890*bfcc09ddSBjoern A. Zeeb 		isr_stats->sch++;
1891*bfcc09ddSBjoern A. Zeeb 	}
1892*bfcc09ddSBjoern A. Zeeb 
1893*bfcc09ddSBjoern A. Zeeb 	/* Alive notification via Rx interrupt will do the real work */
1894*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_ALIVE) {
1895*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
1896*bfcc09ddSBjoern A. Zeeb 		isr_stats->alive++;
1897*bfcc09ddSBjoern A. Zeeb 		if (trans->trans_cfg->gen2) {
1898*bfcc09ddSBjoern A. Zeeb 			/*
1899*bfcc09ddSBjoern A. Zeeb 			 * We can restock, since firmware configured
1900*bfcc09ddSBjoern A. Zeeb 			 * the RFH
1901*bfcc09ddSBjoern A. Zeeb 			 */
1902*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
1903*bfcc09ddSBjoern A. Zeeb 		}
1904*bfcc09ddSBjoern A. Zeeb 
1905*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_ALIVE;
1906*bfcc09ddSBjoern A. Zeeb 	}
1907*bfcc09ddSBjoern A. Zeeb 
1908*bfcc09ddSBjoern A. Zeeb 	/* Safely ignore these bits for debug checks below */
1909*bfcc09ddSBjoern A. Zeeb 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
1910*bfcc09ddSBjoern A. Zeeb 
1911*bfcc09ddSBjoern A. Zeeb 	/* HW RF KILL switch toggled */
1912*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_RF_KILL) {
1913*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_handle_rfkill_irq(trans);
1914*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_RF_KILL;
1915*bfcc09ddSBjoern A. Zeeb 	}
1916*bfcc09ddSBjoern A. Zeeb 
1917*bfcc09ddSBjoern A. Zeeb 	/* Chip got too hot and stopped itself */
1918*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_CT_KILL) {
1919*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
1920*bfcc09ddSBjoern A. Zeeb 		isr_stats->ctkill++;
1921*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_CT_KILL;
1922*bfcc09ddSBjoern A. Zeeb 	}
1923*bfcc09ddSBjoern A. Zeeb 
1924*bfcc09ddSBjoern A. Zeeb 	/* Error detected by uCode */
1925*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_SW_ERR) {
1926*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans, "Microcode SW error detected. "
1927*bfcc09ddSBjoern A. Zeeb 			" Restarting 0x%X.\n", inta);
1928*bfcc09ddSBjoern A. Zeeb 		isr_stats->sw++;
1929*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_irq_handle_error(trans);
1930*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_SW_ERR;
1931*bfcc09ddSBjoern A. Zeeb 	}
1932*bfcc09ddSBjoern A. Zeeb 
1933*bfcc09ddSBjoern A. Zeeb 	/* uCode wakes up after power-down sleep */
1934*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_WAKEUP) {
1935*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
1936*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_rxq_check_wrptr(trans);
1937*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_txq_check_wrptrs(trans);
1938*bfcc09ddSBjoern A. Zeeb 
1939*bfcc09ddSBjoern A. Zeeb 		isr_stats->wakeup++;
1940*bfcc09ddSBjoern A. Zeeb 
1941*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_WAKEUP;
1942*bfcc09ddSBjoern A. Zeeb 	}
1943*bfcc09ddSBjoern A. Zeeb 
1944*bfcc09ddSBjoern A. Zeeb 	/* All uCode command responses, including Tx command responses,
1945*bfcc09ddSBjoern A. Zeeb 	 * Rx "responses" (frame-received notification), and other
1946*bfcc09ddSBjoern A. Zeeb 	 * notifications from uCode come through here*/
1947*bfcc09ddSBjoern A. Zeeb 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
1948*bfcc09ddSBjoern A. Zeeb 		    CSR_INT_BIT_RX_PERIODIC)) {
1949*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
1950*bfcc09ddSBjoern A. Zeeb 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
1951*bfcc09ddSBjoern A. Zeeb 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
1952*bfcc09ddSBjoern A. Zeeb 			iwl_write32(trans, CSR_FH_INT_STATUS,
1953*bfcc09ddSBjoern A. Zeeb 					CSR_FH_INT_RX_MASK);
1954*bfcc09ddSBjoern A. Zeeb 		}
1955*bfcc09ddSBjoern A. Zeeb 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
1956*bfcc09ddSBjoern A. Zeeb 			handled |= CSR_INT_BIT_RX_PERIODIC;
1957*bfcc09ddSBjoern A. Zeeb 			iwl_write32(trans,
1958*bfcc09ddSBjoern A. Zeeb 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
1959*bfcc09ddSBjoern A. Zeeb 		}
1960*bfcc09ddSBjoern A. Zeeb 		/* Sending an RX interrupt requires many steps to be done in
1961*bfcc09ddSBjoern A. Zeeb 		 * the device:
1962*bfcc09ddSBjoern A. Zeeb 		 * 1- write interrupt to current index in ICT table.
1963*bfcc09ddSBjoern A. Zeeb 		 * 2- dma RX frame.
1964*bfcc09ddSBjoern A. Zeeb 		 * 3- update RX shared data to indicate last write index.
1965*bfcc09ddSBjoern A. Zeeb 		 * 4- send interrupt.
1966*bfcc09ddSBjoern A. Zeeb 		 * This could lead to an RX race: the driver could receive an
1967*bfcc09ddSBjoern A. Zeeb 		 * RX interrupt, but the shared data changes do not reflect it
1968*bfcc09ddSBjoern A. Zeeb 		 * yet; the periodic interrupt will detect any dangling Rx activity.
1969*bfcc09ddSBjoern A. Zeeb 		 */
1970*bfcc09ddSBjoern A. Zeeb 
1971*bfcc09ddSBjoern A. Zeeb 		/* Disable periodic interrupt; we use it as just a one-shot. */
1972*bfcc09ddSBjoern A. Zeeb 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
1973*bfcc09ddSBjoern A. Zeeb 			    CSR_INT_PERIODIC_DIS);
1974*bfcc09ddSBjoern A. Zeeb 
1975*bfcc09ddSBjoern A. Zeeb 		/*
1976*bfcc09ddSBjoern A. Zeeb 		 * Enable periodic interrupt in 8 msec only if we received
1977*bfcc09ddSBjoern A. Zeeb 		 * real RX interrupt (instead of just periodic int), to catch
1978*bfcc09ddSBjoern A. Zeeb 		 * any dangling Rx interrupt.  If it was just the periodic
1979*bfcc09ddSBjoern A. Zeeb 		 * interrupt, there was no dangling Rx activity, and no need
1980*bfcc09ddSBjoern A. Zeeb 		 * to extend the periodic interrupt; one-shot is enough.
1981*bfcc09ddSBjoern A. Zeeb 		 */
1982*bfcc09ddSBjoern A. Zeeb 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
1983*bfcc09ddSBjoern A. Zeeb 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
1984*bfcc09ddSBjoern A. Zeeb 				   CSR_INT_PERIODIC_ENA);
1985*bfcc09ddSBjoern A. Zeeb 
1986*bfcc09ddSBjoern A. Zeeb 		isr_stats->rx++;
1987*bfcc09ddSBjoern A. Zeeb 
1988*bfcc09ddSBjoern A. Zeeb 		local_bh_disable();
1989*bfcc09ddSBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
1990*bfcc09ddSBjoern A. Zeeb 			polling = true;
1991*bfcc09ddSBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[0].napi);
1992*bfcc09ddSBjoern A. Zeeb 		}
1993*bfcc09ddSBjoern A. Zeeb 		local_bh_enable();
1994*bfcc09ddSBjoern A. Zeeb 	}
1995*bfcc09ddSBjoern A. Zeeb 
1996*bfcc09ddSBjoern A. Zeeb 	/* This "Tx" DMA channel is used only for loading uCode */
1997*bfcc09ddSBjoern A. Zeeb 	if (inta & CSR_INT_BIT_FH_TX) {
1998*bfcc09ddSBjoern A. Zeeb 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
1999*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2000*bfcc09ddSBjoern A. Zeeb 		isr_stats->tx++;
2001*bfcc09ddSBjoern A. Zeeb 		handled |= CSR_INT_BIT_FH_TX;
2002*bfcc09ddSBjoern A. Zeeb 		/* Wake up uCode load routine, now that load is complete */
2003*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ucode_write_complete = true;
2004*bfcc09ddSBjoern A. Zeeb 		wake_up(&trans_pcie->ucode_write_waitq);
2005*bfcc09ddSBjoern A. Zeeb 	}
2006*bfcc09ddSBjoern A. Zeeb 
2007*bfcc09ddSBjoern A. Zeeb 	if (inta & ~handled) {
2008*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2009*bfcc09ddSBjoern A. Zeeb 		isr_stats->unhandled++;
2010*bfcc09ddSBjoern A. Zeeb 	}
2011*bfcc09ddSBjoern A. Zeeb 
2012*bfcc09ddSBjoern A. Zeeb 	if (inta & ~(trans_pcie->inta_mask)) {
2013*bfcc09ddSBjoern A. Zeeb 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2014*bfcc09ddSBjoern A. Zeeb 			 inta & ~trans_pcie->inta_mask);
2015*bfcc09ddSBjoern A. Zeeb 	}
2016*bfcc09ddSBjoern A. Zeeb 
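	/*
	 * Re-enable interrupts here only when no NAPI poll was scheduled
	 * above; when polling, interrupts are expected to be re-enabled
	 * once the poll routine completes.
	 */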
2017*bfcc09ddSBjoern A. Zeeb 	if (!polling) {
2018*bfcc09ddSBjoern A. Zeeb 		spin_lock_bh(&trans_pcie->irq_lock);
2019*bfcc09ddSBjoern A. Zeeb 		/* only re-enable all interrupts if they were disabled by the irq */
2020*bfcc09ddSBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
2021*bfcc09ddSBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
2022*bfcc09ddSBjoern A. Zeeb 		/* we are loading the firmware; enable the FH_TX interrupt only */
2023*bfcc09ddSBjoern A. Zeeb 		else if (handled & CSR_INT_BIT_FH_TX)
2024*bfcc09ddSBjoern A. Zeeb 			iwl_enable_fw_load_int(trans);
2025*bfcc09ddSBjoern A. Zeeb 		/* Re-enable the RF_KILL interrupt if it occurred */
2026*bfcc09ddSBjoern A. Zeeb 		else if (handled & CSR_INT_BIT_RF_KILL)
2027*bfcc09ddSBjoern A. Zeeb 			iwl_enable_rfkill_int(trans);
2028*bfcc09ddSBjoern A. Zeeb 		/* Re-enable the ALIVE / Rx interrupt if it occurred */
2029*bfcc09ddSBjoern A. Zeeb 		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2030*bfcc09ddSBjoern A. Zeeb 			iwl_enable_fw_load_int_ctx_info(trans);
2031*bfcc09ddSBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
2032*bfcc09ddSBjoern A. Zeeb 	}
2033*bfcc09ddSBjoern A. Zeeb 
2034*bfcc09ddSBjoern A. Zeeb out:
2035*bfcc09ddSBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
2036*bfcc09ddSBjoern A. Zeeb 	return IRQ_HANDLED;
2037*bfcc09ddSBjoern A. Zeeb }
2038*bfcc09ddSBjoern A. Zeeb 
2039*bfcc09ddSBjoern A. Zeeb /******************************************************************************
2040*bfcc09ddSBjoern A. Zeeb  *
2041*bfcc09ddSBjoern A. Zeeb  * ICT functions
2042*bfcc09ddSBjoern A. Zeeb  *
2043*bfcc09ddSBjoern A. Zeeb  ******************************************************************************/
2044*bfcc09ddSBjoern A. Zeeb 
2045*bfcc09ddSBjoern A. Zeeb /* Free the DRAM ICT table */
2046*bfcc09ddSBjoern A. Zeeb void iwl_pcie_free_ict(struct iwl_trans *trans)
2047*bfcc09ddSBjoern A. Zeeb {
2048*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2049*bfcc09ddSBjoern A. Zeeb 
2050*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->ict_tbl) {
2051*bfcc09ddSBjoern A. Zeeb 		dma_free_coherent(trans->dev, ICT_SIZE,
2052*bfcc09ddSBjoern A. Zeeb 				  trans_pcie->ict_tbl,
2053*bfcc09ddSBjoern A. Zeeb 				  trans_pcie->ict_tbl_dma);
2054*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ict_tbl = NULL;
2055*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ict_tbl_dma = 0;
2056*bfcc09ddSBjoern A. Zeeb 	}
2057*bfcc09ddSBjoern A. Zeeb }
2058*bfcc09ddSBjoern A. Zeeb 
2059*bfcc09ddSBjoern A. Zeeb /*
2060*bfcc09ddSBjoern A. Zeeb  * Allocate the DRAM-shared ICT table, an aligned memory
2061*bfcc09ddSBjoern A. Zeeb  * block of ICT_SIZE bytes, and reset all data related to
2062*bfcc09ddSBjoern A. Zeeb  * the ICT table interrupt.
2063*bfcc09ddSBjoern A. Zeeb  */
2064*bfcc09ddSBjoern A. Zeeb int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2065*bfcc09ddSBjoern A. Zeeb {
2066*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2067*bfcc09ddSBjoern A. Zeeb 
2068*bfcc09ddSBjoern A. Zeeb 	trans_pcie->ict_tbl =
2069*bfcc09ddSBjoern A. Zeeb 		dma_alloc_coherent(trans->dev, ICT_SIZE,
2070*bfcc09ddSBjoern A. Zeeb 				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2071*bfcc09ddSBjoern A. Zeeb 	if (!trans_pcie->ict_tbl)
2072*bfcc09ddSBjoern A. Zeeb 		return -ENOMEM;
2073*bfcc09ddSBjoern A. Zeeb 
2074*bfcc09ddSBjoern A. Zeeb 	/* just an API sanity check ... it is guaranteed to be aligned */
2075*bfcc09ddSBjoern A. Zeeb 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2076*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_free_ict(trans);
2077*bfcc09ddSBjoern A. Zeeb 		return -EINVAL;
2078*bfcc09ddSBjoern A. Zeeb 	}
2079*bfcc09ddSBjoern A. Zeeb 
2080*bfcc09ddSBjoern A. Zeeb 	return 0;
2081*bfcc09ddSBjoern A. Zeeb }
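/*
 * Editorial note (illustrative, not part of the driver): the WARN_ON() above
 * relies on the usual power-of-two alignment-check idiom -- "addr & (size - 1)"
 * is zero exactly when addr is a multiple of size, provided size is a power of
 * two.  A minimal self-contained sketch of that idiom follows; the function
 * name is hypothetical and does not exist in iwlwifi.
 */
#if 0	/* sketch only -- not compiled */
#include <stdbool.h>
#include <stdint.h>

/* True when 'addr' is aligned to 'size'; 'size' must be a power of two. */
static bool example_is_pow2_aligned(uintptr_t addr, uintptr_t size)
{
	return (addr & (size - 1)) == 0;
}
#endif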
2082*bfcc09ddSBjoern A. Zeeb 
2083*bfcc09ddSBjoern A. Zeeb /* The device is going up: inform it that it should use the ICT
2084*bfcc09ddSBjoern A. Zeeb  * interrupt table, and tell the driver to start using ICT interrupts.
2085*bfcc09ddSBjoern A. Zeeb  */
2086*bfcc09ddSBjoern A. Zeeb void iwl_pcie_reset_ict(struct iwl_trans *trans)
2087*bfcc09ddSBjoern A. Zeeb {
2088*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2089*bfcc09ddSBjoern A. Zeeb 	u32 val;
2090*bfcc09ddSBjoern A. Zeeb 
2091*bfcc09ddSBjoern A. Zeeb 	if (!trans_pcie->ict_tbl)
2092*bfcc09ddSBjoern A. Zeeb 		return;
2093*bfcc09ddSBjoern A. Zeeb 
2094*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2095*bfcc09ddSBjoern A. Zeeb 	_iwl_disable_interrupts(trans);
2096*bfcc09ddSBjoern A. Zeeb 
2097*bfcc09ddSBjoern A. Zeeb 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2098*bfcc09ddSBjoern A. Zeeb 
2099*bfcc09ddSBjoern A. Zeeb 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2100*bfcc09ddSBjoern A. Zeeb 
2101*bfcc09ddSBjoern A. Zeeb 	val |= CSR_DRAM_INT_TBL_ENABLE |
2102*bfcc09ddSBjoern A. Zeeb 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2103*bfcc09ddSBjoern A. Zeeb 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2104*bfcc09ddSBjoern A. Zeeb 
2105*bfcc09ddSBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG = 0x%x\n", val);
2106*bfcc09ddSBjoern A. Zeeb 
2107*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2108*bfcc09ddSBjoern A. Zeeb 	trans_pcie->use_ict = true;
2109*bfcc09ddSBjoern A. Zeeb 	trans_pcie->ict_index = 0;
2110*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2111*bfcc09ddSBjoern A. Zeeb 	_iwl_enable_interrupts(trans);
2112*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2113*bfcc09ddSBjoern A. Zeeb }
2114*bfcc09ddSBjoern A. Zeeb 
2115*bfcc09ddSBjoern A. Zeeb /* The device is going down: disable ICT interrupt usage */
2116*bfcc09ddSBjoern A. Zeeb void iwl_pcie_disable_ict(struct iwl_trans *trans)
2117*bfcc09ddSBjoern A. Zeeb {
2118*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2119*bfcc09ddSBjoern A. Zeeb 
2120*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2121*bfcc09ddSBjoern A. Zeeb 	trans_pcie->use_ict = false;
2122*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2123*bfcc09ddSBjoern A. Zeeb }
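/*
 * Editorial note (illustrative, not part of the driver): the four ICT entry
 * points above form a simple lifecycle -- allocate the table once, program it
 * into the device each time the device comes up, stop consulting it before
 * the device goes down, and free it on teardown.  A minimal sketch under that
 * assumption; the example_* helper names are hypothetical, only the
 * iwl_pcie_*_ict() calls come from this file.
 */
#if 0	/* sketch only -- not compiled */
static int example_ict_setup(struct iwl_trans *trans)
{
	int ret;

	/* allocate the DMA-coherent ICT table once */
	ret = iwl_pcie_alloc_ict(trans);
	if (ret)
		return ret;

	/* point the device at the table and start using ICT interrupts */
	iwl_pcie_reset_ict(trans);
	return 0;
}

static void example_ict_teardown(struct iwl_trans *trans)
{
	/* stop using ICT interrupts, then release the DMA memory */
	iwl_pcie_disable_ict(trans);
	iwl_pcie_free_ict(trans);
}
#endif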
2124*bfcc09ddSBjoern A. Zeeb 
2125*bfcc09ddSBjoern A. Zeeb irqreturn_t iwl_pcie_isr(int irq, void *data)
2126*bfcc09ddSBjoern A. Zeeb {
2127*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans = data;
2128*bfcc09ddSBjoern A. Zeeb 
2129*bfcc09ddSBjoern A. Zeeb 	if (!trans)
2130*bfcc09ddSBjoern A. Zeeb 		return IRQ_NONE;
2131*bfcc09ddSBjoern A. Zeeb 
2132*bfcc09ddSBjoern A. Zeeb 	/* Disable (but don't clear!) interrupts here to avoid
2133*bfcc09ddSBjoern A. Zeeb 	 * back-to-back ISRs and sporadic interrupts from our NIC.
2134*bfcc09ddSBjoern A. Zeeb 	 * If we have something to service, the irq thread will re-enable ints.
2135*bfcc09ddSBjoern A. Zeeb 	 * If we *don't* have something, we'll re-enable before leaving here.
2136*bfcc09ddSBjoern A. Zeeb 	 */
2137*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2138*bfcc09ddSBjoern A. Zeeb 
2139*bfcc09ddSBjoern A. Zeeb 	return IRQ_WAKE_THREAD;
2140*bfcc09ddSBjoern A. Zeeb }
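/*
 * Editorial note (illustrative, not part of the driver): iwl_pcie_isr() only
 * masks CSR interrupts and returns IRQ_WAKE_THREAD, so the actual servicing
 * is done by a threaded handler.  A minimal sketch of how such a
 * hard-irq/thread pair is typically registered with request_threaded_irq();
 * the thread function and the setup helper below are placeholders, not the
 * driver's registration code.
 */
#if 0	/* sketch only -- not compiled */
static int example_register_irq(struct pci_dev *pdev, struct iwl_trans *trans)
{
	/* hard handler runs in irq context, example_irq_thread_fn in a kthread */
	return request_threaded_irq(pdev->irq, iwl_pcie_isr,
				    example_irq_thread_fn, IRQF_SHARED,
				    "iwlwifi-example", trans);
}
#endif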
2141*bfcc09ddSBjoern A. Zeeb 
2142*bfcc09ddSBjoern A. Zeeb irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2143*bfcc09ddSBjoern A. Zeeb {
2144*bfcc09ddSBjoern A. Zeeb 	return IRQ_WAKE_THREAD;
2145*bfcc09ddSBjoern A. Zeeb }
2146*bfcc09ddSBjoern A. Zeeb 
2147*bfcc09ddSBjoern A. Zeeb irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2148*bfcc09ddSBjoern A. Zeeb {
2149*bfcc09ddSBjoern A. Zeeb 	struct msix_entry *entry = dev_id;
2150*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2151*bfcc09ddSBjoern A. Zeeb 	struct iwl_trans *trans = trans_pcie->trans;
2152*bfcc09ddSBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2153*bfcc09ddSBjoern A. Zeeb 	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
2154*bfcc09ddSBjoern A. Zeeb 	u32 inta_fh, inta_hw;
2155*bfcc09ddSBjoern A. Zeeb 	bool polling = false;
2156*bfcc09ddSBjoern A. Zeeb 	bool sw_err;
2157*bfcc09ddSBjoern A. Zeeb 
2158*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2159*bfcc09ddSBjoern A. Zeeb 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2160*bfcc09ddSBjoern A. Zeeb 
2161*bfcc09ddSBjoern A. Zeeb 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2162*bfcc09ddSBjoern A. Zeeb 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2163*bfcc09ddSBjoern A. Zeeb 
2164*bfcc09ddSBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
2165*bfcc09ddSBjoern A. Zeeb 
2166*bfcc09ddSBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2167*bfcc09ddSBjoern A. Zeeb 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2168*bfcc09ddSBjoern A. Zeeb 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2169*bfcc09ddSBjoern A. Zeeb 	/*
2170*bfcc09ddSBjoern A. Zeeb 	 * Clear the causes registers to avoid handling the same cause again.
2171*bfcc09ddSBjoern A. Zeeb 	 */
2172*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2173*bfcc09ddSBjoern A. Zeeb 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2174*bfcc09ddSBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2175*bfcc09ddSBjoern A. Zeeb 
2176*bfcc09ddSBjoern A. Zeeb 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2177*bfcc09ddSBjoern A. Zeeb 
2178*bfcc09ddSBjoern A. Zeeb 	if (unlikely(!(inta_fh | inta_hw))) {
2179*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2180*bfcc09ddSBjoern A. Zeeb 		lock_map_release(&trans->sync_cmd_lockdep_map);
2181*bfcc09ddSBjoern A. Zeeb 		return IRQ_NONE;
2182*bfcc09ddSBjoern A. Zeeb 	}
2183*bfcc09ddSBjoern A. Zeeb 
2184*bfcc09ddSBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
2185*bfcc09ddSBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2186*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
2187*bfcc09ddSBjoern A. Zeeb 			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2188*bfcc09ddSBjoern A. Zeeb 			      entry->entry, inta_fh, trans_pcie->fh_mask,
2189*bfcc09ddSBjoern A. Zeeb 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2190*bfcc09ddSBjoern A. Zeeb 		if (inta_fh & ~trans_pcie->fh_mask)
2191*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2192*bfcc09ddSBjoern A. Zeeb 				      "We got a masked interrupt (0x%08x)\n",
2193*bfcc09ddSBjoern A. Zeeb 				      inta_fh & ~trans_pcie->fh_mask);
2194*bfcc09ddSBjoern A. Zeeb 	}
2195*bfcc09ddSBjoern A. Zeeb #endif
2196*bfcc09ddSBjoern A. Zeeb 
2197*bfcc09ddSBjoern A. Zeeb 	inta_fh &= trans_pcie->fh_mask;
2198*bfcc09ddSBjoern A. Zeeb 
2199*bfcc09ddSBjoern A. Zeeb 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2200*bfcc09ddSBjoern A. Zeeb 	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2201*bfcc09ddSBjoern A. Zeeb 		local_bh_disable();
2202*bfcc09ddSBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2203*bfcc09ddSBjoern A. Zeeb 			polling = true;
2204*bfcc09ddSBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[0].napi);
2205*bfcc09ddSBjoern A. Zeeb 		}
2206*bfcc09ddSBjoern A. Zeeb 		local_bh_enable();
2207*bfcc09ddSBjoern A. Zeeb 	}
2208*bfcc09ddSBjoern A. Zeeb 
2209*bfcc09ddSBjoern A. Zeeb 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2210*bfcc09ddSBjoern A. Zeeb 	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2211*bfcc09ddSBjoern A. Zeeb 		local_bh_disable();
2212*bfcc09ddSBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2213*bfcc09ddSBjoern A. Zeeb 			polling = true;
2214*bfcc09ddSBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[1].napi);
2215*bfcc09ddSBjoern A. Zeeb 		}
2216*bfcc09ddSBjoern A. Zeeb 		local_bh_enable();
2217*bfcc09ddSBjoern A. Zeeb 	}
2218*bfcc09ddSBjoern A. Zeeb 
2219*bfcc09ddSBjoern A. Zeeb 	/* This "Tx" DMA channel is used only for loading uCode */
2220*bfcc09ddSBjoern A. Zeeb 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2221*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2222*bfcc09ddSBjoern A. Zeeb 		isr_stats->tx++;
2223*bfcc09ddSBjoern A. Zeeb 		/*
2224*bfcc09ddSBjoern A. Zeeb 		 * Wake up uCode load routine,
2225*bfcc09ddSBjoern A. Zeeb 		 * now that load is complete
2226*bfcc09ddSBjoern A. Zeeb 		 */
2227*bfcc09ddSBjoern A. Zeeb 		trans_pcie->ucode_write_complete = true;
2228*bfcc09ddSBjoern A. Zeeb 		wake_up(&trans_pcie->ucode_write_waitq);
2229*bfcc09ddSBjoern A. Zeeb 	}
2230*bfcc09ddSBjoern A. Zeeb 
2231*bfcc09ddSBjoern A. Zeeb 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2232*bfcc09ddSBjoern A. Zeeb 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
2233*bfcc09ddSBjoern A. Zeeb 	else
2234*bfcc09ddSBjoern A. Zeeb 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
2235*bfcc09ddSBjoern A. Zeeb 
2236*bfcc09ddSBjoern A. Zeeb 	/* Error detected by uCode */
2237*bfcc09ddSBjoern A. Zeeb 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
2238*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans,
2239*bfcc09ddSBjoern A. Zeeb 			"Microcode SW error detected. Restarting 0x%X.\n",
2240*bfcc09ddSBjoern A. Zeeb 			inta_fh);
2241*bfcc09ddSBjoern A. Zeeb 		isr_stats->sw++;
2242*bfcc09ddSBjoern A. Zeeb 		/* during the FW reset flow, report errors from there */
2243*bfcc09ddSBjoern A. Zeeb 		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2244*bfcc09ddSBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_ERROR;
2245*bfcc09ddSBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
2246*bfcc09ddSBjoern A. Zeeb 		} else {
2247*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_irq_handle_error(trans);
2248*bfcc09ddSBjoern A. Zeeb 		}
2249*bfcc09ddSBjoern A. Zeeb 	}
2250*bfcc09ddSBjoern A. Zeeb 
2251*bfcc09ddSBjoern A. Zeeb 	/* After checking the FH register, check the HW register */
2252*bfcc09ddSBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
2253*bfcc09ddSBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2254*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
2255*bfcc09ddSBjoern A. Zeeb 			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2256*bfcc09ddSBjoern A. Zeeb 			      entry->entry, inta_hw, trans_pcie->hw_mask,
2257*bfcc09ddSBjoern A. Zeeb 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2258*bfcc09ddSBjoern A. Zeeb 		if (inta_hw & ~trans_pcie->hw_mask)
2259*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2260*bfcc09ddSBjoern A. Zeeb 				      "We got a masked interrupt 0x%08x\n",
2261*bfcc09ddSBjoern A. Zeeb 				      inta_hw & ~trans_pcie->hw_mask);
2262*bfcc09ddSBjoern A. Zeeb 	}
2263*bfcc09ddSBjoern A. Zeeb #endif
2264*bfcc09ddSBjoern A. Zeeb 
2265*bfcc09ddSBjoern A. Zeeb 	inta_hw &= trans_pcie->hw_mask;
2266*bfcc09ddSBjoern A. Zeeb 
2267*bfcc09ddSBjoern A. Zeeb 	/* Alive notification via Rx interrupt will do the real work */
2268*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2269*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2270*bfcc09ddSBjoern A. Zeeb 		isr_stats->alive++;
2271*bfcc09ddSBjoern A. Zeeb 		if (trans->trans_cfg->gen2) {
2272*bfcc09ddSBjoern A. Zeeb 			/* We can restock, since firmware configured the RFH */
2273*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2274*bfcc09ddSBjoern A. Zeeb 		}
2275*bfcc09ddSBjoern A. Zeeb 	}
2276*bfcc09ddSBjoern A. Zeeb 
2277*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) {
2278*bfcc09ddSBjoern A. Zeeb 		u32 sleep_notif =
2279*bfcc09ddSBjoern A. Zeeb 			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2280*bfcc09ddSBjoern A. Zeeb 		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2281*bfcc09ddSBjoern A. Zeeb 		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2282*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2283*bfcc09ddSBjoern A. Zeeb 				      "Sx interrupt: sleep notification = 0x%x\n",
2284*bfcc09ddSBjoern A. Zeeb 				      sleep_notif);
2285*bfcc09ddSBjoern A. Zeeb 			trans_pcie->sx_complete = true;
2286*bfcc09ddSBjoern A. Zeeb 			wake_up(&trans_pcie->sx_waitq);
2287*bfcc09ddSBjoern A. Zeeb 		} else {
2288*bfcc09ddSBjoern A. Zeeb 			/* uCode wakes up after power-down sleep */
2289*bfcc09ddSBjoern A. Zeeb 			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2290*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_rxq_check_wrptr(trans);
2291*bfcc09ddSBjoern A. Zeeb 			iwl_pcie_txq_check_wrptrs(trans);
2292*bfcc09ddSBjoern A. Zeeb 
2293*bfcc09ddSBjoern A. Zeeb 			isr_stats->wakeup++;
2294*bfcc09ddSBjoern A. Zeeb 		}
2295*bfcc09ddSBjoern A. Zeeb 	}
2296*bfcc09ddSBjoern A. Zeeb 
2297*bfcc09ddSBjoern A. Zeeb 	/* Chip got too hot and stopped itself */
2298*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2299*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2300*bfcc09ddSBjoern A. Zeeb 		isr_stats->ctkill++;
2301*bfcc09ddSBjoern A. Zeeb 	}
2302*bfcc09ddSBjoern A. Zeeb 
2303*bfcc09ddSBjoern A. Zeeb 	/* HW RF KILL switch toggled */
2304*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2305*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_handle_rfkill_irq(trans);
2306*bfcc09ddSBjoern A. Zeeb 
2307*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2308*bfcc09ddSBjoern A. Zeeb 		IWL_ERR(trans,
2309*bfcc09ddSBjoern A. Zeeb 			"Hardware error detected. Restarting.\n");
2310*bfcc09ddSBjoern A. Zeeb 
2311*bfcc09ddSBjoern A. Zeeb 		isr_stats->hw++;
2312*bfcc09ddSBjoern A. Zeeb 		trans->dbg.hw_error = true;
2313*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_irq_handle_error(trans);
2314*bfcc09ddSBjoern A. Zeeb 	}
2315*bfcc09ddSBjoern A. Zeeb 
2316*bfcc09ddSBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE) {
2317*bfcc09ddSBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Reset flow completed\n");
2318*bfcc09ddSBjoern A. Zeeb 		trans_pcie->fw_reset_state = FW_RESET_OK;
2319*bfcc09ddSBjoern A. Zeeb 		wake_up(&trans_pcie->fw_reset_waitq);
2320*bfcc09ddSBjoern A. Zeeb 	}
2321*bfcc09ddSBjoern A. Zeeb 
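	/*
	 * Clear/unmask this MSI-X vector here only when no NAPI poll was
	 * scheduled above; when polling, the vector is expected to be
	 * cleared once the poll routine completes.
	 */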
2322*bfcc09ddSBjoern A. Zeeb 	if (!polling)
2323*bfcc09ddSBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, entry->entry);
2324*bfcc09ddSBjoern A. Zeeb 
2325*bfcc09ddSBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
2326*bfcc09ddSBjoern A. Zeeb 
2327*bfcc09ddSBjoern A. Zeeb 	return IRQ_HANDLED;
2328*bfcc09ddSBjoern A. Zeeb }
2329