xref: /freebsd/sys/contrib/dev/iwlwifi/pcie/gen1_2/rx.c (revision 6b627f88584ce13118e0a24951b503c0b1f2d5a7)
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2003-2014, 2018-2024 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/sched.h>
#include <linux/wait.h>
#include <linux/gfp.h>

#include "iwl-prph.h"
#include "iwl-io.h"
#include "internal.h"
#include "iwl-op-mode.h"
#include "pcie/iwl-context-info-v2.h"
#include "fw/dbg.h"

/******************************************************************************
 *
 * RX path functions
 *
 ******************************************************************************/

/*
 * Rx theory of operation
 *
 * Driver allocates a circular buffer of Receive Buffer Descriptors (RBDs),
 * each of which point to Receive Buffers to be filled by the NIC.  These get
 * used not only for Rx frames, but for any command response or notification
 * from the NIC.  The driver and NIC manage the Rx buffers by means
 * of indexes into the circular buffer.
 *
 * Rx Queue Indexes
 * The host/firmware share two index registers for managing the Rx buffers.
 *
 * The READ index maps to the first position that the firmware may be writing
 * to -- the driver can read up to (but not including) this position and get
 * good data.
 * The READ index is managed by the firmware once the card is enabled.
 *
 * The WRITE index maps to the last position the driver has read from -- the
 * position preceding WRITE is the last slot the firmware can place a packet.
 *
 * The queue is empty (no good data) if WRITE = READ - 1, and is full if
 * WRITE = READ.
 *
 * During initialization, the host sets up the READ queue position to the first
 * INDEX position, and WRITE to the last (READ - 1 wrapped).
 *
 * When the firmware places a packet in a buffer, it will advance the READ index
 * and fire the RX interrupt.  The driver can then query the READ index and
 * process as many packets as possible, moving the WRITE index forward as it
 * resets the Rx queue buffers with new memory.
 *
 * The management in the driver is as follows:
 * + A list of pre-allocated RBDs is stored in iwl->rxq->rx_free.
 *   When the interrupt handler is called, the request is processed.
 *   The page is either stolen - transferred to the upper layer
 *   or reused - added immediately to the iwl->rxq->rx_free list.
 * + When the page is stolen - the driver updates the matching queue's used
 *   count, detaches the RBD and transfers it to the queue's used list.
 *   When there are two used RBDs - they are transferred to the allocator empty
 *   list. Work is then scheduled for the allocator to start allocating
 *   eight buffers.
 *   When there are another 6 used RBDs - they are transferred to the allocator
 *   empty list and the driver tries to claim the pre-allocated buffers and
 *   add them to iwl->rxq->rx_free. If it fails - it continues to claim them
 *   until ready.
 *   When there are 8+ buffers in the free list - either from allocation or from
 *   8 reused unstolen pages - restock is called to update the FW and indexes.
 * + In order to make sure the allocator always has RBDs to use for allocation
 *   the allocator has an initial pool of size num_queues * (8 - 2) - the
 *   maximum number of missing RBDs per allocation request (a request is
 *   posted with 2 empty RBDs, and there is no guarantee when the other 6
 *   RBDs are supplied).  The queues recycle the rest of the RBDs back to
 *   the allocator.
 * + A received packet is processed and handed to the kernel network stack,
 *   detached from the iwl->rxq.  The driver 'processed' index is updated.
 * + If there are no allocated buffers in iwl->rxq->rx_free,
 *   the READ INDEX is not incremented and iwl->status(RX_STALLED) is set.
 *   If there were enough free buffers and RX_STALLED is set it is cleared.
 *
 *
 * Driver sequence:
 *
 * iwl_rxq_alloc()            Allocates rx_free
 * iwl_pcie_rx_replenish()    Replenishes rx_free list from rx_used, and calls
 *                            iwl_pcie_rxq_restock.
 *                            Used only during initialization.
 * iwl_pcie_rxq_restock()     Moves available buffers from rx_free into Rx
 *                            queue, updates firmware pointers, and updates
 *                            the WRITE index.
 * iwl_pcie_rx_allocator()    Background work for allocating pages.
 *
 * -- enable interrupts --
 * ISR - iwl_rx()             Detach iwl_rx_mem_buffers from pool up to the
 *                            READ INDEX, detaching the SKB from the pool.
 *                            Moves the packet buffer from queue to rx_used.
 *                            Posts and claims requests to the allocator.
 *                            Calls iwl_pcie_rxq_restock to refill any empty
 *                            slots.
 *
 * RBD life-cycle:
 *
 * Init:
 * rxq.pool -> rxq.rx_used -> rxq.rx_free -> rxq.queue
 *
 * Regular Receive interrupt:
 * Page Stolen:
 * rxq.queue -> rxq.rx_used -> allocator.rbd_empty ->
 * allocator.rbd_allocated -> rxq.rx_free -> rxq.queue
 * Page not Stolen:
 * rxq.queue -> rxq.rx_free -> rxq.queue
 * ...
 *
 */
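
/*
 * Worked example (hypothetical values): with queue_size = 256, read = 10
 * and write = 200, the free-space formula used by iwl_rxq_space() below
 * gives (10 - 200 - 1) & 255 = 65 free slots; with write == read - 1
 * (modulo 256) it gives 0, i.e. at most queue_size - 1 slots are ever
 * reported free, which avoids the empty/full ambiguity described above.
 */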

/*
 * iwl_rxq_space - Return number of free slots available in queue.
 */
static int iwl_rxq_space(const struct iwl_rxq *rxq)
{
	/* Make sure rx queue size is a power of 2 */
	WARN_ON(rxq->queue_size & (rxq->queue_size - 1));

	/*
	 * There can be up to (RX_QUEUE_SIZE - 1) free slots, to avoid ambiguity
	 * between empty and completely full queues.
	 * The following is equivalent to modulo by RX_QUEUE_SIZE and is well
	 * defined for negative dividends.
	 */
	return (rxq->read - rxq->write - 1) & (rxq->queue_size - 1);
}

/*
 * iwl_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
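 * (the value is the DMA address shifted right by 8, i.e. in 256-byte units)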
 */
static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr)
{
	return cpu_to_le32((u32)(dma_addr >> 8));
}

/*
 * iwl_pcie_rx_stop - stops the Rx DMA
 */
int iwl_pcie_rx_stop(struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		/* TODO: remove this once fw does it */
		iwl_write_umac_prph(trans, RFH_RXF_DMA_CFG_AX210, 0);
		return iwl_poll_umac_prph_bit(trans, RFH_GEN_STATUS_AX210,
					      RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else if (trans->mac_cfg->mq_rx_supported) {
		iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0);
		return iwl_poll_prph_bit(trans, RFH_GEN_STATUS,
					 RXF_DMA_IDLE, RXF_DMA_IDLE, 1000);
	} else {
		iwl_write_direct32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
		return iwl_poll_direct_bit(trans, FH_MEM_RSSR_RX_STATUS_REG,
					   FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE,
					   1000);
	}
}

/*
 * iwl_pcie_rxq_inc_wr_ptr - Update the write pointer for the RX queue
 */
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
				    struct iwl_rxq *rxq)
{
	u32 reg;

	lockdep_assert_held(&rxq->lock);

	/*
	 * explicitly wake up the NIC if:
	 * 1. shadow registers aren't enabled
	 * 2. there is a chance that the NIC is asleep
	 */
	if (!trans->mac_cfg->base->shadow_reg_enable &&
	    test_bit(STATUS_TPOWER_PMI, &trans->status)) {
		reg = iwl_read32(trans, CSR_UCODE_DRV_GP1);

		if (reg & CSR_UCODE_DRV_GP1_BIT_MAC_SLEEP) {
			IWL_DEBUG_INFO(trans, "Rx queue requesting wakeup, GP1 = 0x%x\n",
				       reg);
			iwl_set_bit(trans, CSR_GP_CNTRL,
				    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
			rxq->need_update = true;
			return;
		}
	}

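	/* the device consumes the write pointer in batches of 8 RBDs, so round down */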
	rxq->write_actual = round_down(rxq->write, 8);
	if (!trans->mac_cfg->mq_rx_supported)
		iwl_write32(trans, FH_RSCSR_CHNL0_WPTR, rxq->write_actual);
	else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		iwl_write32(trans, HBUS_TARG_WRPTR, rxq->write_actual |
			    HBUS_TARG_WRPTR_RX_Q(rxq->id));
	else
		iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id),
			    rxq->write_actual);
}

static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	for (i = 0; i < trans->info.num_rxqs; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		if (!rxq->need_update)
			continue;
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		rxq->need_update = false;
		spin_unlock_bh(&rxq->lock);
	}
}

static void iwl_pcie_restock_bd(struct iwl_trans *trans,
				struct iwl_rxq *rxq,
				struct iwl_rx_mem_buffer *rxb)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		struct iwl_rx_transfer_desc *bd = rxq->bd;

		BUILD_BUG_ON(sizeof(*bd) != 2 * sizeof(u64));

		bd[rxq->write].addr = cpu_to_le64(rxb->page_dma);
		bd[rxq->write].rbid = cpu_to_le16(rxb->vid);
	} else {
		__le64 *bd = rxq->bd;

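		/* the virtual buffer ID is packed into the otherwise-unused low bits of the DMA address */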
		bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid);
	}

	IWL_DEBUG_RX(trans, "Assigned virtual RB ID %u to queue %d index %d\n",
		     (u32)rxb->vid, rxq->id, rxq->write);
}

/*
 * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx
 */
static void iwl_pcie_rxmq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while (rxq->free_count) {
		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;
		/* some low bits are expected to be unset (depending on hw) */
		WARN_ON(rxb->page_dma & trans_pcie->supported_dma_mask);
		/* Point to Rx buffer via next RBD in circular buffer */
		iwl_pcie_restock_bd(trans, rxq, rxb);
		rxq->write = (rxq->write + 1) & (rxq->queue_size - 1);
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/*
	 * If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8.
	 */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxsq_restock - restock implementation for single queue rx
 */
static void iwl_pcie_rxsq_restock(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_rx_mem_buffer *rxb;

	/*
	 * If the device isn't enabled - no need to try to add buffers...
	 * This can happen when we stop the device and still have an interrupt
	 * pending. We stop the APM before we sync the interrupts because we
	 * have to (see comment there). On the other hand, since the APM is
	 * stopped, we cannot access the HW (in particular not prph).
	 * So don't try to restock if the APM has been already stopped.
	 */
	if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		return;

	spin_lock_bh(&rxq->lock);
	while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
		__le32 *bd = (__le32 *)rxq->bd;
		/* The overwritten rxb must be a used one */
		rxb = rxq->queue[rxq->write];
		BUG_ON(rxb && rxb->page);

		/* Get next free Rx buffer, remove from free list */
		rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		rxb->invalid = false;

		/* Point to Rx buffer via next RBD in circular buffer */
		bd[rxq->write] = iwl_pcie_dma_addr2rbd_ptr(rxb->page_dma);
		rxq->queue[rxq->write] = rxb;
		rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
		rxq->free_count--;
	}
	spin_unlock_bh(&rxq->lock);

	/* If we've added more space for the firmware to place data, tell it.
	 * Increment device's write pointer in multiples of 8. */
	if (rxq->write_actual != (rxq->write & ~0x7)) {
		spin_lock_bh(&rxq->lock);
		iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
		spin_unlock_bh(&rxq->lock);
	}
}

/*
 * iwl_pcie_rxq_restock - refill RX queue from pre-allocated pool
 *
 * If there are slots in the RX queue that need to be restocked,
 * and we have free pre-allocated buffers, fill the ranks as much
 * as we can, pulling from rx_free.
 *
 * This moves the 'write' index forward to catch up with 'processed', and
 * also updates the memory address in the firmware to reference the new
 * target buffer.
 */
static
void iwl_pcie_rxq_restock(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	if (trans->mac_cfg->mq_rx_supported)
		iwl_pcie_rxmq_restock(trans, rxq);
	else
		iwl_pcie_rxsq_restock(trans, rxq);
}

/*
 * iwl_pcie_rx_alloc_page - allocates and returns a page.
 */
static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans,
					   u32 *offset, gfp_t priority)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	unsigned int allocsize = PAGE_SIZE << trans_pcie->rx_page_order;
	unsigned int rbsize = trans_pcie->rx_buf_bytes;
	struct page *page;
	gfp_t gfp_mask = priority;

	if (trans_pcie->rx_page_order > 0)
		gfp_mask |= __GFP_COMP;

	if (trans_pcie->alloc_page) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		/* recheck */
		if (trans_pcie->alloc_page) {
			*offset = trans_pcie->alloc_page_used;
			page = trans_pcie->alloc_page;
			trans_pcie->alloc_page_used += rbsize;
			if (trans_pcie->alloc_page_used >= allocsize)
				trans_pcie->alloc_page = NULL;
			else
				get_page(page);
			spin_unlock_bh(&trans_pcie->alloc_page_lock);
			return page;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	/* Alloc a new receive buffer */
	page = alloc_pages(gfp_mask, trans_pcie->rx_page_order);
	if (!page) {
		if (net_ratelimit())
			IWL_DEBUG_INFO(trans, "alloc_pages failed, order: %d\n",
				       trans_pcie->rx_page_order);
		/*
		 * Issue an error if we don't have enough pre-allocated
		 * buffers.
		 */
		if (!(gfp_mask & __GFP_NOWARN) && net_ratelimit())
			IWL_CRIT(trans,
				 "Failed to alloc_pages\n");
		return NULL;
	}

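	/* if the page can hold at least two RBs, keep the remainder cached for the next caller */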
	if (2 * rbsize <= allocsize) {
		spin_lock_bh(&trans_pcie->alloc_page_lock);
		if (!trans_pcie->alloc_page) {
			get_page(page);
			trans_pcie->alloc_page = page;
			trans_pcie->alloc_page_used = rbsize;
		}
		spin_unlock_bh(&trans_pcie->alloc_page_lock);
	}

	*offset = 0;
	return page;
}

/*
 * iwl_pcie_rxq_alloc_rbs - allocate a page for each used RBD
 *
 * A used RBD is an Rx buffer that has been given to the stack. To use it again
 * a page must be allocated and the RBD must point to the page. This function
 * doesn't change the HW pointer but handles the list of pages that is used by
 * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly
 * allocated buffers.
 */
void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority,
			    struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rx_mem_buffer *rxb;
	struct page *page;

	while (1) {
		unsigned int offset;

		spin_lock_bh(&rxq->lock);
		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			return;
		}
		spin_unlock_bh(&rxq->lock);

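		/* allocate with the queue lock dropped; rx_used is re-checked
		 * below since it may have been drained in the meantime
		 */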
		page = iwl_pcie_rx_alloc_page(trans, &offset, priority);
		if (!page)
			return;

		spin_lock_bh(&rxq->lock);

		if (list_empty(&rxq->rx_used)) {
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}
		rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
				       list);
		list_del(&rxb->list);
		spin_unlock_bh(&rxq->lock);

		BUG_ON(rxb->page);
		rxb->page = page;
		rxb->offset = offset;
		/* Get physical address of the RB */
		rxb->page_dma =
			dma_map_page(trans->dev, page, rxb->offset,
				     trans_pcie->rx_buf_bytes,
				     DMA_FROM_DEVICE);
		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
			rxb->page = NULL;
			spin_lock_bh(&rxq->lock);
			list_add(&rxb->list, &rxq->rx_used);
			spin_unlock_bh(&rxq->lock);
			__free_pages(page, trans_pcie->rx_page_order);
			return;
		}

		spin_lock_bh(&rxq->lock);

		list_add_tail(&rxb->list, &rxq->rx_free);
		rxq->free_count++;

		spin_unlock_bh(&rxq->lock);
	}
}

void iwl_pcie_free_rbs_pool(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int i;

	if (!trans_pcie->rx_pool)
		return;

	for (i = 0; i < RX_POOL_SIZE(trans_pcie->num_rx_bufs); i++) {
		if (!trans_pcie->rx_pool[i].page)
			continue;
		dma_unmap_page(trans->dev, trans_pcie->rx_pool[i].page_dma,
			       trans_pcie->rx_buf_bytes, DMA_FROM_DEVICE);
		__free_pages(trans_pcie->rx_pool[i].page,
			     trans_pcie->rx_page_order);
		trans_pcie->rx_pool[i].page = NULL;
	}
}

/*
 * iwl_pcie_rx_allocator - Allocates pages in the background for RX queues
 *
 * Allocates 8 pages for each received request.
 * Called as a scheduled work item.
 */
static void iwl_pcie_rx_allocator(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	struct list_head local_empty;
	int pending = atomic_read(&rba->req_pending);

	IWL_DEBUG_TPT(trans, "Pending allocation requests = %d\n", pending);

	/* If we were scheduled - there is at least one request */
	spin_lock_bh(&rba->lock);
	/* swap out the rba->rbd_empty to a local list */
	list_replace_init(&rba->rbd_empty, &local_empty);
	spin_unlock_bh(&rba->lock);

	while (pending) {
		int i;
		LIST_HEAD(local_allocated);
		gfp_t gfp_mask = GFP_KERNEL;

		/* Do not post a warning if there are only a few requests */
		if (pending < RX_PENDING_WATERMARK)
			gfp_mask |= __GFP_NOWARN;

		for (i = 0; i < RX_CLAIM_REQ_ALLOC;) {
			struct iwl_rx_mem_buffer *rxb;
			struct page *page;

			/* List should never be empty - each reused RBD is
			 * returned to the list, and initial pool covers any
			 * possible gap between the time the page is allocated
			 * to the time the RBD is added.
			 */
			BUG_ON(list_empty(&local_empty));
			/* Get the first rxb from the rbd list */
			rxb = list_first_entry(&local_empty,
					       struct iwl_rx_mem_buffer, list);
			BUG_ON(rxb->page);

			/* Alloc a new receive buffer */
			page = iwl_pcie_rx_alloc_page(trans, &rxb->offset,
						      gfp_mask);
			if (!page)
				continue;
			rxb->page = page;

			/* Get physical address of the RB */
			rxb->page_dma = dma_map_page(trans->dev, page,
						     rxb->offset,
						     trans_pcie->rx_buf_bytes,
						     DMA_FROM_DEVICE);
			if (dma_mapping_error(trans->dev, rxb->page_dma)) {
				rxb->page = NULL;
				__free_pages(page, trans_pcie->rx_page_order);
				continue;
			}

			/* move the allocated entry to the out list */
			list_move(&rxb->list, &local_allocated);
			i++;
		}

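		/* one full request (RX_CLAIM_REQ_ALLOC pages) has been satisfied */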
		atomic_dec(&rba->req_pending);
		pending--;

		if (!pending) {
			pending = atomic_read(&rba->req_pending);
			if (pending)
				IWL_DEBUG_TPT(trans,
					      "Got more pending allocation requests = %d\n",
					      pending);
		}

		spin_lock_bh(&rba->lock);
		/* add the allocated rbds to the allocator allocated list */
		list_splice_tail(&local_allocated, &rba->rbd_allocated);
		/* get more empty RBDs for current pending requests */
		list_splice_tail_init(&rba->rbd_empty, &local_empty);
		spin_unlock_bh(&rba->lock);

		atomic_inc(&rba->req_ready);
	}

	spin_lock_bh(&rba->lock);
	/* return unused rbds to the allocator empty list */
	list_splice_tail(&local_empty, &rba->rbd_empty);
	spin_unlock_bh(&rba->lock);

	IWL_DEBUG_TPT(trans, "%s, exit.\n", __func__);
}

/*
 * iwl_pcie_rx_allocator_get - returns the pre-allocated pages
 *
 * Called by the queue when it has posted an allocation request and
 * has freed 8 RBDs in order to restock itself.
 * This function directly moves the allocated RBs to the queue's ownership
 * and updates the relevant counters.
 */
static void iwl_pcie_rx_allocator_get(struct iwl_trans *trans,
				      struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i;

	lockdep_assert_held(&rxq->lock);

	/*
	 * atomic_dec_if_positive returns req_ready - 1 for any scenario.
	 * If req_ready is 0 atomic_dec_if_positive will return -1 and this
	 * function will return early, as there are no ready requests.
	 * atomic_dec_if_positive will perform the *actual* decrement only if
	 * req_ready > 0, i.e. - there are ready requests and the function
	 * hands one request to the caller.
	 */
	if (atomic_dec_if_positive(&rba->req_ready) < 0)
		return;

	spin_lock(&rba->lock);
	for (i = 0; i < RX_CLAIM_REQ_ALLOC; i++) {
		/* Get next free Rx buffer, remove it from free list */
		struct iwl_rx_mem_buffer *rxb =
			list_first_entry(&rba->rbd_allocated,
					 struct iwl_rx_mem_buffer, list);

		list_move(&rxb->list, &rxq->rx_free);
	}
	spin_unlock(&rba->lock);

	rxq->used_count -= RX_CLAIM_REQ_ALLOC;
	rxq->free_count += RX_CLAIM_REQ_ALLOC;
}

void iwl_pcie_rx_allocator_work(struct work_struct *data)
{
	struct iwl_rb_allocator *rba_p =
		container_of(data, struct iwl_rb_allocator, rx_alloc);
	struct iwl_trans_pcie *trans_pcie =
		container_of(rba_p, struct iwl_trans_pcie, rba);

	iwl_pcie_rx_allocator(trans_pcie->trans);
}

static int iwl_pcie_free_bd_size(struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return sizeof(struct iwl_rx_transfer_desc);

	return trans->mac_cfg->mq_rx_supported ?
			sizeof(__le64) : sizeof(__le32);
}

static int iwl_pcie_used_bd_size(struct iwl_trans *trans)
{
	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
		return sizeof(struct iwl_rx_completion_desc_bz);

	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
		return sizeof(struct iwl_rx_completion_desc);

	return sizeof(__le32);
}

static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	int free_size = iwl_pcie_free_bd_size(trans);

	if (rxq->bd)
		dma_free_coherent(trans->dev,
				  free_size * rxq->queue_size,
				  rxq->bd, rxq->bd_dma);
	rxq->bd_dma = 0;
	rxq->bd = NULL;

	rxq->rb_stts_dma = 0;
	rxq->rb_stts = NULL;

	if (rxq->used_bd)
		dma_free_coherent(trans->dev,
				  iwl_pcie_used_bd_size(trans) *
					rxq->queue_size,
				  rxq->used_bd, rxq->used_bd_dma);
	rxq->used_bd_dma = 0;
	rxq->used_bd = NULL;
}

static size_t iwl_pcie_rb_stts_size(struct iwl_trans *trans)
{
	bool use_rx_td = (trans->mac_cfg->device_family >=
			  IWL_DEVICE_FAMILY_AX210);

	if (use_rx_td)
		return sizeof(__le16);

	return sizeof(struct iwl_rb_status);
}

static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans,
				  struct iwl_rxq *rxq)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
	struct device *dev = trans->dev;
	int i;
	int free_size;

	spin_lock_init(&rxq->lock);
	if (trans->mac_cfg->mq_rx_supported)
		rxq->queue_size = iwl_trans_get_num_rbds(trans);
	else
		rxq->queue_size = RX_QUEUE_SIZE;

	free_size = iwl_pcie_free_bd_size(trans);

	/*
	 * Allocate the circular buffer of Read Buffer Descriptors
	 * (RBDs)
	 */
	rxq->bd = dma_alloc_coherent(dev, free_size * rxq->queue_size,
				     &rxq->bd_dma, GFP_KERNEL);
	if (!rxq->bd)
		goto err;

	if (trans->mac_cfg->mq_rx_supported) {
		rxq->used_bd = dma_alloc_coherent(dev,
						  iwl_pcie_used_bd_size(trans) *
							rxq->queue_size,
						  &rxq->used_bd_dma,
						  GFP_KERNEL);
		if (!rxq->used_bd)
			goto err;
	}

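	/* each queue's status area is carved out of the single contiguous base_rb_stts allocation */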
	rxq->rb_stts = (u8 *)trans_pcie->base_rb_stts + rxq->id * rb_stts_size;
	rxq->rb_stts_dma =
		trans_pcie->base_rb_stts_dma + rxq->id * rb_stts_size;

	return 0;

err:
	for (i = 0; i < trans->info.num_rxqs; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		iwl_pcie_free_rxq_dma(trans, rxq);
	}

	return -ENOMEM;
}

static int iwl_pcie_rx_alloc(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
	struct iwl_rb_allocator *rba = &trans_pcie->rba;
	int i, ret;

	if (WARN_ON(trans_pcie->rxq))
		return -EINVAL;

	trans_pcie->rxq = kcalloc(trans->info.num_rxqs, sizeof(struct iwl_rxq),
				  GFP_KERNEL);
	trans_pcie->rx_pool = kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
				      sizeof(trans_pcie->rx_pool[0]),
				      GFP_KERNEL);
	trans_pcie->global_table =
		kcalloc(RX_POOL_SIZE(trans_pcie->num_rx_bufs),
			sizeof(trans_pcie->global_table[0]),
			GFP_KERNEL);
	if (!trans_pcie->rxq || !trans_pcie->rx_pool ||
	    !trans_pcie->global_table) {
		ret = -ENOMEM;
		goto err;
	}

	spin_lock_init(&rba->lock);

	/*
	 * Allocate the driver's pointer to receive buffer status.
	 * Allocate for all queues continuously (HW requirement).
	 */
	trans_pcie->base_rb_stts =
			dma_alloc_coherent(trans->dev,
					   rb_stts_size * trans->info.num_rxqs,
					   &trans_pcie->base_rb_stts_dma,
					   GFP_KERNEL);
	if (!trans_pcie->base_rb_stts) {
		ret = -ENOMEM;
		goto err;
	}

	for (i = 0; i < trans->info.num_rxqs; i++) {
		struct iwl_rxq *rxq = &trans_pcie->rxq[i];

		rxq->id = i;
		ret = iwl_pcie_alloc_rxq_dma(trans, rxq);
		if (ret)
			goto err;
	}
	return 0;

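/* iwl_pcie_alloc_rxq_dma() already releases the per-queue DMA on failure,
 * so only the shared allocations need to be freed here
 */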
err:
	if (trans_pcie->base_rb_stts) {
		dma_free_coherent(trans->dev,
				  rb_stts_size * trans->info.num_rxqs,
				  trans_pcie->base_rb_stts,
				  trans_pcie->base_rb_stts_dma);
		trans_pcie->base_rb_stts = NULL;
		trans_pcie->base_rb_stts_dma = 0;
	}
	kfree(trans_pcie->rx_pool);
	trans_pcie->rx_pool = NULL;
	kfree(trans_pcie->global_table);
	trans_pcie->global_table = NULL;
	kfree(trans_pcie->rxq);
	trans_pcie->rxq = NULL;

	return ret;
}

static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq)
{
	u32 rb_size;
	const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */

	switch (trans->conf.rx_buf_size) {
	case IWL_AMSDU_4K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
		break;
	case IWL_AMSDU_8K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
		break;
	case IWL_AMSDU_12K:
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_12K;
		break;
	default:
		WARN_ON(1);
		rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
	}

	if (!iwl_trans_grab_nic_access(trans))
		return;

	/* Stop Rx DMA */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
	/* reset and flush pointers */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_RBDCB_WPTR, 0);
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_FLUSH_RB_REQ, 0);
	iwl_write32(trans, FH_RSCSR_CHNL0_RDPTR, 0);

	/* Reset driver's Rx queue write index */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);

	/* Tell device where to find RBD circular buffer in DRAM */
	iwl_write32(trans, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
		    (u32)(rxq->bd_dma >> 8));

	/* Tell device where in DRAM to update its Rx status */
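	/* (the address is programmed shifted right by 4, i.e. in 16-byte units) */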
	iwl_write32(trans, FH_RSCSR_CHNL0_STTS_WPTR_REG,
		    rxq->rb_stts_dma >> 4);

	/* Enable Rx DMA
	 * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
	 *      the credit mechanism in 5000 HW RX FIFO
	 * Direct rx interrupts to hosts
	 * Rx buffer size 4 or 8k or 12k
	 * RB timeout 0x10
	 * 256 RBDs
	 */
	iwl_write32(trans, FH_MEM_RCSR_CHNL0_CONFIG_REG,
		    FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
		    FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
		    FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
		    rb_size |
		    (RX_RB_TIMEOUT << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS) |
		    (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));

	iwl_trans_release_nic_access(trans);

	/* Set interrupt coalescing timer to default (2048 usecs) */
	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);

	/* W/A for interrupt coalescing bug in 7260 and 3160 */
	if (trans->cfg->host_interrupt_operation_mode)
		iwl_set_bit(trans, CSR_INT_COALESCING, IWL_HOST_INT_OPER_MODE);
}

iwl_pcie_rx_mq_hw_init(struct iwl_trans * trans)903*6b627f88SBjoern A. Zeeb static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans)
904*6b627f88SBjoern A. Zeeb {
905*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
906*6b627f88SBjoern A. Zeeb 	u32 rb_size, enabled = 0;
907*6b627f88SBjoern A. Zeeb 	int i;
908*6b627f88SBjoern A. Zeeb 
909*6b627f88SBjoern A. Zeeb 	switch (trans->conf.rx_buf_size) {
910*6b627f88SBjoern A. Zeeb 	case IWL_AMSDU_2K:
911*6b627f88SBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_2K;
912*6b627f88SBjoern A. Zeeb 		break;
913*6b627f88SBjoern A. Zeeb 	case IWL_AMSDU_4K:
914*6b627f88SBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
915*6b627f88SBjoern A. Zeeb 		break;
916*6b627f88SBjoern A. Zeeb 	case IWL_AMSDU_8K:
917*6b627f88SBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_8K;
918*6b627f88SBjoern A. Zeeb 		break;
919*6b627f88SBjoern A. Zeeb 	case IWL_AMSDU_12K:
920*6b627f88SBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_12K;
921*6b627f88SBjoern A. Zeeb 		break;
922*6b627f88SBjoern A. Zeeb 	default:
923*6b627f88SBjoern A. Zeeb 		WARN_ON(1);
924*6b627f88SBjoern A. Zeeb 		rb_size = RFH_RXF_DMA_RB_SIZE_4K;
925*6b627f88SBjoern A. Zeeb 	}
926*6b627f88SBjoern A. Zeeb 
927*6b627f88SBjoern A. Zeeb 	if (!iwl_trans_grab_nic_access(trans))
928*6b627f88SBjoern A. Zeeb 		return;
929*6b627f88SBjoern A. Zeeb 
930*6b627f88SBjoern A. Zeeb 	/* Stop Rx DMA */
931*6b627f88SBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG, 0);
932*6b627f88SBjoern A. Zeeb 	/* disable free amd used rx queue operation */
933*6b627f88SBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, 0);
934*6b627f88SBjoern A. Zeeb 
935*6b627f88SBjoern A. Zeeb 	for (i = 0; i < trans->info.num_rxqs; i++) {
936*6b627f88SBjoern A. Zeeb 		/* Tell device where to find RBD free table in DRAM */
937*6b627f88SBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
938*6b627f88SBjoern A. Zeeb 					 RFH_Q_FRBDCB_BA_LSB(i),
939*6b627f88SBjoern A. Zeeb 					 trans_pcie->rxq[i].bd_dma);
940*6b627f88SBjoern A. Zeeb 		/* Tell device where to find RBD used table in DRAM */
941*6b627f88SBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
942*6b627f88SBjoern A. Zeeb 					 RFH_Q_URBDCB_BA_LSB(i),
943*6b627f88SBjoern A. Zeeb 					 trans_pcie->rxq[i].used_bd_dma);
944*6b627f88SBjoern A. Zeeb 		/* Tell device where in DRAM to update its Rx status */
945*6b627f88SBjoern A. Zeeb 		iwl_write_prph64_no_grab(trans,
946*6b627f88SBjoern A. Zeeb 					 RFH_Q_URBD_STTS_WPTR_LSB(i),
947*6b627f88SBjoern A. Zeeb 					 trans_pcie->rxq[i].rb_stts_dma);
948*6b627f88SBjoern A. Zeeb 		/* Reset device indice tables */
949*6b627f88SBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_WIDX(i), 0);
950*6b627f88SBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_FRBDCB_RIDX(i), 0);
951*6b627f88SBjoern A. Zeeb 		iwl_write_prph_no_grab(trans, RFH_Q_URBDCB_WIDX(i), 0);
952*6b627f88SBjoern A. Zeeb 
953*6b627f88SBjoern A. Zeeb 		enabled |= BIT(i) | BIT(i + 16);
954*6b627f88SBjoern A. Zeeb 	}
955*6b627f88SBjoern A. Zeeb 
956*6b627f88SBjoern A. Zeeb 	/*
957*6b627f88SBjoern A. Zeeb 	 * Enable Rx DMA
958*6b627f88SBjoern A. Zeeb 	 * Rx buffer size 2k, 4k, 8k or 12k
959*6b627f88SBjoern A. Zeeb 	 * Min RB size 4 or 8
960*6b627f88SBjoern A. Zeeb 	 * Drop frames that exceed RB size
961*6b627f88SBjoern A. Zeeb 	 * 512 RBDs
962*6b627f88SBjoern A. Zeeb 	 */
963*6b627f88SBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_DMA_CFG,
964*6b627f88SBjoern A. Zeeb 			       RFH_DMA_EN_ENABLE_VAL | rb_size |
965*6b627f88SBjoern A. Zeeb 			       RFH_RXF_DMA_MIN_RB_4_8 |
966*6b627f88SBjoern A. Zeeb 			       RFH_RXF_DMA_DROP_TOO_LARGE_MASK |
967*6b627f88SBjoern A. Zeeb 			       RFH_RXF_DMA_RBDCB_SIZE_512);
968*6b627f88SBjoern A. Zeeb 
969*6b627f88SBjoern A. Zeeb 	/*
970*6b627f88SBjoern A. Zeeb 	 * Activate DMA snooping.
971*6b627f88SBjoern A. Zeeb 	 * Set RX DMA chunk size to 64B for IOSF and 128B for PCIe
972*6b627f88SBjoern A. Zeeb 	 * Default queue is 0
973*6b627f88SBjoern A. Zeeb 	 */
974*6b627f88SBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_GEN_CFG,
975*6b627f88SBjoern A. Zeeb 			       RFH_GEN_CFG_RFH_DMA_SNOOP |
976*6b627f88SBjoern A. Zeeb 			       RFH_GEN_CFG_VAL(DEFAULT_RXQ_NUM, 0) |
977*6b627f88SBjoern A. Zeeb 			       RFH_GEN_CFG_SERVICE_DMA_SNOOP |
978*6b627f88SBjoern A. Zeeb 			       RFH_GEN_CFG_VAL(RB_CHUNK_SIZE,
979*6b627f88SBjoern A. Zeeb 					       trans->mac_cfg->integrated ?
980*6b627f88SBjoern A. Zeeb 					       RFH_GEN_CFG_RB_CHUNK_SIZE_64 :
981*6b627f88SBjoern A. Zeeb 					       RFH_GEN_CFG_RB_CHUNK_SIZE_128));
982*6b627f88SBjoern A. Zeeb 	/* Enable the relevant rx queues */
983*6b627f88SBjoern A. Zeeb 	iwl_write_prph_no_grab(trans, RFH_RXF_RXQ_ACTIVE, enabled);
984*6b627f88SBjoern A. Zeeb 
985*6b627f88SBjoern A. Zeeb 	iwl_trans_release_nic_access(trans);
986*6b627f88SBjoern A. Zeeb 
987*6b627f88SBjoern A. Zeeb 	/* Set interrupt coalescing timer to default (2048 usecs) */
988*6b627f88SBjoern A. Zeeb 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
989*6b627f88SBjoern A. Zeeb }
990*6b627f88SBjoern A. Zeeb 
991*6b627f88SBjoern A. Zeeb void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq)
992*6b627f88SBjoern A. Zeeb {
993*6b627f88SBjoern A. Zeeb 	lockdep_assert_held(&rxq->lock);
994*6b627f88SBjoern A. Zeeb 
995*6b627f88SBjoern A. Zeeb 	INIT_LIST_HEAD(&rxq->rx_free);
996*6b627f88SBjoern A. Zeeb 	INIT_LIST_HEAD(&rxq->rx_used);
997*6b627f88SBjoern A. Zeeb 	rxq->free_count = 0;
998*6b627f88SBjoern A. Zeeb 	rxq->used_count = 0;
999*6b627f88SBjoern A. Zeeb }
1000*6b627f88SBjoern A. Zeeb 
1001*6b627f88SBjoern A. Zeeb static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget);
1002*6b627f88SBjoern A. Zeeb 
1003*6b627f88SBjoern A. Zeeb static inline struct iwl_trans_pcie *iwl_netdev_to_trans_pcie(struct net_device *dev)
1004*6b627f88SBjoern A. Zeeb {
1005*6b627f88SBjoern A. Zeeb 	return *(struct iwl_trans_pcie **)netdev_priv(dev);
1006*6b627f88SBjoern A. Zeeb }
1007*6b627f88SBjoern A. Zeeb 
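/*
 * iwl_pcie_napi_poll - NAPI poll callback used with a single (non-MSI-X)
 * interrupt; once the queue is drained below the budget, interrupts are
 * re-enabled and the poll is completed.
 */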
1008*6b627f88SBjoern A. Zeeb static int iwl_pcie_napi_poll(struct napi_struct *napi, int budget)
1009*6b627f88SBjoern A. Zeeb {
1010*6b627f88SBjoern A. Zeeb 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1011*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie;
1012*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans;
1013*6b627f88SBjoern A. Zeeb 	int ret;
1014*6b627f88SBjoern A. Zeeb 
1015*6b627f88SBjoern A. Zeeb 	trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1016*6b627f88SBjoern A. Zeeb 	trans = trans_pcie->trans;
1017*6b627f88SBjoern A. Zeeb 
1018*6b627f88SBjoern A. Zeeb 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1019*6b627f88SBjoern A. Zeeb 
1020*6b627f88SBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n",
1021*6b627f88SBjoern A. Zeeb 		      rxq->id, ret, budget);
1022*6b627f88SBjoern A. Zeeb 
1023*6b627f88SBjoern A. Zeeb 	if (ret < budget) {
1024*6b627f88SBjoern A. Zeeb 		spin_lock(&trans_pcie->irq_lock);
1025*6b627f88SBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1026*6b627f88SBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
1027*6b627f88SBjoern A. Zeeb 		spin_unlock(&trans_pcie->irq_lock);
1028*6b627f88SBjoern A. Zeeb 
1029*6b627f88SBjoern A. Zeeb 		napi_complete_done(&rxq->napi, ret);
1030*6b627f88SBjoern A. Zeeb 	}
1031*6b627f88SBjoern A. Zeeb 
1032*6b627f88SBjoern A. Zeeb 	return ret;
1033*6b627f88SBjoern A. Zeeb }
1034*6b627f88SBjoern A. Zeeb 
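/*
 * iwl_pcie_napi_poll_msix - NAPI poll callback for MSI-X; only the IRQ
 * line that feeds this RX queue is cleared when the poll completes.
 */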
1035*6b627f88SBjoern A. Zeeb static int iwl_pcie_napi_poll_msix(struct napi_struct *napi, int budget)
1036*6b627f88SBjoern A. Zeeb {
1037*6b627f88SBjoern A. Zeeb 	struct iwl_rxq *rxq = container_of(napi, struct iwl_rxq, napi);
1038*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie;
1039*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans;
1040*6b627f88SBjoern A. Zeeb 	int ret;
1041*6b627f88SBjoern A. Zeeb 
1042*6b627f88SBjoern A. Zeeb 	trans_pcie = iwl_netdev_to_trans_pcie(napi->dev);
1043*6b627f88SBjoern A. Zeeb 	trans = trans_pcie->trans;
1044*6b627f88SBjoern A. Zeeb 
1045*6b627f88SBjoern A. Zeeb 	ret = iwl_pcie_rx_handle(trans, rxq->id, budget);
1046*6b627f88SBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] handled %d, budget %d\n", rxq->id, ret,
1047*6b627f88SBjoern A. Zeeb 		      budget);
1048*6b627f88SBjoern A. Zeeb 
1049*6b627f88SBjoern A. Zeeb 	if (ret < budget) {
1050*6b627f88SBjoern A. Zeeb 		int irq_line = rxq->id;
1051*6b627f88SBjoern A. Zeeb 
1052*6b627f88SBjoern A. Zeeb 		/* FIRST_RSS is shared with line 0 */
1053*6b627f88SBjoern A. Zeeb 		if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS &&
1054*6b627f88SBjoern A. Zeeb 		    rxq->id == 1)
1055*6b627f88SBjoern A. Zeeb 			irq_line = 0;
1056*6b627f88SBjoern A. Zeeb 
1057*6b627f88SBjoern A. Zeeb 		spin_lock(&trans_pcie->irq_lock);
1058*6b627f88SBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, irq_line);
1059*6b627f88SBjoern A. Zeeb 		spin_unlock(&trans_pcie->irq_lock);
1060*6b627f88SBjoern A. Zeeb 
1061*6b627f88SBjoern A. Zeeb 		napi_complete_done(&rxq->napi, ret);
1062*6b627f88SBjoern A. Zeeb 	}
1063*6b627f88SBjoern A. Zeeb 
1064*6b627f88SBjoern A. Zeeb 	return ret;
1065*6b627f88SBjoern A. Zeeb }
1066*6b627f88SBjoern A. Zeeb 
1067*6b627f88SBjoern A. Zeeb void iwl_pcie_rx_napi_sync(struct iwl_trans *trans)
1068*6b627f88SBjoern A. Zeeb {
1069*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1070*6b627f88SBjoern A. Zeeb 	int i;
1071*6b627f88SBjoern A. Zeeb 
1072*6b627f88SBjoern A. Zeeb 	if (unlikely(!trans_pcie->rxq))
1073*6b627f88SBjoern A. Zeeb 		return;
1074*6b627f88SBjoern A. Zeeb 
1075*6b627f88SBjoern A. Zeeb 	for (i = 0; i < trans->info.num_rxqs; i++) {
1076*6b627f88SBjoern A. Zeeb 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1077*6b627f88SBjoern A. Zeeb 
1078*6b627f88SBjoern A. Zeeb 		if (rxq && rxq->napi.poll)
1079*6b627f88SBjoern A. Zeeb 			napi_synchronize(&rxq->napi);
1080*6b627f88SBjoern A. Zeeb 	}
1081*6b627f88SBjoern A. Zeeb }
1082*6b627f88SBjoern A. Zeeb 
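/*
 * _iwl_pcie_rx_init - software (re)initialization of the RX path:
 * allocate DMA resources if needed, reset the allocator and every RX
 * queue, register NAPI, and hand the RB pool back to the allocator and
 * the default queue.  Hardware programming is left to the callers.
 */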
1083*6b627f88SBjoern A. Zeeb static int _iwl_pcie_rx_init(struct iwl_trans *trans)
1084*6b627f88SBjoern A. Zeeb {
1085*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1086*6b627f88SBjoern A. Zeeb 	struct iwl_rxq *def_rxq;
1087*6b627f88SBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1088*6b627f88SBjoern A. Zeeb 	int i, err, queue_size, allocator_pool_size, num_alloc;
1089*6b627f88SBjoern A. Zeeb 
1090*6b627f88SBjoern A. Zeeb 	if (!trans_pcie->rxq) {
1091*6b627f88SBjoern A. Zeeb 		err = iwl_pcie_rx_alloc(trans);
1092*6b627f88SBjoern A. Zeeb 		if (err)
1093*6b627f88SBjoern A. Zeeb 			return err;
1094*6b627f88SBjoern A. Zeeb 	}
1095*6b627f88SBjoern A. Zeeb 	def_rxq = trans_pcie->rxq;
1096*6b627f88SBjoern A. Zeeb 
1097*6b627f88SBjoern A. Zeeb 	cancel_work_sync(&rba->rx_alloc);
1098*6b627f88SBjoern A. Zeeb 
1099*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&rba->lock);
1100*6b627f88SBjoern A. Zeeb 	atomic_set(&rba->req_pending, 0);
1101*6b627f88SBjoern A. Zeeb 	atomic_set(&rba->req_ready, 0);
1102*6b627f88SBjoern A. Zeeb 	INIT_LIST_HEAD(&rba->rbd_allocated);
1103*6b627f88SBjoern A. Zeeb 	INIT_LIST_HEAD(&rba->rbd_empty);
1104*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&rba->lock);
1105*6b627f88SBjoern A. Zeeb 
1106*6b627f88SBjoern A. Zeeb 	/* free all first - we overwrite everything here */
1107*6b627f88SBjoern A. Zeeb 	iwl_pcie_free_rbs_pool(trans);
1108*6b627f88SBjoern A. Zeeb 
1109*6b627f88SBjoern A. Zeeb 	for (i = 0; i < RX_QUEUE_SIZE; i++)
1110*6b627f88SBjoern A. Zeeb 		def_rxq->queue[i] = NULL;
1111*6b627f88SBjoern A. Zeeb 
1112*6b627f88SBjoern A. Zeeb 	for (i = 0; i < trans->info.num_rxqs; i++) {
1113*6b627f88SBjoern A. Zeeb 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1114*6b627f88SBjoern A. Zeeb 
1115*6b627f88SBjoern A. Zeeb 		spin_lock_bh(&rxq->lock);
1116*6b627f88SBjoern A. Zeeb 		/*
1117*6b627f88SBjoern A. Zeeb 		 * Set read write pointer to reflect that we have processed
1118*6b627f88SBjoern A. Zeeb 		 * and used all buffers, but have not restocked the Rx queue
1119*6b627f88SBjoern A. Zeeb 		 * with fresh buffers
1120*6b627f88SBjoern A. Zeeb 		 */
1121*6b627f88SBjoern A. Zeeb 		rxq->read = 0;
1122*6b627f88SBjoern A. Zeeb 		rxq->write = 0;
1123*6b627f88SBjoern A. Zeeb 		rxq->write_actual = 0;
1124*6b627f88SBjoern A. Zeeb 		memset(rxq->rb_stts, 0,
1125*6b627f88SBjoern A. Zeeb 		       (trans->mac_cfg->device_family >=
1126*6b627f88SBjoern A. Zeeb 			IWL_DEVICE_FAMILY_AX210) ?
1127*6b627f88SBjoern A. Zeeb 		       sizeof(__le16) : sizeof(struct iwl_rb_status));
1128*6b627f88SBjoern A. Zeeb 
1129*6b627f88SBjoern A. Zeeb 		iwl_pcie_rx_init_rxb_lists(rxq);
1130*6b627f88SBjoern A. Zeeb 
1131*6b627f88SBjoern A. Zeeb 		spin_unlock_bh(&rxq->lock);
1132*6b627f88SBjoern A. Zeeb 
1133*6b627f88SBjoern A. Zeeb 		if (!rxq->napi.poll) {
1134*6b627f88SBjoern A. Zeeb 			int (*poll)(struct napi_struct *, int) = iwl_pcie_napi_poll;
1135*6b627f88SBjoern A. Zeeb 
1136*6b627f88SBjoern A. Zeeb 			if (trans_pcie->msix_enabled)
1137*6b627f88SBjoern A. Zeeb 				poll = iwl_pcie_napi_poll_msix;
1138*6b627f88SBjoern A. Zeeb 
1139*6b627f88SBjoern A. Zeeb 			netif_napi_add(trans_pcie->napi_dev, &rxq->napi,
1140*6b627f88SBjoern A. Zeeb 				       poll);
1141*6b627f88SBjoern A. Zeeb 			napi_enable(&rxq->napi);
1142*6b627f88SBjoern A. Zeeb 		}
1143*6b627f88SBjoern A. Zeeb 
1144*6b627f88SBjoern A. Zeeb 	}
1145*6b627f88SBjoern A. Zeeb 
1146*6b627f88SBjoern A. Zeeb 	/* move the pool to the default queue and allocator ownerships */
1147*6b627f88SBjoern A. Zeeb 	queue_size = trans->mac_cfg->mq_rx_supported ?
1148*6b627f88SBjoern A. Zeeb 			trans_pcie->num_rx_bufs - 1 : RX_QUEUE_SIZE;
1149*6b627f88SBjoern A. Zeeb 	allocator_pool_size = trans->info.num_rxqs *
1150*6b627f88SBjoern A. Zeeb 		(RX_CLAIM_REQ_ALLOC - RX_POST_REQ_ALLOC);
1151*6b627f88SBjoern A. Zeeb 	num_alloc = queue_size + allocator_pool_size;
1152*6b627f88SBjoern A. Zeeb 
1153*6b627f88SBjoern A. Zeeb 	for (i = 0; i < num_alloc; i++) {
1154*6b627f88SBjoern A. Zeeb 		struct iwl_rx_mem_buffer *rxb = &trans_pcie->rx_pool[i];
1155*6b627f88SBjoern A. Zeeb 
1156*6b627f88SBjoern A. Zeeb 		if (i < allocator_pool_size)
1157*6b627f88SBjoern A. Zeeb 			list_add(&rxb->list, &rba->rbd_empty);
1158*6b627f88SBjoern A. Zeeb 		else
1159*6b627f88SBjoern A. Zeeb 			list_add(&rxb->list, &def_rxq->rx_used);
1160*6b627f88SBjoern A. Zeeb 		trans_pcie->global_table[i] = rxb;
1161*6b627f88SBjoern A. Zeeb 		rxb->vid = (u16)(i + 1);
1162*6b627f88SBjoern A. Zeeb 		rxb->invalid = true;
1163*6b627f88SBjoern A. Zeeb 	}
1164*6b627f88SBjoern A. Zeeb 
1165*6b627f88SBjoern A. Zeeb 	iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL, def_rxq);
1166*6b627f88SBjoern A. Zeeb 
1167*6b627f88SBjoern A. Zeeb 	return 0;
1168*6b627f88SBjoern A. Zeeb }
1169*6b627f88SBjoern A. Zeeb 
1170*6b627f88SBjoern A. Zeeb int iwl_pcie_rx_init(struct iwl_trans *trans)
1171*6b627f88SBjoern A. Zeeb {
1172*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1173*6b627f88SBjoern A. Zeeb 	int ret = _iwl_pcie_rx_init(trans);
1174*6b627f88SBjoern A. Zeeb 
1175*6b627f88SBjoern A. Zeeb 	if (ret)
1176*6b627f88SBjoern A. Zeeb 		return ret;
1177*6b627f88SBjoern A. Zeeb 
1178*6b627f88SBjoern A. Zeeb 	if (trans->mac_cfg->mq_rx_supported)
1179*6b627f88SBjoern A. Zeeb 		iwl_pcie_rx_mq_hw_init(trans);
1180*6b627f88SBjoern A. Zeeb 	else
1181*6b627f88SBjoern A. Zeeb 		iwl_pcie_rx_hw_init(trans, trans_pcie->rxq);
1182*6b627f88SBjoern A. Zeeb 
1183*6b627f88SBjoern A. Zeeb 	iwl_pcie_rxq_restock(trans, trans_pcie->rxq);
1184*6b627f88SBjoern A. Zeeb 
1185*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->rxq->lock);
1186*6b627f88SBjoern A. Zeeb 	iwl_pcie_rxq_inc_wr_ptr(trans, trans_pcie->rxq);
1187*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->rxq->lock);
1188*6b627f88SBjoern A. Zeeb 
1189*6b627f88SBjoern A. Zeeb 	return 0;
1190*6b627f88SBjoern A. Zeeb }
1191*6b627f88SBjoern A. Zeeb 
1192*6b627f88SBjoern A. Zeeb int iwl_pcie_gen2_rx_init(struct iwl_trans *trans)
1193*6b627f88SBjoern A. Zeeb {
1194*6b627f88SBjoern A. Zeeb 	/* Set interrupt coalescing timer to default (2048 usecs) */
1195*6b627f88SBjoern A. Zeeb 	iwl_write8(trans, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
1196*6b627f88SBjoern A. Zeeb 
1197*6b627f88SBjoern A. Zeeb 	/*
1198*6b627f88SBjoern A. Zeeb 	 * We don't configure the RFH.
1199*6b627f88SBjoern A. Zeeb 	 * Restock will be done at alive, after firmware configured the RFH.
1200*6b627f88SBjoern A. Zeeb 	 */
1201*6b627f88SBjoern A. Zeeb 	return _iwl_pcie_rx_init(trans);
1202*6b627f88SBjoern A. Zeeb }
1203*6b627f88SBjoern A. Zeeb 
1204*6b627f88SBjoern A. Zeeb void iwl_pcie_rx_free(struct iwl_trans *trans)
1205*6b627f88SBjoern A. Zeeb {
1206*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1207*6b627f88SBjoern A. Zeeb 	size_t rb_stts_size = iwl_pcie_rb_stts_size(trans);
1208*6b627f88SBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1209*6b627f88SBjoern A. Zeeb 	int i;
1210*6b627f88SBjoern A. Zeeb 
1211*6b627f88SBjoern A. Zeeb 	/*
1212*6b627f88SBjoern A. Zeeb 	 * if rxq is NULL, it means that nothing has been allocated,
1213*6b627f88SBjoern A. Zeeb 	 * exit now
1214*6b627f88SBjoern A. Zeeb 	 */
1215*6b627f88SBjoern A. Zeeb 	if (!trans_pcie->rxq) {
1216*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_INFO(trans, "Free NULL rx context\n");
1217*6b627f88SBjoern A. Zeeb 		return;
1218*6b627f88SBjoern A. Zeeb 	}
1219*6b627f88SBjoern A. Zeeb 
1220*6b627f88SBjoern A. Zeeb 	cancel_work_sync(&rba->rx_alloc);
1221*6b627f88SBjoern A. Zeeb 
1222*6b627f88SBjoern A. Zeeb 	iwl_pcie_free_rbs_pool(trans);
1223*6b627f88SBjoern A. Zeeb 
1224*6b627f88SBjoern A. Zeeb 	if (trans_pcie->base_rb_stts) {
1225*6b627f88SBjoern A. Zeeb 		dma_free_coherent(trans->dev,
1226*6b627f88SBjoern A. Zeeb 				  rb_stts_size * trans->info.num_rxqs,
1227*6b627f88SBjoern A. Zeeb 				  trans_pcie->base_rb_stts,
1228*6b627f88SBjoern A. Zeeb 				  trans_pcie->base_rb_stts_dma);
1229*6b627f88SBjoern A. Zeeb 		trans_pcie->base_rb_stts = NULL;
1230*6b627f88SBjoern A. Zeeb 		trans_pcie->base_rb_stts_dma = 0;
1231*6b627f88SBjoern A. Zeeb 	}
1232*6b627f88SBjoern A. Zeeb 
1233*6b627f88SBjoern A. Zeeb 	for (i = 0; i < trans->info.num_rxqs; i++) {
1234*6b627f88SBjoern A. Zeeb 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
1235*6b627f88SBjoern A. Zeeb 
1236*6b627f88SBjoern A. Zeeb 		iwl_pcie_free_rxq_dma(trans, rxq);
1237*6b627f88SBjoern A. Zeeb 
1238*6b627f88SBjoern A. Zeeb 		if (rxq->napi.poll) {
1239*6b627f88SBjoern A. Zeeb 			napi_disable(&rxq->napi);
1240*6b627f88SBjoern A. Zeeb 			netif_napi_del(&rxq->napi);
1241*6b627f88SBjoern A. Zeeb 		}
1242*6b627f88SBjoern A. Zeeb 	}
1243*6b627f88SBjoern A. Zeeb 	kfree(trans_pcie->rx_pool);
1244*6b627f88SBjoern A. Zeeb 	kfree(trans_pcie->global_table);
1245*6b627f88SBjoern A. Zeeb 	kfree(trans_pcie->rxq);
1246*6b627f88SBjoern A. Zeeb 
1247*6b627f88SBjoern A. Zeeb 	if (trans_pcie->alloc_page)
1248*6b627f88SBjoern A. Zeeb 		__free_pages(trans_pcie->alloc_page, trans_pcie->rx_page_order);
1249*6b627f88SBjoern A. Zeeb }
1250*6b627f88SBjoern A. Zeeb 
1251*6b627f88SBjoern A. Zeeb static void iwl_pcie_rx_move_to_allocator(struct iwl_rxq *rxq,
1252*6b627f88SBjoern A. Zeeb 					  struct iwl_rb_allocator *rba)
1253*6b627f88SBjoern A. Zeeb {
1254*6b627f88SBjoern A. Zeeb 	spin_lock(&rba->lock);
1255*6b627f88SBjoern A. Zeeb 	list_splice_tail_init(&rxq->rx_used, &rba->rbd_empty);
1256*6b627f88SBjoern A. Zeeb 	spin_unlock(&rba->lock);
1257*6b627f88SBjoern A. Zeeb }
1258*6b627f88SBjoern A. Zeeb 
1259*6b627f88SBjoern A. Zeeb /*
1260*6b627f88SBjoern A. Zeeb  * iwl_pcie_rx_reuse_rbd - Recycle used RBDs
1261*6b627f88SBjoern A. Zeeb  *
1262*6b627f88SBjoern A. Zeeb  * Called when a RBD can be reused. The RBD is transferred to the allocator.
1263*6b627f88SBjoern A. Zeeb  * When there are 2 empty RBDs - a request for allocation is posted
1264*6b627f88SBjoern A. Zeeb  */
1265*6b627f88SBjoern A. Zeeb static void iwl_pcie_rx_reuse_rbd(struct iwl_trans *trans,
1266*6b627f88SBjoern A. Zeeb 				  struct iwl_rx_mem_buffer *rxb,
1267*6b627f88SBjoern A. Zeeb 				  struct iwl_rxq *rxq, bool emergency)
1268*6b627f88SBjoern A. Zeeb {
1269*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1270*6b627f88SBjoern A. Zeeb 	struct iwl_rb_allocator *rba = &trans_pcie->rba;
1271*6b627f88SBjoern A. Zeeb 
1272*6b627f88SBjoern A. Zeeb 	/* Move the RBD to the used list, will be moved to allocator in batches
1273*6b627f88SBjoern A. Zeeb 	 * before claiming or posting a request */
1274*6b627f88SBjoern A. Zeeb 	list_add_tail(&rxb->list, &rxq->rx_used);
1275*6b627f88SBjoern A. Zeeb 
1276*6b627f88SBjoern A. Zeeb 	if (unlikely(emergency))
1277*6b627f88SBjoern A. Zeeb 		return;
1278*6b627f88SBjoern A. Zeeb 
1279*6b627f88SBjoern A. Zeeb 	/* Count the allocator owned RBDs */
1280*6b627f88SBjoern A. Zeeb 	rxq->used_count++;
1281*6b627f88SBjoern A. Zeeb 
1282*6b627f88SBjoern A. Zeeb 	/* If we have RX_POST_REQ_ALLOC newly released rx buffers -
1283*6b627f88SBjoern A. Zeeb 	 * issue a request to the allocator. The modulo on RX_CLAIM_REQ_ALLOC
1284*6b627f88SBjoern A. Zeeb 	 * covers the case where we failed to claim RX_CLAIM_REQ_ALLOC
1285*6b627f88SBjoern A. Zeeb 	 * buffers earlier but still need to post another request.
1286*6b627f88SBjoern A. Zeeb 	 */
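	/*
	 * With the current definitions (RX_POST_REQ_ALLOC == 2,
	 * RX_CLAIM_REQ_ALLOC == 8) this fires at used_count 2, 10, 18, ...,
	 * i.e. as soon as two RBDs beyond the last claimed batch of eight
	 * have been returned.
	 */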
1287*6b627f88SBjoern A. Zeeb 	if ((rxq->used_count % RX_CLAIM_REQ_ALLOC) == RX_POST_REQ_ALLOC) {
1288*6b627f88SBjoern A. Zeeb 		/* Move the 2 RBDs to the allocator ownership.
1289*6b627f88SBjoern A. Zeeb 		 * Allocator has another 6 from pool for the request completion */
1290*6b627f88SBjoern A. Zeeb 		iwl_pcie_rx_move_to_allocator(rxq, rba);
1291*6b627f88SBjoern A. Zeeb 
1292*6b627f88SBjoern A. Zeeb 		atomic_inc(&rba->req_pending);
1293*6b627f88SBjoern A. Zeeb 		queue_work(rba->alloc_wq, &rba->rx_alloc);
1294*6b627f88SBjoern A. Zeeb 	}
1295*6b627f88SBjoern A. Zeeb }
1296*6b627f88SBjoern A. Zeeb 
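/*
 * iwl_pcie_rx_handle_rb - process a single receive buffer
 *
 * Unmaps the RB's page and walks the packets packed into it, handing
 * each one to the op_mode (RSS queues use the _rss variant) and
 * reclaiming the command queue entry for command responses.  Afterwards
 * the page is freed (if a handler stole it), re-mapped and put back on
 * rx_free, or recycled through the allocator.
 */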
1297*6b627f88SBjoern A. Zeeb static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
1298*6b627f88SBjoern A. Zeeb 				struct iwl_rxq *rxq,
1299*6b627f88SBjoern A. Zeeb 				struct iwl_rx_mem_buffer *rxb,
1300*6b627f88SBjoern A. Zeeb 				bool emergency,
1301*6b627f88SBjoern A. Zeeb 				int i)
1302*6b627f88SBjoern A. Zeeb {
1303*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1304*6b627f88SBjoern A. Zeeb 	struct iwl_txq *txq = trans_pcie->txqs.txq[trans->conf.cmd_queue];
1305*6b627f88SBjoern A. Zeeb 	bool page_stolen = false;
1306*6b627f88SBjoern A. Zeeb 	int max_len = trans_pcie->rx_buf_bytes;
1307*6b627f88SBjoern A. Zeeb 	u32 offset = 0;
1308*6b627f88SBjoern A. Zeeb 
1309*6b627f88SBjoern A. Zeeb 	if (WARN_ON(!rxb))
1310*6b627f88SBjoern A. Zeeb 		return;
1311*6b627f88SBjoern A. Zeeb 
1312*6b627f88SBjoern A. Zeeb 	dma_unmap_page(trans->dev, rxb->page_dma, max_len, DMA_FROM_DEVICE);
1313*6b627f88SBjoern A. Zeeb 
1314*6b627f88SBjoern A. Zeeb 	while (offset + sizeof(u32) + sizeof(struct iwl_cmd_header) < max_len) {
1315*6b627f88SBjoern A. Zeeb 		struct iwl_rx_packet *pkt;
1316*6b627f88SBjoern A. Zeeb 		bool reclaim;
1317*6b627f88SBjoern A. Zeeb 		int len;
1318*6b627f88SBjoern A. Zeeb 		struct iwl_rx_cmd_buffer rxcb = {
1319*6b627f88SBjoern A. Zeeb 			._offset = rxb->offset + offset,
1320*6b627f88SBjoern A. Zeeb 			._rx_page_order = trans_pcie->rx_page_order,
1321*6b627f88SBjoern A. Zeeb 			._page = rxb->page,
1322*6b627f88SBjoern A. Zeeb 			._page_stolen = false,
1323*6b627f88SBjoern A. Zeeb 			.truesize = max_len,
1324*6b627f88SBjoern A. Zeeb 		};
1325*6b627f88SBjoern A. Zeeb 
1326*6b627f88SBjoern A. Zeeb 		pkt = rxb_addr(&rxcb);
1327*6b627f88SBjoern A. Zeeb 
1328*6b627f88SBjoern A. Zeeb 		if (pkt->len_n_flags == cpu_to_le32(FH_RSCSR_FRAME_INVALID)) {
1329*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_RX(trans,
1330*6b627f88SBjoern A. Zeeb 				     "Q %d: RB end marker at offset %d\n",
1331*6b627f88SBjoern A. Zeeb 				     rxq->id, offset);
1332*6b627f88SBjoern A. Zeeb 			break;
1333*6b627f88SBjoern A. Zeeb 		}
1334*6b627f88SBjoern A. Zeeb 
1335*6b627f88SBjoern A. Zeeb 		WARN((le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1336*6b627f88SBjoern A. Zeeb 			FH_RSCSR_RXQ_POS != rxq->id,
1337*6b627f88SBjoern A. Zeeb 		     "frame on invalid queue - is on %d and indicates %d\n",
1338*6b627f88SBjoern A. Zeeb 		     rxq->id,
1339*6b627f88SBjoern A. Zeeb 		     (le32_to_cpu(pkt->len_n_flags) & FH_RSCSR_RXQ_MASK) >>
1340*6b627f88SBjoern A. Zeeb 			FH_RSCSR_RXQ_POS);
1341*6b627f88SBjoern A. Zeeb 
1342*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_RX(trans,
1343*6b627f88SBjoern A. Zeeb 			     "Q %d: cmd at offset %d: %s (%.2x.%2x, seq 0x%x)\n",
1344*6b627f88SBjoern A. Zeeb 			     rxq->id, offset,
1345*6b627f88SBjoern A. Zeeb 			     iwl_get_cmd_string(trans,
1346*6b627f88SBjoern A. Zeeb 						WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd)),
1347*6b627f88SBjoern A. Zeeb 			     pkt->hdr.group_id, pkt->hdr.cmd,
1348*6b627f88SBjoern A. Zeeb 			     le16_to_cpu(pkt->hdr.sequence));
1349*6b627f88SBjoern A. Zeeb 
1350*6b627f88SBjoern A. Zeeb 		len = iwl_rx_packet_len(pkt);
1351*6b627f88SBjoern A. Zeeb 		len += sizeof(u32); /* account for status word */
1352*6b627f88SBjoern A. Zeeb 
1353*6b627f88SBjoern A. Zeeb 		offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN);
1354*6b627f88SBjoern A. Zeeb 
1355*6b627f88SBjoern A. Zeeb 		/* check that what the device tells us made sense */
1356*6b627f88SBjoern A. Zeeb 		if (len < sizeof(*pkt) || offset > max_len)
1357*6b627f88SBjoern A. Zeeb 			break;
1358*6b627f88SBjoern A. Zeeb 
1359*6b627f88SBjoern A. Zeeb 		maybe_trace_iwlwifi_dev_rx(trans, pkt, len);
1360*6b627f88SBjoern A. Zeeb 
1361*6b627f88SBjoern A. Zeeb 		/* Reclaim a command buffer only if this packet is a response
1362*6b627f88SBjoern A. Zeeb 		 *   to a (driver-originated) command.
1363*6b627f88SBjoern A. Zeeb 		 * If the packet (e.g. Rx frame) originated from uCode,
1364*6b627f88SBjoern A. Zeeb 		 *   there is no command buffer to reclaim.
1365*6b627f88SBjoern A. Zeeb 		 * Ucode should set SEQ_RX_FRAME bit if ucode-originated,
1366*6b627f88SBjoern A. Zeeb 		 *   but apparently a few don't get set; catch them here. */
1367*6b627f88SBjoern A. Zeeb 		reclaim = !(pkt->hdr.sequence & SEQ_RX_FRAME);
1368*6b627f88SBjoern A. Zeeb 		if (reclaim && !pkt->hdr.group_id) {
1369*6b627f88SBjoern A. Zeeb 			int i;
1370*6b627f88SBjoern A. Zeeb 
1371*6b627f88SBjoern A. Zeeb 			for (i = 0; i < trans->conf.n_no_reclaim_cmds; i++) {
1372*6b627f88SBjoern A. Zeeb 				if (trans->conf.no_reclaim_cmds[i] ==
1373*6b627f88SBjoern A. Zeeb 							pkt->hdr.cmd) {
1374*6b627f88SBjoern A. Zeeb 					reclaim = false;
1375*6b627f88SBjoern A. Zeeb 					break;
1376*6b627f88SBjoern A. Zeeb 				}
1377*6b627f88SBjoern A. Zeeb 			}
1378*6b627f88SBjoern A. Zeeb 		}
1379*6b627f88SBjoern A. Zeeb 
1380*6b627f88SBjoern A. Zeeb 		if (rxq->id == IWL_DEFAULT_RX_QUEUE)
1381*6b627f88SBjoern A. Zeeb 			iwl_op_mode_rx(trans->op_mode, &rxq->napi,
1382*6b627f88SBjoern A. Zeeb 				       &rxcb);
1383*6b627f88SBjoern A. Zeeb 		else
1384*6b627f88SBjoern A. Zeeb 			iwl_op_mode_rx_rss(trans->op_mode, &rxq->napi,
1385*6b627f88SBjoern A. Zeeb 					   &rxcb, rxq->id);
1386*6b627f88SBjoern A. Zeeb 
1387*6b627f88SBjoern A. Zeeb 		/*
1388*6b627f88SBjoern A. Zeeb 		 * After here, we should always check rxcb._page_stolen,
1389*6b627f88SBjoern A. Zeeb 		 * if it is true then one of the handlers took the page.
1390*6b627f88SBjoern A. Zeeb 		 */
1391*6b627f88SBjoern A. Zeeb 
1392*6b627f88SBjoern A. Zeeb 		if (reclaim && txq) {
1393*6b627f88SBjoern A. Zeeb 			u16 sequence = le16_to_cpu(pkt->hdr.sequence);
1394*6b627f88SBjoern A. Zeeb 			int index = SEQ_TO_INDEX(sequence);
1395*6b627f88SBjoern A. Zeeb 			int cmd_index = iwl_txq_get_cmd_index(txq, index);
1396*6b627f88SBjoern A. Zeeb 
1397*6b627f88SBjoern A. Zeeb 			kfree_sensitive(txq->entries[cmd_index].free_buf);
1398*6b627f88SBjoern A. Zeeb 			txq->entries[cmd_index].free_buf = NULL;
1399*6b627f88SBjoern A. Zeeb 
1400*6b627f88SBjoern A. Zeeb 			/* Invoke any callbacks, transfer the buffer to caller,
1401*6b627f88SBjoern A. Zeeb 			 * and fire off the (possibly) blocking
1402*6b627f88SBjoern A. Zeeb 			 * iwl_trans_send_cmd()
1403*6b627f88SBjoern A. Zeeb 			 * as we reclaim the driver command queue */
1404*6b627f88SBjoern A. Zeeb 			if (!rxcb._page_stolen)
1405*6b627f88SBjoern A. Zeeb 				iwl_pcie_hcmd_complete(trans, &rxcb);
1406*6b627f88SBjoern A. Zeeb 			else
1407*6b627f88SBjoern A. Zeeb 				IWL_WARN(trans, "Claim null rxb?\n");
1408*6b627f88SBjoern A. Zeeb 		}
1409*6b627f88SBjoern A. Zeeb 
1410*6b627f88SBjoern A. Zeeb 		page_stolen |= rxcb._page_stolen;
1411*6b627f88SBjoern A. Zeeb 		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1412*6b627f88SBjoern A. Zeeb 			break;
1413*6b627f88SBjoern A. Zeeb 	}
1414*6b627f88SBjoern A. Zeeb 
1415*6b627f88SBjoern A. Zeeb 	/* page was stolen from us -- free our reference */
1416*6b627f88SBjoern A. Zeeb 	if (page_stolen) {
1417*6b627f88SBjoern A. Zeeb 		__free_pages(rxb->page, trans_pcie->rx_page_order);
1418*6b627f88SBjoern A. Zeeb 		rxb->page = NULL;
1419*6b627f88SBjoern A. Zeeb 	}
1420*6b627f88SBjoern A. Zeeb 
1421*6b627f88SBjoern A. Zeeb 	/* Reuse the page if possible. For notification packets and
1422*6b627f88SBjoern A. Zeeb 	 * SKBs that fail to Rx correctly, add them back into the
1423*6b627f88SBjoern A. Zeeb 	 * rx_free list for reuse later. */
1424*6b627f88SBjoern A. Zeeb 	if (rxb->page != NULL) {
1425*6b627f88SBjoern A. Zeeb 		rxb->page_dma =
1426*6b627f88SBjoern A. Zeeb 			dma_map_page(trans->dev, rxb->page, rxb->offset,
1427*6b627f88SBjoern A. Zeeb 				     trans_pcie->rx_buf_bytes,
1428*6b627f88SBjoern A. Zeeb 				     DMA_FROM_DEVICE);
1429*6b627f88SBjoern A. Zeeb 		if (dma_mapping_error(trans->dev, rxb->page_dma)) {
1430*6b627f88SBjoern A. Zeeb 			/*
1431*6b627f88SBjoern A. Zeeb 			 * free the page(s) as well to not break
1432*6b627f88SBjoern A. Zeeb 			 * the invariant that the items on the used
1433*6b627f88SBjoern A. Zeeb 			 * list have no page(s)
1434*6b627f88SBjoern A. Zeeb 			 */
1435*6b627f88SBjoern A. Zeeb 			__free_pages(rxb->page, trans_pcie->rx_page_order);
1436*6b627f88SBjoern A. Zeeb 			rxb->page = NULL;
1437*6b627f88SBjoern A. Zeeb 			iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1438*6b627f88SBjoern A. Zeeb 		} else {
1439*6b627f88SBjoern A. Zeeb 			list_add_tail(&rxb->list, &rxq->rx_free);
1440*6b627f88SBjoern A. Zeeb 			rxq->free_count++;
1441*6b627f88SBjoern A. Zeeb 		}
1442*6b627f88SBjoern A. Zeeb 	} else
1443*6b627f88SBjoern A. Zeeb 		iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency);
1444*6b627f88SBjoern A. Zeeb }
1445*6b627f88SBjoern A. Zeeb 
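/*
 * iwl_pcie_get_rxb - look up the RB the device completed at index i
 *
 * On single-queue hardware this is simply rxq->queue[i].  On multi-queue
 * hardware the used BD (whose layout depends on the device family)
 * carries a virtual buffer ID indexing the global table; an out-of-range
 * or stale VID is treated as a fatal error and triggers an NMI.
 */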
1446*6b627f88SBjoern A. Zeeb static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans,
1447*6b627f88SBjoern A. Zeeb 						  struct iwl_rxq *rxq, int i,
1448*6b627f88SBjoern A. Zeeb 						  bool *join)
1449*6b627f88SBjoern A. Zeeb {
1450*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1451*6b627f88SBjoern A. Zeeb 	struct iwl_rx_mem_buffer *rxb;
1452*6b627f88SBjoern A. Zeeb 	u16 vid;
1453*6b627f88SBjoern A. Zeeb 
1454*6b627f88SBjoern A. Zeeb 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc) != 32);
1455*6b627f88SBjoern A. Zeeb 	BUILD_BUG_ON(sizeof(struct iwl_rx_completion_desc_bz) != 4);
1456*6b627f88SBjoern A. Zeeb 
1457*6b627f88SBjoern A. Zeeb 	if (!trans->mac_cfg->mq_rx_supported) {
1458*6b627f88SBjoern A. Zeeb 		rxb = rxq->queue[i];
1459*6b627f88SBjoern A. Zeeb 		rxq->queue[i] = NULL;
1460*6b627f88SBjoern A. Zeeb 		return rxb;
1461*6b627f88SBjoern A. Zeeb 	}
1462*6b627f88SBjoern A. Zeeb 
1463*6b627f88SBjoern A. Zeeb 	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
1464*6b627f88SBjoern A. Zeeb 		struct iwl_rx_completion_desc_bz *cd = rxq->used_bd;
1465*6b627f88SBjoern A. Zeeb 
1466*6b627f88SBjoern A. Zeeb 		vid = le16_to_cpu(cd[i].rbid);
1467*6b627f88SBjoern A. Zeeb 		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1468*6b627f88SBjoern A. Zeeb 	} else if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
1469*6b627f88SBjoern A. Zeeb 		struct iwl_rx_completion_desc *cd = rxq->used_bd;
1470*6b627f88SBjoern A. Zeeb 
1471*6b627f88SBjoern A. Zeeb 		vid = le16_to_cpu(cd[i].rbid);
1472*6b627f88SBjoern A. Zeeb 		*join = cd[i].flags & IWL_RX_CD_FLAGS_FRAGMENTED;
1473*6b627f88SBjoern A. Zeeb 	} else {
1474*6b627f88SBjoern A. Zeeb 		__le32 *cd = rxq->used_bd;
1475*6b627f88SBjoern A. Zeeb 
1476*6b627f88SBjoern A. Zeeb 		vid = le32_to_cpu(cd[i]) & 0x0FFF; /* 12-bit VID */
1477*6b627f88SBjoern A. Zeeb 	}
1478*6b627f88SBjoern A. Zeeb 
1479*6b627f88SBjoern A. Zeeb 	if (!vid || vid > RX_POOL_SIZE(trans_pcie->num_rx_bufs))
1480*6b627f88SBjoern A. Zeeb 		goto out_err;
1481*6b627f88SBjoern A. Zeeb 
1482*6b627f88SBjoern A. Zeeb 	rxb = trans_pcie->global_table[vid - 1];
1483*6b627f88SBjoern A. Zeeb 	if (rxb->invalid)
1484*6b627f88SBjoern A. Zeeb 		goto out_err;
1485*6b627f88SBjoern A. Zeeb 
1486*6b627f88SBjoern A. Zeeb 	IWL_DEBUG_RX(trans, "Got virtual RB ID %u\n", (u32)rxb->vid);
1487*6b627f88SBjoern A. Zeeb 
1488*6b627f88SBjoern A. Zeeb 	rxb->invalid = true;
1489*6b627f88SBjoern A. Zeeb 
1490*6b627f88SBjoern A. Zeeb 	return rxb;
1491*6b627f88SBjoern A. Zeeb 
1492*6b627f88SBjoern A. Zeeb out_err:
1493*6b627f88SBjoern A. Zeeb 	WARN(1, "Invalid rxb from HW %u\n", (u32)vid);
1494*6b627f88SBjoern A. Zeeb 	iwl_force_nmi(trans);
1495*6b627f88SBjoern A. Zeeb 	return NULL;
1496*6b627f88SBjoern A. Zeeb }
1497*6b627f88SBjoern A. Zeeb 
1498*6b627f88SBjoern A. Zeeb /*
1499*6b627f88SBjoern A. Zeeb  * iwl_pcie_rx_handle - Main entry function for receiving responses from fw
1500*6b627f88SBjoern A. Zeeb  */
1501*6b627f88SBjoern A. Zeeb static int iwl_pcie_rx_handle(struct iwl_trans *trans, int queue, int budget)
1502*6b627f88SBjoern A. Zeeb {
1503*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1504*6b627f88SBjoern A. Zeeb 	struct iwl_rxq *rxq;
1505*6b627f88SBjoern A. Zeeb 	u32 r, i, count = 0, handled = 0;
1506*6b627f88SBjoern A. Zeeb 	bool emergency = false;
1507*6b627f88SBjoern A. Zeeb 
1508*6b627f88SBjoern A. Zeeb 	if (WARN_ON_ONCE(!trans_pcie->rxq || !trans_pcie->rxq[queue].bd))
1509*6b627f88SBjoern A. Zeeb 		return budget;
1510*6b627f88SBjoern A. Zeeb 
1511*6b627f88SBjoern A. Zeeb 	rxq = &trans_pcie->rxq[queue];
1512*6b627f88SBjoern A. Zeeb 
1513*6b627f88SBjoern A. Zeeb restart:
1514*6b627f88SBjoern A. Zeeb 	spin_lock(&rxq->lock);
1515*6b627f88SBjoern A. Zeeb 	/* uCode's read index (stored in shared DRAM) indicates the last Rx
1516*6b627f88SBjoern A. Zeeb 	 * buffer that the driver may process (last buffer filled by ucode). */
1517*6b627f88SBjoern A. Zeeb 	r = iwl_get_closed_rb_stts(trans, rxq);
1518*6b627f88SBjoern A. Zeeb 	i = rxq->read;
1519*6b627f88SBjoern A. Zeeb 
1520*6b627f88SBjoern A. Zeeb 	/* W/A 9000 device step A0 wrap-around bug */
1521*6b627f88SBjoern A. Zeeb 	r &= (rxq->queue_size - 1);
1522*6b627f88SBjoern A. Zeeb 
1523*6b627f88SBjoern A. Zeeb 	/* Rx interrupt, but nothing sent from uCode */
1524*6b627f88SBjoern A. Zeeb 	if (i == r)
1525*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_RX(trans, "Q %d: HW = SW = %d\n", rxq->id, r);
1526*6b627f88SBjoern A. Zeeb 
1527*6b627f88SBjoern A. Zeeb 	while (i != r && ++handled < budget) {
1528*6b627f88SBjoern A. Zeeb 		struct iwl_rb_allocator *rba = &trans_pcie->rba;
1529*6b627f88SBjoern A. Zeeb 		struct iwl_rx_mem_buffer *rxb;
1530*6b627f88SBjoern A. Zeeb 		/* number of RBDs still waiting for page allocation */
1531*6b627f88SBjoern A. Zeeb 		u32 rb_pending_alloc =
1532*6b627f88SBjoern A. Zeeb 			atomic_read(&trans_pcie->rba.req_pending) *
1533*6b627f88SBjoern A. Zeeb 			RX_CLAIM_REQ_ALLOC;
1534*6b627f88SBjoern A. Zeeb 		bool join = false;
1535*6b627f88SBjoern A. Zeeb 
1536*6b627f88SBjoern A. Zeeb 		if (unlikely(rb_pending_alloc >= rxq->queue_size / 2 &&
1537*6b627f88SBjoern A. Zeeb 			     !emergency)) {
1538*6b627f88SBjoern A. Zeeb 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1539*6b627f88SBjoern A. Zeeb 			emergency = true;
1540*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_TPT(trans,
1541*6b627f88SBjoern A. Zeeb 				      "RX path is in emergency. Pending allocations %d\n",
1542*6b627f88SBjoern A. Zeeb 				      rb_pending_alloc);
1543*6b627f88SBjoern A. Zeeb 		}
1544*6b627f88SBjoern A. Zeeb 
1545*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i);
1546*6b627f88SBjoern A. Zeeb 
1547*6b627f88SBjoern A. Zeeb 		rxb = iwl_pcie_get_rxb(trans, rxq, i, &join);
1548*6b627f88SBjoern A. Zeeb 		if (!rxb)
1549*6b627f88SBjoern A. Zeeb 			goto out;
1550*6b627f88SBjoern A. Zeeb 
1551*6b627f88SBjoern A. Zeeb 		if (unlikely(join || rxq->next_rb_is_fragment)) {
1552*6b627f88SBjoern A. Zeeb 			rxq->next_rb_is_fragment = join;
1553*6b627f88SBjoern A. Zeeb 			/*
1554*6b627f88SBjoern A. Zeeb 			 * We can only get a multi-RB in the following cases:
1555*6b627f88SBjoern A. Zeeb 			 *  - firmware issue, sending a too big notification
1556*6b627f88SBjoern A. Zeeb 			 *  - sniffer mode with a large A-MSDU
1557*6b627f88SBjoern A. Zeeb 			 *  - large MTU frames (>2k)
1558*6b627f88SBjoern A. Zeeb 			 * since the multi-RB functionality is limited to newer
1559*6b627f88SBjoern A. Zeeb 			 * hardware that cannot put multiple entries into a
1560*6b627f88SBjoern A. Zeeb 			 * single RB.
1561*6b627f88SBjoern A. Zeeb 			 *
1562*6b627f88SBjoern A. Zeeb 			 * Right now, the higher layers aren't set up to deal
1563*6b627f88SBjoern A. Zeeb 			 * with that, so discard all of these.
1564*6b627f88SBjoern A. Zeeb 			 */
1565*6b627f88SBjoern A. Zeeb 			list_add_tail(&rxb->list, &rxq->rx_free);
1566*6b627f88SBjoern A. Zeeb 			rxq->free_count++;
1567*6b627f88SBjoern A. Zeeb 		} else {
1568*6b627f88SBjoern A. Zeeb 			iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency, i);
1569*6b627f88SBjoern A. Zeeb 		}
1570*6b627f88SBjoern A. Zeeb 
1571*6b627f88SBjoern A. Zeeb 		i = (i + 1) & (rxq->queue_size - 1);
1572*6b627f88SBjoern A. Zeeb 
1573*6b627f88SBjoern A. Zeeb 		/*
1574*6b627f88SBjoern A. Zeeb 		 * If we have RX_CLAIM_REQ_ALLOC released rx buffers -
1575*6b627f88SBjoern A. Zeeb 		 * try to claim the pre-allocated buffers from the allocator.
1576*6b627f88SBjoern A. Zeeb 		 * If not ready - will try to reclaim next time.
1577*6b627f88SBjoern A. Zeeb 		 * There is no need to reschedule work - allocator exits only
1578*6b627f88SBjoern A. Zeeb 		 * on success
1579*6b627f88SBjoern A. Zeeb 		 */
1580*6b627f88SBjoern A. Zeeb 		if (rxq->used_count >= RX_CLAIM_REQ_ALLOC)
1581*6b627f88SBjoern A. Zeeb 			iwl_pcie_rx_allocator_get(trans, rxq);
1582*6b627f88SBjoern A. Zeeb 
1583*6b627f88SBjoern A. Zeeb 		if (rxq->used_count % RX_CLAIM_REQ_ALLOC == 0 && !emergency) {
1584*6b627f88SBjoern A. Zeeb 			/* Add the remaining empty RBDs for allocator use */
1585*6b627f88SBjoern A. Zeeb 			iwl_pcie_rx_move_to_allocator(rxq, rba);
1586*6b627f88SBjoern A. Zeeb 		} else if (emergency) {
1587*6b627f88SBjoern A. Zeeb 			count++;
1588*6b627f88SBjoern A. Zeeb 			if (count == 8) {
1589*6b627f88SBjoern A. Zeeb 				count = 0;
1590*6b627f88SBjoern A. Zeeb 				if (rb_pending_alloc < rxq->queue_size / 3) {
1591*6b627f88SBjoern A. Zeeb 					IWL_DEBUG_TPT(trans,
1592*6b627f88SBjoern A. Zeeb 						      "RX path exited emergency. Pending allocations %d\n",
1593*6b627f88SBjoern A. Zeeb 						      rb_pending_alloc);
1594*6b627f88SBjoern A. Zeeb 					emergency = false;
1595*6b627f88SBjoern A. Zeeb 				}
1596*6b627f88SBjoern A. Zeeb 
1597*6b627f88SBjoern A. Zeeb 				rxq->read = i;
1598*6b627f88SBjoern A. Zeeb 				spin_unlock(&rxq->lock);
1599*6b627f88SBjoern A. Zeeb 				iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1600*6b627f88SBjoern A. Zeeb 				iwl_pcie_rxq_restock(trans, rxq);
1601*6b627f88SBjoern A. Zeeb 				goto restart;
1602*6b627f88SBjoern A. Zeeb 			}
1603*6b627f88SBjoern A. Zeeb 		}
1604*6b627f88SBjoern A. Zeeb 	}
1605*6b627f88SBjoern A. Zeeb out:
1606*6b627f88SBjoern A. Zeeb 	/* Backtrack one entry */
1607*6b627f88SBjoern A. Zeeb 	rxq->read = i;
1608*6b627f88SBjoern A. Zeeb 	spin_unlock(&rxq->lock);
1609*6b627f88SBjoern A. Zeeb 
1610*6b627f88SBjoern A. Zeeb 	/*
1611*6b627f88SBjoern A. Zeeb 	 * Handle a case where in emergency there are some unallocated RBDs.
1612*6b627f88SBjoern A. Zeeb 	 * Those RBDs are in the used list, but are not tracked by the queue's
1613*6b627f88SBjoern A. Zeeb 	 * used_count which counts allocator owned RBDs.
1614*6b627f88SBjoern A. Zeeb 	 * Unallocated emergency RBDs must be allocated on exit, otherwise
1615*6b627f88SBjoern A. Zeeb 	 * when called again the function may not be in emergency mode and
1616*6b627f88SBjoern A. Zeeb 	 * they will be handed to the allocator with no tracking in the RBD
1617*6b627f88SBjoern A. Zeeb 	 * allocator counters, which will lead to them never being claimed back
1618*6b627f88SBjoern A. Zeeb 	 * by the queue.
1619*6b627f88SBjoern A. Zeeb 	 * By allocating them here, they are now in the queue free list, and
1620*6b627f88SBjoern A. Zeeb 	 * will be restocked by the next call of iwl_pcie_rxq_restock.
1621*6b627f88SBjoern A. Zeeb 	 */
1622*6b627f88SBjoern A. Zeeb 	if (unlikely(emergency && count))
1623*6b627f88SBjoern A. Zeeb 		iwl_pcie_rxq_alloc_rbs(trans, GFP_ATOMIC, rxq);
1624*6b627f88SBjoern A. Zeeb 
1625*6b627f88SBjoern A. Zeeb 	iwl_pcie_rxq_restock(trans, rxq);
1626*6b627f88SBjoern A. Zeeb 
1627*6b627f88SBjoern A. Zeeb 	return handled;
1628*6b627f88SBjoern A. Zeeb }
1629*6b627f88SBjoern A. Zeeb 
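/*
 * entry->entry is the index of this MSI-X entry within the
 * msix_entries[] array embedded in struct iwl_trans_pcie, so stepping
 * back by that many entries yields msix_entries[0] and, via
 * container_of(), the enclosing iwl_trans_pcie.
 */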
1630*6b627f88SBjoern A. Zeeb static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry)
1631*6b627f88SBjoern A. Zeeb {
1632*6b627f88SBjoern A. Zeeb 	u8 queue = entry->entry;
1633*6b627f88SBjoern A. Zeeb 	struct msix_entry *entries = entry - queue;
1634*6b627f88SBjoern A. Zeeb 
1635*6b627f88SBjoern A. Zeeb 	return container_of(entries, struct iwl_trans_pcie, msix_entries[0]);
1636*6b627f88SBjoern A. Zeeb }
1637*6b627f88SBjoern A. Zeeb 
1638*6b627f88SBjoern A. Zeeb /*
1639*6b627f88SBjoern A. Zeeb  * iwl_pcie_irq_rx_msix_handler - Main entry function for receiving responses from fw
1640*6b627f88SBjoern A. Zeeb  * This interrupt handler should be used with RSS queues only.
1641*6b627f88SBjoern A. Zeeb  */
1642*6b627f88SBjoern A. Zeeb irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id)
1643*6b627f88SBjoern A. Zeeb {
1644*6b627f88SBjoern A. Zeeb 	struct msix_entry *entry = dev_id;
1645*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
1646*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans = trans_pcie->trans;
1647*6b627f88SBjoern A. Zeeb 	struct iwl_rxq *rxq;
1648*6b627f88SBjoern A. Zeeb 
1649*6b627f88SBjoern A. Zeeb 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, false, 0, 0);
1650*6b627f88SBjoern A. Zeeb 
1651*6b627f88SBjoern A. Zeeb 	if (WARN_ON(entry->entry >= trans->info.num_rxqs))
1652*6b627f88SBjoern A. Zeeb 		return IRQ_NONE;
1653*6b627f88SBjoern A. Zeeb 
1654*6b627f88SBjoern A. Zeeb 	if (!trans_pcie->rxq) {
1655*6b627f88SBjoern A. Zeeb 		if (net_ratelimit())
1656*6b627f88SBjoern A. Zeeb 			IWL_ERR(trans,
1657*6b627f88SBjoern A. Zeeb 				"[%d] Got MSI-X interrupt before we have Rx queues\n",
1658*6b627f88SBjoern A. Zeeb 				entry->entry);
1659*6b627f88SBjoern A. Zeeb 		return IRQ_NONE;
1660*6b627f88SBjoern A. Zeeb 	}
1661*6b627f88SBjoern A. Zeeb 
1662*6b627f88SBjoern A. Zeeb 	rxq = &trans_pcie->rxq[entry->entry];
1663*6b627f88SBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1664*6b627f88SBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "[%d] Got interrupt\n", entry->entry);
1665*6b627f88SBjoern A. Zeeb 
1666*6b627f88SBjoern A. Zeeb 	local_bh_disable();
1667*6b627f88SBjoern A. Zeeb 	if (!napi_schedule(&rxq->napi))
1668*6b627f88SBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, entry->entry);
1669*6b627f88SBjoern A. Zeeb 	local_bh_enable();
1670*6b627f88SBjoern A. Zeeb 
1671*6b627f88SBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
1672*6b627f88SBjoern A. Zeeb 
1673*6b627f88SBjoern A. Zeeb 	return IRQ_HANDLED;
1674*6b627f88SBjoern A. Zeeb }
1675*6b627f88SBjoern A. Zeeb 
1676*6b627f88SBjoern A. Zeeb /*
1677*6b627f88SBjoern A. Zeeb  * iwl_pcie_irq_handle_error - called for HW or SW error interrupt from card
1678*6b627f88SBjoern A. Zeeb  */
1679*6b627f88SBjoern A. Zeeb static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
1680*6b627f88SBjoern A. Zeeb {
1681*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1682*6b627f88SBjoern A. Zeeb 	int i;
1683*6b627f88SBjoern A. Zeeb 
1684*6b627f88SBjoern A. Zeeb 	/* W/A for WiFi/WiMAX coex and WiMAX own the RF */
1685*6b627f88SBjoern A. Zeeb 	if (trans->cfg->internal_wimax_coex &&
1686*6b627f88SBjoern A. Zeeb 	    !trans->mac_cfg->base->apmg_not_supported &&
1687*6b627f88SBjoern A. Zeeb 	    (!(iwl_read_prph(trans, APMG_CLK_CTRL_REG) &
1688*6b627f88SBjoern A. Zeeb 			     APMS_CLK_VAL_MRB_FUNC_MODE) ||
1689*6b627f88SBjoern A. Zeeb 	     (iwl_read_prph(trans, APMG_PS_CTRL_REG) &
1690*6b627f88SBjoern A. Zeeb 			    APMG_PS_CTRL_VAL_RESET_REQ))) {
1691*6b627f88SBjoern A. Zeeb 		clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1692*6b627f88SBjoern A. Zeeb 		iwl_op_mode_wimax_active(trans->op_mode);
1693*6b627f88SBjoern A. Zeeb 		wake_up(&trans_pcie->wait_command_queue);
1694*6b627f88SBjoern A. Zeeb 		return;
1695*6b627f88SBjoern A. Zeeb 	}
1696*6b627f88SBjoern A. Zeeb 
1697*6b627f88SBjoern A. Zeeb 	for (i = 0; i < trans->mac_cfg->base->num_of_queues; i++) {
1698*6b627f88SBjoern A. Zeeb 		if (!trans_pcie->txqs.txq[i])
1699*6b627f88SBjoern A. Zeeb 			continue;
1700*6b627f88SBjoern A. Zeeb 		timer_delete(&trans_pcie->txqs.txq[i]->stuck_timer);
1701*6b627f88SBjoern A. Zeeb 	}
1702*6b627f88SBjoern A. Zeeb 
1703*6b627f88SBjoern A. Zeeb 	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
1704*6b627f88SBjoern A. Zeeb 		u32 val = iwl_read32(trans, CSR_IPC_STATE);
1705*6b627f88SBjoern A. Zeeb 
1706*6b627f88SBjoern A. Zeeb 		if (val & CSR_IPC_STATE_TOP_RESET_REQ) {
1707*6b627f88SBjoern A. Zeeb 			IWL_ERR(trans, "FW requested TOP reset for FSEQ\n");
1708*6b627f88SBjoern A. Zeeb 			trans->do_top_reset = 1;
1709*6b627f88SBjoern A. Zeeb 		}
1710*6b627f88SBjoern A. Zeeb 	}
1711*6b627f88SBjoern A. Zeeb 
1712*6b627f88SBjoern A. Zeeb 	/* The STATUS_FW_ERROR bit is set in this function. This must happen
1713*6b627f88SBjoern A. Zeeb 	 * before we wake up the command caller, to ensure a proper cleanup. */
1714*6b627f88SBjoern A. Zeeb 	iwl_trans_fw_error(trans, IWL_ERR_TYPE_IRQ);
1715*6b627f88SBjoern A. Zeeb 
1716*6b627f88SBjoern A. Zeeb 	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
1717*6b627f88SBjoern A. Zeeb 	wake_up(&trans_pcie->wait_command_queue);
1718*6b627f88SBjoern A. Zeeb }
1719*6b627f88SBjoern A. Zeeb 
1720*6b627f88SBjoern A. Zeeb static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
1721*6b627f88SBjoern A. Zeeb {
1722*6b627f88SBjoern A. Zeeb 	u32 inta;
1723*6b627f88SBjoern A. Zeeb 
1724*6b627f88SBjoern A. Zeeb 	lockdep_assert_held(&IWL_TRANS_GET_PCIE_TRANS(trans)->irq_lock);
1725*6b627f88SBjoern A. Zeeb 
1726*6b627f88SBjoern A. Zeeb 	trace_iwlwifi_dev_irq(trans->dev);
1727*6b627f88SBjoern A. Zeeb 
1728*6b627f88SBjoern A. Zeeb 	/* Discover which interrupts are active/pending */
1729*6b627f88SBjoern A. Zeeb 	inta = iwl_read32(trans, CSR_INT);
1730*6b627f88SBjoern A. Zeeb 
1731*6b627f88SBjoern A. Zeeb 	/* the thread will service interrupts and re-enable them */
1732*6b627f88SBjoern A. Zeeb 	return inta;
1733*6b627f88SBjoern A. Zeeb }
1734*6b627f88SBjoern A. Zeeb 
1735*6b627f88SBjoern A. Zeeb /* a device (PCI-E) page is 4096 bytes long */
1736*6b627f88SBjoern A. Zeeb #define ICT_SHIFT	12
1737*6b627f88SBjoern A. Zeeb #define ICT_SIZE	(1 << ICT_SHIFT)
1738*6b627f88SBjoern A. Zeeb #define ICT_COUNT	(ICT_SIZE / sizeof(u32))
1739*6b627f88SBjoern A. Zeeb 
1740*6b627f88SBjoern A. Zeeb /* Interrupt handler using the ICT table. With this mechanism the driver stops
1741*6b627f88SBjoern A. Zeeb  * using the INTA register to get the device's interrupts, since reading that
1742*6b627f88SBjoern A. Zeeb  * register is expensive. Instead, the device writes its interrupts into the
1743*6b627f88SBjoern A. Zeeb  * ICT DRAM table, increments the index and then fires an interrupt to the
1744*6b627f88SBjoern A. Zeeb  * driver. The driver ORs all ICT table entries from the current index up to
1745*6b627f88SBjoern A. Zeeb  * the first entry with a 0 value; the result is the interrupt we need to
1746*6b627f88SBjoern A. Zeeb  * service. The driver then sets those entries back to 0 and updates the index.
1747*6b627f88SBjoern A. Zeeb  */
1748*6b627f88SBjoern A. Zeeb static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
1749*6b627f88SBjoern A. Zeeb {
1750*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1751*6b627f88SBjoern A. Zeeb 	u32 inta;
1752*6b627f88SBjoern A. Zeeb 	u32 val = 0;
1753*6b627f88SBjoern A. Zeeb 	u32 read;
1754*6b627f88SBjoern A. Zeeb 
1755*6b627f88SBjoern A. Zeeb 	trace_iwlwifi_dev_irq(trans->dev);
1756*6b627f88SBjoern A. Zeeb 
1757*6b627f88SBjoern A. Zeeb 	/* Ignore interrupt if there's nothing in NIC to service.
1758*6b627f88SBjoern A. Zeeb 	 * This may be due to IRQ shared with another device,
1759*6b627f88SBjoern A. Zeeb 	 * or due to sporadic interrupts thrown from our NIC. */
1760*6b627f88SBjoern A. Zeeb 	read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1761*6b627f88SBjoern A. Zeeb 	trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
1762*6b627f88SBjoern A. Zeeb 	if (!read)
1763*6b627f88SBjoern A. Zeeb 		return 0;
1764*6b627f88SBjoern A. Zeeb 
1765*6b627f88SBjoern A. Zeeb 	/*
1766*6b627f88SBjoern A. Zeeb 	 * Collect all entries up to the first 0, starting from ict_index;
1767*6b627f88SBjoern A. Zeeb 	 * note we already read at ict_index.
1768*6b627f88SBjoern A. Zeeb 	 */
1769*6b627f88SBjoern A. Zeeb 	do {
1770*6b627f88SBjoern A. Zeeb 		val |= read;
1771*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
1772*6b627f88SBjoern A. Zeeb 				trans_pcie->ict_index, read);
1773*6b627f88SBjoern A. Zeeb 		trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
1774*6b627f88SBjoern A. Zeeb 		trans_pcie->ict_index =
1775*6b627f88SBjoern A. Zeeb 			((trans_pcie->ict_index + 1) & (ICT_COUNT - 1));
1776*6b627f88SBjoern A. Zeeb 
1777*6b627f88SBjoern A. Zeeb 		read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
1778*6b627f88SBjoern A. Zeeb 		trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
1779*6b627f88SBjoern A. Zeeb 					   read);
1780*6b627f88SBjoern A. Zeeb 	} while (read);
1781*6b627f88SBjoern A. Zeeb 
1782*6b627f88SBjoern A. Zeeb 	/* We should not get this value, just ignore it. */
1783*6b627f88SBjoern A. Zeeb 	if (val == 0xffffffff)
1784*6b627f88SBjoern A. Zeeb 		val = 0;
1785*6b627f88SBjoern A. Zeeb 
1786*6b627f88SBjoern A. Zeeb 	/*
1787*6b627f88SBjoern A. Zeeb 	 * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
1788*6b627f88SBjoern A. Zeeb 	 * (bit 15 before shifting it to 31) to clear when using interrupt
1789*6b627f88SBjoern A. Zeeb 	 * coalescing. fortunately, bits 18 and 19 stay set when this happens
1790*6b627f88SBjoern A. Zeeb 	 * so we use them to decide on the real state of the Rx bit.
1791*6b627f88SBjoern A. Zeeb 	 * In other words, bit 15 is set if bit 18 or bit 19 is set.
1792*6b627f88SBjoern A. Zeeb 	 */
1793*6b627f88SBjoern A. Zeeb 	if (val & 0xC0000)
1794*6b627f88SBjoern A. Zeeb 		val |= 0x8000;
1795*6b627f88SBjoern A. Zeeb 
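	/*
	 * Re-expand the compacted ICT value to CSR_INT bit positions:
	 * the low byte stays at bits 0-7 and the second byte is moved
	 * up to bits 24-31.
	 */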
1796*6b627f88SBjoern A. Zeeb 	inta = (0xff & val) | ((0xff00 & val) << 16);
1797*6b627f88SBjoern A. Zeeb 	return inta;
1798*6b627f88SBjoern A. Zeeb }
1799*6b627f88SBjoern A. Zeeb 
1800*6b627f88SBjoern A. Zeeb void iwl_pcie_handle_rfkill_irq(struct iwl_trans *trans, bool from_irq)
1801*6b627f88SBjoern A. Zeeb {
1802*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1803*6b627f88SBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1804*6b627f88SBjoern A. Zeeb 	bool hw_rfkill, prev, report;
1805*6b627f88SBjoern A. Zeeb 
1806*6b627f88SBjoern A. Zeeb 	mutex_lock(&trans_pcie->mutex);
1807*6b627f88SBjoern A. Zeeb 	prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1808*6b627f88SBjoern A. Zeeb 	hw_rfkill = iwl_is_rfkill_set(trans);
1809*6b627f88SBjoern A. Zeeb 	if (hw_rfkill) {
1810*6b627f88SBjoern A. Zeeb 		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
1811*6b627f88SBjoern A. Zeeb 		set_bit(STATUS_RFKILL_HW, &trans->status);
1812*6b627f88SBjoern A. Zeeb 	}
1813*6b627f88SBjoern A. Zeeb 	if (trans_pcie->opmode_down)
1814*6b627f88SBjoern A. Zeeb 		report = hw_rfkill;
1815*6b627f88SBjoern A. Zeeb 	else
1816*6b627f88SBjoern A. Zeeb 		report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
1817*6b627f88SBjoern A. Zeeb 
1818*6b627f88SBjoern A. Zeeb 	IWL_WARN(trans, "RF_KILL bit toggled to %s.\n",
1819*6b627f88SBjoern A. Zeeb 		 hw_rfkill ? "disable radio" : "enable radio");
1820*6b627f88SBjoern A. Zeeb 
1821*6b627f88SBjoern A. Zeeb 	isr_stats->rfkill++;
1822*6b627f88SBjoern A. Zeeb 
1823*6b627f88SBjoern A. Zeeb 	if (prev != report)
1824*6b627f88SBjoern A. Zeeb 		iwl_trans_pcie_rf_kill(trans, report, from_irq);
1825*6b627f88SBjoern A. Zeeb 	mutex_unlock(&trans_pcie->mutex);
1826*6b627f88SBjoern A. Zeeb 
1827*6b627f88SBjoern A. Zeeb 	if (hw_rfkill) {
1828*6b627f88SBjoern A. Zeeb 		if (test_and_clear_bit(STATUS_SYNC_HCMD_ACTIVE,
1829*6b627f88SBjoern A. Zeeb 				       &trans->status))
1830*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_RF_KILL(trans,
1831*6b627f88SBjoern A. Zeeb 					  "Rfkill while SYNC HCMD in flight\n");
1832*6b627f88SBjoern A. Zeeb 		wake_up(&trans_pcie->wait_command_queue);
1833*6b627f88SBjoern A. Zeeb 	} else {
1834*6b627f88SBjoern A. Zeeb 		clear_bit(STATUS_RFKILL_HW, &trans->status);
1835*6b627f88SBjoern A. Zeeb 		if (trans_pcie->opmode_down)
1836*6b627f88SBjoern A. Zeeb 			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
1837*6b627f88SBjoern A. Zeeb 	}
1838*6b627f88SBjoern A. Zeeb }
1839*6b627f88SBjoern A. Zeeb 
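/*
 * iwl_trans_pcie_handle_reset_interrupt - handle the reset-done interrupt
 *
 * Reads the reset state from CSR_IPC_STATE (pre-SC devices only report
 * SW_READY) and either completes a pending SW/TOP reset by waking the
 * waiter, or, for an unsolicited TOP reset triggered by BT, schedules a
 * full reset of the device.
 */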
1840*6b627f88SBjoern A. Zeeb static void iwl_trans_pcie_handle_reset_interrupt(struct iwl_trans *trans)
1841*6b627f88SBjoern A. Zeeb {
1842*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1843*6b627f88SBjoern A. Zeeb 	u32 state;
1844*6b627f88SBjoern A. Zeeb 
1845*6b627f88SBjoern A. Zeeb 	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_SC) {
1846*6b627f88SBjoern A. Zeeb 		u32 val = iwl_read32(trans, CSR_IPC_STATE);
1847*6b627f88SBjoern A. Zeeb 
1848*6b627f88SBjoern A. Zeeb 		state = u32_get_bits(val, CSR_IPC_STATE_RESET);
1849*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "IPC state = 0x%x/%d\n", val, state);
1850*6b627f88SBjoern A. Zeeb 	} else {
1851*6b627f88SBjoern A. Zeeb 		state = CSR_IPC_STATE_RESET_SW_READY;
1852*6b627f88SBjoern A. Zeeb 	}
1853*6b627f88SBjoern A. Zeeb 
1854*6b627f88SBjoern A. Zeeb 	switch (state) {
1855*6b627f88SBjoern A. Zeeb 	case CSR_IPC_STATE_RESET_SW_READY:
1856*6b627f88SBjoern A. Zeeb 		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
1857*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans, "Reset flow completed\n");
1858*6b627f88SBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_OK;
1859*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
1860*6b627f88SBjoern A. Zeeb 			break;
1861*6b627f88SBjoern A. Zeeb 		}
1862*6b627f88SBjoern A. Zeeb 		fallthrough;
1863*6b627f88SBjoern A. Zeeb 	case CSR_IPC_STATE_RESET_TOP_READY:
1864*6b627f88SBjoern A. Zeeb 		if (trans_pcie->fw_reset_state == FW_RESET_TOP_REQUESTED) {
1865*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans, "TOP Reset continues\n");
1866*6b627f88SBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_OK;
1867*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
1868*6b627f88SBjoern A. Zeeb 			break;
1869*6b627f88SBjoern A. Zeeb 		}
1870*6b627f88SBjoern A. Zeeb 		fallthrough;
1871*6b627f88SBjoern A. Zeeb 	case CSR_IPC_STATE_RESET_NONE:
1872*6b627f88SBjoern A. Zeeb 		IWL_FW_CHECK_FAILED(trans,
1873*6b627f88SBjoern A. Zeeb 				    "Invalid reset interrupt (state=%d)!\n",
1874*6b627f88SBjoern A. Zeeb 				    state);
1875*6b627f88SBjoern A. Zeeb 		break;
1876*6b627f88SBjoern A. Zeeb 	case CSR_IPC_STATE_RESET_TOP_FOLLOWER:
1877*6b627f88SBjoern A. Zeeb 		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
1878*6b627f88SBjoern A. Zeeb 			/* if we were in reset, wake that up */
1879*6b627f88SBjoern A. Zeeb 			IWL_INFO(trans,
1880*6b627f88SBjoern A. Zeeb 				 "TOP reset from BT while doing reset\n");
1881*6b627f88SBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_OK;
1882*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
1883*6b627f88SBjoern A. Zeeb 		} else {
1884*6b627f88SBjoern A. Zeeb 			IWL_INFO(trans, "TOP reset from BT\n");
1885*6b627f88SBjoern A. Zeeb 			trans->state = IWL_TRANS_NO_FW;
1886*6b627f88SBjoern A. Zeeb 			iwl_trans_schedule_reset(trans,
1887*6b627f88SBjoern A. Zeeb 						 IWL_ERR_TYPE_TOP_RESET_BY_BT);
1888*6b627f88SBjoern A. Zeeb 		}
1889*6b627f88SBjoern A. Zeeb 		break;
1890*6b627f88SBjoern A. Zeeb 	}
1891*6b627f88SBjoern A. Zeeb }
1892*6b627f88SBjoern A. Zeeb 
1893*6b627f88SBjoern A. Zeeb irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
1894*6b627f88SBjoern A. Zeeb {
1895*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans = dev_id;
1896*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1897*6b627f88SBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
1898*6b627f88SBjoern A. Zeeb 	u32 inta = 0;
1899*6b627f88SBjoern A. Zeeb 	u32 handled = 0;
1900*6b627f88SBjoern A. Zeeb 	bool polling = false;
1901*6b627f88SBjoern A. Zeeb 
1902*6b627f88SBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
1903*6b627f88SBjoern A. Zeeb 
1904*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
1905*6b627f88SBjoern A. Zeeb 
1906*6b627f88SBjoern A. Zeeb 	/* dram interrupt table not set yet,
1907*6b627f88SBjoern A. Zeeb 	 * use legacy interrupt.
1908*6b627f88SBjoern A. Zeeb 	 */
1909*6b627f88SBjoern A. Zeeb 	if (likely(trans_pcie->use_ict))
1910*6b627f88SBjoern A. Zeeb 		inta = iwl_pcie_int_cause_ict(trans);
1911*6b627f88SBjoern A. Zeeb 	else
1912*6b627f88SBjoern A. Zeeb 		inta = iwl_pcie_int_cause_non_ict(trans);
1913*6b627f88SBjoern A. Zeeb 
1914*6b627f88SBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
1915*6b627f88SBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
1916*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
1917*6b627f88SBjoern A. Zeeb 			      "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
1918*6b627f88SBjoern A. Zeeb 			      inta, trans_pcie->inta_mask,
1919*6b627f88SBjoern A. Zeeb 			      iwl_read32(trans, CSR_INT_MASK),
1920*6b627f88SBjoern A. Zeeb 			      iwl_read32(trans, CSR_FH_INT_STATUS));
1921*6b627f88SBjoern A. Zeeb 		if (inta & (~trans_pcie->inta_mask))
1922*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
1923*6b627f88SBjoern A. Zeeb 				      "We got a masked interrupt (0x%08x)\n",
1924*6b627f88SBjoern A. Zeeb 				      inta & (~trans_pcie->inta_mask));
1925*6b627f88SBjoern A. Zeeb 	}
1926*6b627f88SBjoern A. Zeeb #endif
1927*6b627f88SBjoern A. Zeeb 
1928*6b627f88SBjoern A. Zeeb 	inta &= trans_pcie->inta_mask;
1929*6b627f88SBjoern A. Zeeb 
1930*6b627f88SBjoern A. Zeeb 	/*
1931*6b627f88SBjoern A. Zeeb 	 * Ignore interrupt if there's nothing in NIC to service.
1932*6b627f88SBjoern A. Zeeb 	 * This may be due to IRQ shared with another device,
1933*6b627f88SBjoern A. Zeeb 	 * or due to sporadic interrupts thrown from our NIC.
1934*6b627f88SBjoern A. Zeeb 	 */
1935*6b627f88SBjoern A. Zeeb 	if (unlikely(!inta)) {
1936*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
1937*6b627f88SBjoern A. Zeeb 		/*
1938*6b627f88SBjoern A. Zeeb 		 * Re-enable interrupts here since we don't
1939*6b627f88SBjoern A. Zeeb 		 * have anything to service
1940*6b627f88SBjoern A. Zeeb 		 */
1941*6b627f88SBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
1942*6b627f88SBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
1943*6b627f88SBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
1944*6b627f88SBjoern A. Zeeb 		lock_map_release(&trans->sync_cmd_lockdep_map);
1945*6b627f88SBjoern A. Zeeb 		return IRQ_NONE;
1946*6b627f88SBjoern A. Zeeb 	}
1947*6b627f88SBjoern A. Zeeb 
1948*6b627f88SBjoern A. Zeeb 	if (unlikely(inta == 0xFFFFFFFF || iwl_trans_is_hw_error_value(inta))) {
1949*6b627f88SBjoern A. Zeeb 		/*
1950*6b627f88SBjoern A. Zeeb 		 * Hardware disappeared. It might have
1951*6b627f88SBjoern A. Zeeb 		 * already raised an interrupt.
1952*6b627f88SBjoern A. Zeeb 		 */
1953*6b627f88SBjoern A. Zeeb 		IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
1954*6b627f88SBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
1955*6b627f88SBjoern A. Zeeb 		goto out;
1956*6b627f88SBjoern A. Zeeb 	}
1957*6b627f88SBjoern A. Zeeb 
1958*6b627f88SBjoern A. Zeeb 	/* Ack/clear/reset pending uCode interrupts.
1959*6b627f88SBjoern A. Zeeb 	 * Note: some bits in CSR_INT are "OR"s of bits in CSR_FH_INT_STATUS.
1960*6b627f88SBjoern A. Zeeb 	 */
1961*6b627f88SBjoern A. Zeeb 	/* There is a hardware bug in the interrupt mask function: some
1962*6b627f88SBjoern A. Zeeb 	 * interrupts (i.e. CSR_INT_BIT_SCD) can still be generated even if
1963*6b627f88SBjoern A. Zeeb 	 * they are disabled in the CSR_INT_MASK register. Furthermore, the
1964*6b627f88SBjoern A. Zeeb 	 * ICT interrupt handling mechanism has another bug that may cause
1965*6b627f88SBjoern A. Zeeb 	 * these unmasked interrupts to go undetected. We work around the
1966*6b627f88SBjoern A. Zeeb 	 * hardware bugs here by ACKing all the possible interrupts so that
1967*6b627f88SBjoern A. Zeeb 	 * interrupt coalescing can still be achieved.
1968*6b627f88SBjoern A. Zeeb 	 */
1969*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
1970*6b627f88SBjoern A. Zeeb 
1971*6b627f88SBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
1972*6b627f88SBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR))
1973*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
1974*6b627f88SBjoern A. Zeeb 			      inta, iwl_read32(trans, CSR_INT_MASK));
1975*6b627f88SBjoern A. Zeeb #endif
1976*6b627f88SBjoern A. Zeeb 
1977*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
1978*6b627f88SBjoern A. Zeeb 
1979*6b627f88SBjoern A. Zeeb 	/* Now service all interrupt bits discovered above. */
1980*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_HW_ERR) {
1981*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "Hardware error detected.  Restarting.\n");
1982*6b627f88SBjoern A. Zeeb 
1983*6b627f88SBjoern A. Zeeb 		/* Tell the device to stop sending interrupts */
1984*6b627f88SBjoern A. Zeeb 		iwl_disable_interrupts(trans);
1985*6b627f88SBjoern A. Zeeb 
1986*6b627f88SBjoern A. Zeeb 		isr_stats->hw++;
1987*6b627f88SBjoern A. Zeeb 		iwl_pcie_irq_handle_error(trans);
1988*6b627f88SBjoern A. Zeeb 
1989*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_HW_ERR;
1990*6b627f88SBjoern A. Zeeb 
1991*6b627f88SBjoern A. Zeeb 		goto out;
1992*6b627f88SBjoern A. Zeeb 	}
1993*6b627f88SBjoern A. Zeeb 
1994*6b627f88SBjoern A. Zeeb 	/* NIC fires this, but we don't use it, redundant with WAKEUP */
1995*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_SCD) {
1996*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
1997*6b627f88SBjoern A. Zeeb 			      "Scheduler finished to transmit the frame/frames.\n");
1998*6b627f88SBjoern A. Zeeb 		isr_stats->sch++;
1999*6b627f88SBjoern A. Zeeb 	}
2000*6b627f88SBjoern A. Zeeb 
2001*6b627f88SBjoern A. Zeeb 	/* Alive notification via Rx interrupt will do the real work */
2002*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_ALIVE) {
2003*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2004*6b627f88SBjoern A. Zeeb 		isr_stats->alive++;
2005*6b627f88SBjoern A. Zeeb 		if (trans->mac_cfg->gen2) {
2006*6b627f88SBjoern A. Zeeb 			/*
2007*6b627f88SBjoern A. Zeeb 			 * We can restock, since firmware configured
2008*6b627f88SBjoern A. Zeeb 			 * the RFH
2009*6b627f88SBjoern A. Zeeb 			 */
2010*6b627f88SBjoern A. Zeeb 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2011*6b627f88SBjoern A. Zeeb 		}
2012*6b627f88SBjoern A. Zeeb 
2013*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_ALIVE;
2014*6b627f88SBjoern A. Zeeb 	}
2015*6b627f88SBjoern A. Zeeb 
2016*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_RESET_DONE) {
2017*6b627f88SBjoern A. Zeeb 		iwl_trans_pcie_handle_reset_interrupt(trans);
2018*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_RESET_DONE;
2019*6b627f88SBjoern A. Zeeb 	}
2020*6b627f88SBjoern A. Zeeb 
2021*6b627f88SBjoern A. Zeeb 	/* Safely ignore these bits for debug checks below */
2022*6b627f88SBjoern A. Zeeb 	inta &= ~(CSR_INT_BIT_SCD | CSR_INT_BIT_ALIVE);
2023*6b627f88SBjoern A. Zeeb 
2024*6b627f88SBjoern A. Zeeb 	/* HW RF KILL switch toggled */
2025*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_RF_KILL) {
2026*6b627f88SBjoern A. Zeeb 		iwl_pcie_handle_rfkill_irq(trans, true);
2027*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_RF_KILL;
2028*6b627f88SBjoern A. Zeeb 	}
2029*6b627f88SBjoern A. Zeeb 
2030*6b627f88SBjoern A. Zeeb 	/* Chip got too hot and stopped itself */
2031*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_CT_KILL) {
2032*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2033*6b627f88SBjoern A. Zeeb 		isr_stats->ctkill++;
2034*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_CT_KILL;
2035*6b627f88SBjoern A. Zeeb 	}
2036*6b627f88SBjoern A. Zeeb 
2037*6b627f88SBjoern A. Zeeb 	/* Error detected by uCode */
2038*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_SW_ERR) {
2039*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "Microcode SW error detected. "
2040*6b627f88SBjoern A. Zeeb 			"Restarting 0x%X.\n", inta);
2041*6b627f88SBjoern A. Zeeb 		isr_stats->sw++;
2042*6b627f88SBjoern A. Zeeb 		if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2043*6b627f88SBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_ERROR;
2044*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
2045*6b627f88SBjoern A. Zeeb 		} else {
2046*6b627f88SBjoern A. Zeeb 			iwl_pcie_irq_handle_error(trans);
2047*6b627f88SBjoern A. Zeeb 		}
2048*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_SW_ERR;
2049*6b627f88SBjoern A. Zeeb 	}
2050*6b627f88SBjoern A. Zeeb 
2051*6b627f88SBjoern A. Zeeb 	/* uCode wakes up after power-down sleep */
2052*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_WAKEUP) {
2053*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2054*6b627f88SBjoern A. Zeeb 		iwl_pcie_rxq_check_wrptr(trans);
2055*6b627f88SBjoern A. Zeeb 		iwl_pcie_txq_check_wrptrs(trans);
2056*6b627f88SBjoern A. Zeeb 
2057*6b627f88SBjoern A. Zeeb 		isr_stats->wakeup++;
2058*6b627f88SBjoern A. Zeeb 
2059*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_WAKEUP;
2060*6b627f88SBjoern A. Zeeb 	}
2061*6b627f88SBjoern A. Zeeb 
2062*6b627f88SBjoern A. Zeeb 	/* All uCode command responses, including Tx command responses,
2063*6b627f88SBjoern A. Zeeb 	 * Rx "responses" (frame-received notification), and other
2064*6b627f88SBjoern A. Zeeb 	 * notifications from uCode come through here. */
2065*6b627f88SBjoern A. Zeeb 	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX |
2066*6b627f88SBjoern A. Zeeb 		    CSR_INT_BIT_RX_PERIODIC)) {
2067*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Rx interrupt\n");
2068*6b627f88SBjoern A. Zeeb 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX)) {
2069*6b627f88SBjoern A. Zeeb 			handled |= (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX);
2070*6b627f88SBjoern A. Zeeb 			iwl_write32(trans, CSR_FH_INT_STATUS,
2071*6b627f88SBjoern A. Zeeb 					CSR_FH_INT_RX_MASK);
2072*6b627f88SBjoern A. Zeeb 		}
2073*6b627f88SBjoern A. Zeeb 		if (inta & CSR_INT_BIT_RX_PERIODIC) {
2074*6b627f88SBjoern A. Zeeb 			handled |= CSR_INT_BIT_RX_PERIODIC;
2075*6b627f88SBjoern A. Zeeb 			iwl_write32(trans,
2076*6b627f88SBjoern A. Zeeb 				CSR_INT, CSR_INT_BIT_RX_PERIODIC);
2077*6b627f88SBjoern A. Zeeb 		}
2078*6b627f88SBjoern A. Zeeb 		/* Sending an RX interrupt requires many steps to be done in the
2079*6b627f88SBjoern A. Zeeb 		 * device:
2080*6b627f88SBjoern A. Zeeb 		 * 1- write interrupt to current index in ICT table.
2081*6b627f88SBjoern A. Zeeb 		 * 2- dma RX frame.
2082*6b627f88SBjoern A. Zeeb 		 * 3- update RX shared data to indicate last write index.
2083*6b627f88SBjoern A. Zeeb 		 * 4- send interrupt.
2084*6b627f88SBjoern A. Zeeb 		 * This can lead to an RX race: the driver may receive the RX
2085*6b627f88SBjoern A. Zeeb 		 * interrupt before the shared data reflects the changes; the
2086*6b627f88SBjoern A. Zeeb 		 * periodic interrupt will detect any such dangling Rx activity.
2087*6b627f88SBjoern A. Zeeb 		 */
2088*6b627f88SBjoern A. Zeeb 
2089*6b627f88SBjoern A. Zeeb 		/* Disable periodic interrupt; we use it as just a one-shot. */
2090*6b627f88SBjoern A. Zeeb 		iwl_write8(trans, CSR_INT_PERIODIC_REG,
2091*6b627f88SBjoern A. Zeeb 			    CSR_INT_PERIODIC_DIS);
2092*6b627f88SBjoern A. Zeeb 
2093*6b627f88SBjoern A. Zeeb 		/*
2094*6b627f88SBjoern A. Zeeb 		 * Enable the periodic interrupt in 8 msec only if we received a
2095*6b627f88SBjoern A. Zeeb 		 * real RX interrupt (instead of just the periodic one), to catch
2096*6b627f88SBjoern A. Zeeb 		 * any dangling Rx interrupt.  If it was just the periodic
2097*6b627f88SBjoern A. Zeeb 		 * interrupt, there was no dangling Rx activity, and no need
2098*6b627f88SBjoern A. Zeeb 		 * to extend the periodic interrupt; one-shot is enough.
2099*6b627f88SBjoern A. Zeeb 		 */
2100*6b627f88SBjoern A. Zeeb 		if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
2101*6b627f88SBjoern A. Zeeb 			iwl_write8(trans, CSR_INT_PERIODIC_REG,
2102*6b627f88SBjoern A. Zeeb 				   CSR_INT_PERIODIC_ENA);
2103*6b627f88SBjoern A. Zeeb 
2104*6b627f88SBjoern A. Zeeb 		isr_stats->rx++;
2105*6b627f88SBjoern A. Zeeb 
2106*6b627f88SBjoern A. Zeeb 		local_bh_disable();
2107*6b627f88SBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2108*6b627f88SBjoern A. Zeeb 			polling = true;
2109*6b627f88SBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[0].napi);
2110*6b627f88SBjoern A. Zeeb 		}
2111*6b627f88SBjoern A. Zeeb 		local_bh_enable();
2112*6b627f88SBjoern A. Zeeb 	}
2113*6b627f88SBjoern A. Zeeb 
2114*6b627f88SBjoern A. Zeeb 	/* This "Tx" DMA channel is used only for loading uCode */
2115*6b627f88SBjoern A. Zeeb 	if (inta & CSR_INT_BIT_FH_TX) {
2116*6b627f88SBjoern A. Zeeb 		iwl_write32(trans, CSR_FH_INT_STATUS, CSR_FH_INT_TX_MASK);
2117*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2118*6b627f88SBjoern A. Zeeb 		isr_stats->tx++;
2119*6b627f88SBjoern A. Zeeb 		handled |= CSR_INT_BIT_FH_TX;
2120*6b627f88SBjoern A. Zeeb 		/* Wake up uCode load routine, now that load is complete */
2121*6b627f88SBjoern A. Zeeb 		trans_pcie->ucode_write_complete = true;
2122*6b627f88SBjoern A. Zeeb 		wake_up(&trans_pcie->ucode_write_waitq);
2123*6b627f88SBjoern A. Zeeb 		/* Wake up IMR write routine, now that write to SRAM is complete */
2124*6b627f88SBjoern A. Zeeb 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2125*6b627f88SBjoern A. Zeeb 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2126*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->ucode_write_waitq);
2127*6b627f88SBjoern A. Zeeb 		}
2128*6b627f88SBjoern A. Zeeb 	}
2129*6b627f88SBjoern A. Zeeb 
2130*6b627f88SBjoern A. Zeeb 	if (inta & ~handled) {
2131*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "Unhandled INTA bits 0x%08x\n", inta & ~handled);
2132*6b627f88SBjoern A. Zeeb 		isr_stats->unhandled++;
2133*6b627f88SBjoern A. Zeeb 	}
2134*6b627f88SBjoern A. Zeeb 
2135*6b627f88SBjoern A. Zeeb 	if (inta & ~(trans_pcie->inta_mask)) {
2136*6b627f88SBjoern A. Zeeb 		IWL_WARN(trans, "Disabled INTA bits 0x%08x were pending\n",
2137*6b627f88SBjoern A. Zeeb 			 inta & ~trans_pcie->inta_mask);
2138*6b627f88SBjoern A. Zeeb 	}
2139*6b627f88SBjoern A. Zeeb 
2140*6b627f88SBjoern A. Zeeb 	if (!polling) {
2141*6b627f88SBjoern A. Zeeb 		spin_lock_bh(&trans_pcie->irq_lock);
2142*6b627f88SBjoern A. Zeeb 		/* only re-enable all interrupts if they were disabled by the irq */
2143*6b627f88SBjoern A. Zeeb 		if (test_bit(STATUS_INT_ENABLED, &trans->status))
2144*6b627f88SBjoern A. Zeeb 			_iwl_enable_interrupts(trans);
2145*6b627f88SBjoern A. Zeeb 		/* if we are loading the firmware, enable the FH_TX interrupt only */
2146*6b627f88SBjoern A. Zeeb 		else if (handled & CSR_INT_BIT_FH_TX)
2147*6b627f88SBjoern A. Zeeb 			iwl_enable_fw_load_int(trans);
2148*6b627f88SBjoern A. Zeeb 		/* Re-enable RF_KILL if it occurred */
2149*6b627f88SBjoern A. Zeeb 		else if (handled & CSR_INT_BIT_RF_KILL)
2150*6b627f88SBjoern A. Zeeb 			iwl_enable_rfkill_int(trans);
2151*6b627f88SBjoern A. Zeeb 		/* Re-enable the ALIVE / Rx interrupt if it occurred */
2152*6b627f88SBjoern A. Zeeb 		else if (handled & (CSR_INT_BIT_ALIVE | CSR_INT_BIT_FH_RX))
2153*6b627f88SBjoern A. Zeeb 			iwl_enable_fw_load_int_ctx_info(trans, false);
2154*6b627f88SBjoern A. Zeeb 		spin_unlock_bh(&trans_pcie->irq_lock);
2155*6b627f88SBjoern A. Zeeb 	}
2156*6b627f88SBjoern A. Zeeb 
2157*6b627f88SBjoern A. Zeeb out:
2158*6b627f88SBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
2159*6b627f88SBjoern A. Zeeb 	return IRQ_HANDLED;
2160*6b627f88SBjoern A. Zeeb }
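
/*
 * Condensed sketch, not a helper that exists in the driver: the one-shot
 * CSR_INT_PERIODIC handling from the Rx branch of iwl_pcie_irq_handler()
 * above.  The periodic interrupt is disabled on every pass and re-armed only
 * when a real FH/SW Rx cause was seen, so a frame whose shared-data update
 * raced with the interrupt is still picked up by one extra periodic
 * interrupt.  "inta" is the already-masked cause word.
 */
static void __maybe_unused
iwl_pcie_example_rearm_rx_periodic(struct iwl_trans *trans, u32 inta)
{
	/* the periodic interrupt is used as a one-shot: always stop it */
	iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_DIS);

	/* re-arm only when a real Rx cause fired, to catch dangling Rx */
	if (inta & (CSR_INT_BIT_FH_RX | CSR_INT_BIT_SW_RX))
		iwl_write8(trans, CSR_INT_PERIODIC_REG, CSR_INT_PERIODIC_ENA);
}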
2161*6b627f88SBjoern A. Zeeb 
2162*6b627f88SBjoern A. Zeeb /******************************************************************************
2163*6b627f88SBjoern A. Zeeb  *
2164*6b627f88SBjoern A. Zeeb  * ICT functions
2165*6b627f88SBjoern A. Zeeb  *
2166*6b627f88SBjoern A. Zeeb  ******************************************************************************/
2167*6b627f88SBjoern A. Zeeb 
2168*6b627f88SBjoern A. Zeeb /* Free dram table */
2169*6b627f88SBjoern A. Zeeb void iwl_pcie_free_ict(struct iwl_trans *trans)
2170*6b627f88SBjoern A. Zeeb {
2171*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2172*6b627f88SBjoern A. Zeeb 
2173*6b627f88SBjoern A. Zeeb 	if (trans_pcie->ict_tbl) {
2174*6b627f88SBjoern A. Zeeb 		dma_free_coherent(trans->dev, ICT_SIZE,
2175*6b627f88SBjoern A. Zeeb 				  trans_pcie->ict_tbl,
2176*6b627f88SBjoern A. Zeeb 				  trans_pcie->ict_tbl_dma);
2177*6b627f88SBjoern A. Zeeb 		trans_pcie->ict_tbl = NULL;
2178*6b627f88SBjoern A. Zeeb 		trans_pcie->ict_tbl_dma = 0;
2179*6b627f88SBjoern A. Zeeb 	}
2180*6b627f88SBjoern A. Zeeb }
2181*6b627f88SBjoern A. Zeeb 
2182*6b627f88SBjoern A. Zeeb /*
2183*6b627f88SBjoern A. Zeeb  * Allocate the DRAM-shared table: an aligned memory block
2184*6b627f88SBjoern A. Zeeb  * of ICT_SIZE.
2185*6b627f88SBjoern A. Zeeb  * Also reset all data related to the ICT table interrupt.
2186*6b627f88SBjoern A. Zeeb  */
2187*6b627f88SBjoern A. Zeeb int iwl_pcie_alloc_ict(struct iwl_trans *trans)
2188*6b627f88SBjoern A. Zeeb {
2189*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2190*6b627f88SBjoern A. Zeeb 
2191*6b627f88SBjoern A. Zeeb 	trans_pcie->ict_tbl =
2192*6b627f88SBjoern A. Zeeb 		dma_alloc_coherent(trans->dev, ICT_SIZE,
2193*6b627f88SBjoern A. Zeeb 				   &trans_pcie->ict_tbl_dma, GFP_KERNEL);
2194*6b627f88SBjoern A. Zeeb 	if (!trans_pcie->ict_tbl)
2195*6b627f88SBjoern A. Zeeb 		return -ENOMEM;
2196*6b627f88SBjoern A. Zeeb 
2197*6b627f88SBjoern A. Zeeb 	/* just an API sanity check ... it is guaranteed to be aligned */
2198*6b627f88SBjoern A. Zeeb 	if (WARN_ON(trans_pcie->ict_tbl_dma & (ICT_SIZE - 1))) {
2199*6b627f88SBjoern A. Zeeb 		iwl_pcie_free_ict(trans);
2200*6b627f88SBjoern A. Zeeb 		return -EINVAL;
2201*6b627f88SBjoern A. Zeeb 	}
2202*6b627f88SBjoern A. Zeeb 
2203*6b627f88SBjoern A. Zeeb 	return 0;
2204*6b627f88SBjoern A. Zeeb }
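
/*
 * Usage sketch (illustration only, assuming the prototypes from internal.h):
 * the typical lifecycle of the ICT table around device bring-up.  The helper
 * name and the placement of the calls are hypothetical; the calls themselves
 * and their return convention come from the functions in this file.
 */
static int __maybe_unused
iwl_pcie_example_ict_lifecycle(struct iwl_trans *trans)
{
	int ret;

	/* allocate the ICT_SIZE-aligned DMA table (alignment is sanity
	 * checked by the WARN_ON() above)
	 */
	ret = iwl_pcie_alloc_ict(trans);
	if (ret)
		return ret;	/* -ENOMEM, or -EINVAL on bad alignment */

	/* once the device is going up, point CSR_DRAM_INT_TBL_REG at the
	 * table and switch the cause readout to the ICT path
	 */
	iwl_pcie_reset_ict(trans);

	/* ... device runs; iwl_pcie_disable_ict() is used on the way down ... */

	iwl_pcie_free_ict(trans);
	return 0;
}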
2205*6b627f88SBjoern A. Zeeb 
2206*6b627f88SBjoern A. Zeeb /* The device is going up: inform it that it should use the ICT interrupt
2207*6b627f88SBjoern A. Zeeb  * table, and tell the driver to start using the ICT interrupt path.
2208*6b627f88SBjoern A. Zeeb  */
2209*6b627f88SBjoern A. Zeeb void iwl_pcie_reset_ict(struct iwl_trans *trans)
2210*6b627f88SBjoern A. Zeeb {
2211*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2212*6b627f88SBjoern A. Zeeb 	u32 val;
2213*6b627f88SBjoern A. Zeeb 
2214*6b627f88SBjoern A. Zeeb 	if (!trans_pcie->ict_tbl)
2215*6b627f88SBjoern A. Zeeb 		return;
2216*6b627f88SBjoern A. Zeeb 
2217*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2218*6b627f88SBjoern A. Zeeb 	_iwl_disable_interrupts(trans);
2219*6b627f88SBjoern A. Zeeb 
2220*6b627f88SBjoern A. Zeeb 	memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
2221*6b627f88SBjoern A. Zeeb 
2222*6b627f88SBjoern A. Zeeb 	val = trans_pcie->ict_tbl_dma >> ICT_SHIFT;
2223*6b627f88SBjoern A. Zeeb 
2224*6b627f88SBjoern A. Zeeb 	val |= CSR_DRAM_INT_TBL_ENABLE |
2225*6b627f88SBjoern A. Zeeb 	       CSR_DRAM_INIT_TBL_WRAP_CHECK |
2226*6b627f88SBjoern A. Zeeb 	       CSR_DRAM_INIT_TBL_WRITE_POINTER;
2227*6b627f88SBjoern A. Zeeb 
2228*6b627f88SBjoern A. Zeeb 	IWL_DEBUG_ISR(trans, "CSR_DRAM_INT_TBL_REG =0x%x\n", val);
2229*6b627f88SBjoern A. Zeeb 
2230*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_DRAM_INT_TBL_REG, val);
2231*6b627f88SBjoern A. Zeeb 	trans_pcie->use_ict = true;
2232*6b627f88SBjoern A. Zeeb 	trans_pcie->ict_index = 0;
2233*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
2234*6b627f88SBjoern A. Zeeb 	_iwl_enable_interrupts(trans);
2235*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2236*6b627f88SBjoern A. Zeeb }
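
/*
 * Simplified sketch of how a device-written cause table like the one
 * programmed above can be drained by the host.  This is NOT the driver's
 * iwl_pcie_int_cause_ict(); it only illustrates the idea behind the
 * write-pointer/wrap-check flags: the device writes non-zero cause words,
 * the host ORs them together, clears each slot back to zero and stops at
 * the first zero entry.  The parameters are generic placeholders.
 */
static u32 __maybe_unused
iwl_pcie_example_drain_cause_table(__le32 *tbl, u32 *index, u32 entries)
{
	u32 val = 0;

	while (le32_to_cpu(tbl[*index])) {
		val |= le32_to_cpu(tbl[*index]);
		/* hand the slot back to the device */
		tbl[*index] = 0;
		*index = (*index + 1) % entries;
	}

	return val;
}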
2237*6b627f88SBjoern A. Zeeb 
2238*6b627f88SBjoern A. Zeeb /* The device is going down: disable ICT interrupt usage */
2239*6b627f88SBjoern A. Zeeb void iwl_pcie_disable_ict(struct iwl_trans *trans)
2240*6b627f88SBjoern A. Zeeb {
2241*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2242*6b627f88SBjoern A. Zeeb 
2243*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2244*6b627f88SBjoern A. Zeeb 	trans_pcie->use_ict = false;
2245*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2246*6b627f88SBjoern A. Zeeb }
2247*6b627f88SBjoern A. Zeeb 
2248*6b627f88SBjoern A. Zeeb irqreturn_t iwl_pcie_isr(int irq, void *data)
2249*6b627f88SBjoern A. Zeeb {
2250*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans = data;
2251*6b627f88SBjoern A. Zeeb 
2252*6b627f88SBjoern A. Zeeb 	if (!trans)
2253*6b627f88SBjoern A. Zeeb 		return IRQ_NONE;
2254*6b627f88SBjoern A. Zeeb 
2255*6b627f88SBjoern A. Zeeb 	/* Disable (but don't clear!) interrupts here to avoid
2256*6b627f88SBjoern A. Zeeb 	 * back-to-back ISRs and sporadic interrupts from our NIC.
2257*6b627f88SBjoern A. Zeeb 	 * If we have something to service, the IRQ thread will re-enable ints.
2258*6b627f88SBjoern A. Zeeb 	 * If we *don't* have something, it will re-enable them before returning.
2259*6b627f88SBjoern A. Zeeb 	 */
2260*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_INT_MASK, 0x00000000);
2261*6b627f88SBjoern A. Zeeb 
2262*6b627f88SBjoern A. Zeeb 	return IRQ_WAKE_THREAD;
2263*6b627f88SBjoern A. Zeeb }
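
/*
 * Illustration only (the driver's real registration lives in the PCIe
 * transport setup code): iwl_pcie_isr() above is the hard handler that just
 * masks CSR_INT_MASK and returns IRQ_WAKE_THREAD, while
 * iwl_pcie_irq_handler() does the actual work in a thread -- the classic
 * request_threaded_irq() split.  IRQF_SHARED and the name string are
 * assumptions for the example.
 */
static int __maybe_unused
iwl_pcie_example_request_irq(struct iwl_trans *trans, int irq)
{
	return request_threaded_irq(irq, iwl_pcie_isr,
				    iwl_pcie_irq_handler,
				    IRQF_SHARED, "iwlwifi-example", trans);
}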
2264*6b627f88SBjoern A. Zeeb 
2265*6b627f88SBjoern A. Zeeb irqreturn_t iwl_pcie_msix_isr(int irq, void *data)
2266*6b627f88SBjoern A. Zeeb {
2267*6b627f88SBjoern A. Zeeb 	return IRQ_WAKE_THREAD;
2268*6b627f88SBjoern A. Zeeb }
2269*6b627f88SBjoern A. Zeeb 
2270*6b627f88SBjoern A. Zeeb irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id)
2271*6b627f88SBjoern A. Zeeb {
2272*6b627f88SBjoern A. Zeeb 	struct msix_entry *entry = dev_id;
2273*6b627f88SBjoern A. Zeeb 	struct iwl_trans_pcie *trans_pcie = iwl_pcie_get_trans_pcie(entry);
2274*6b627f88SBjoern A. Zeeb 	struct iwl_trans *trans = trans_pcie->trans;
2275*6b627f88SBjoern A. Zeeb 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2276*6b627f88SBjoern A. Zeeb 	u32 inta_fh_msk = ~MSIX_FH_INT_CAUSES_DATA_QUEUE;
2277*6b627f88SBjoern A. Zeeb 	u32 inta_fh, inta_hw;
2278*6b627f88SBjoern A. Zeeb 	bool polling = false;
2279*6b627f88SBjoern A. Zeeb 	bool sw_err;
2280*6b627f88SBjoern A. Zeeb 
2281*6b627f88SBjoern A. Zeeb 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
2282*6b627f88SBjoern A. Zeeb 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q0;
2283*6b627f88SBjoern A. Zeeb 
2284*6b627f88SBjoern A. Zeeb 	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
2285*6b627f88SBjoern A. Zeeb 		inta_fh_msk |= MSIX_FH_INT_CAUSES_Q1;
2286*6b627f88SBjoern A. Zeeb 
2287*6b627f88SBjoern A. Zeeb 	lock_map_acquire(&trans->sync_cmd_lockdep_map);
2288*6b627f88SBjoern A. Zeeb 
2289*6b627f88SBjoern A. Zeeb 	spin_lock_bh(&trans_pcie->irq_lock);
2290*6b627f88SBjoern A. Zeeb 	inta_fh = iwl_read32(trans, CSR_MSIX_FH_INT_CAUSES_AD);
2291*6b627f88SBjoern A. Zeeb 	inta_hw = iwl_read32(trans, CSR_MSIX_HW_INT_CAUSES_AD);
2292*6b627f88SBjoern A. Zeeb 	/*
2293*6b627f88SBjoern A. Zeeb 	 * Clear the cause registers to avoid handling the same cause again.
2294*6b627f88SBjoern A. Zeeb 	 */
2295*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_MSIX_FH_INT_CAUSES_AD, inta_fh & inta_fh_msk);
2296*6b627f88SBjoern A. Zeeb 	iwl_write32(trans, CSR_MSIX_HW_INT_CAUSES_AD, inta_hw);
2297*6b627f88SBjoern A. Zeeb 	spin_unlock_bh(&trans_pcie->irq_lock);
2298*6b627f88SBjoern A. Zeeb 
2299*6b627f88SBjoern A. Zeeb 	trace_iwlwifi_dev_irq_msix(trans->dev, entry, true, inta_fh, inta_hw);
2300*6b627f88SBjoern A. Zeeb 
2301*6b627f88SBjoern A. Zeeb 	if (unlikely(!(inta_fh | inta_hw))) {
2302*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
2303*6b627f88SBjoern A. Zeeb 		lock_map_release(&trans->sync_cmd_lockdep_map);
2304*6b627f88SBjoern A. Zeeb 		return IRQ_NONE;
2305*6b627f88SBjoern A. Zeeb 	}
2306*6b627f88SBjoern A. Zeeb 
2307*6b627f88SBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
2308*6b627f88SBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2309*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
2310*6b627f88SBjoern A. Zeeb 			      "ISR[%d] inta_fh 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2311*6b627f88SBjoern A. Zeeb 			      entry->entry, inta_fh, trans_pcie->fh_mask,
2312*6b627f88SBjoern A. Zeeb 			      iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD));
2313*6b627f88SBjoern A. Zeeb 		if (inta_fh & ~trans_pcie->fh_mask)
2314*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2315*6b627f88SBjoern A. Zeeb 				      "We got a masked interrupt (0x%08x)\n",
2316*6b627f88SBjoern A. Zeeb 				      inta_fh & ~trans_pcie->fh_mask);
2317*6b627f88SBjoern A. Zeeb 	}
2318*6b627f88SBjoern A. Zeeb #endif
2319*6b627f88SBjoern A. Zeeb 
2320*6b627f88SBjoern A. Zeeb 	inta_fh &= trans_pcie->fh_mask;
2321*6b627f88SBjoern A. Zeeb 
2322*6b627f88SBjoern A. Zeeb 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX) &&
2323*6b627f88SBjoern A. Zeeb 	    inta_fh & MSIX_FH_INT_CAUSES_Q0) {
2324*6b627f88SBjoern A. Zeeb 		local_bh_disable();
2325*6b627f88SBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[0].napi)) {
2326*6b627f88SBjoern A. Zeeb 			polling = true;
2327*6b627f88SBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[0].napi);
2328*6b627f88SBjoern A. Zeeb 		}
2329*6b627f88SBjoern A. Zeeb 		local_bh_enable();
2330*6b627f88SBjoern A. Zeeb 	}
2331*6b627f88SBjoern A. Zeeb 
2332*6b627f88SBjoern A. Zeeb 	if ((trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS) &&
2333*6b627f88SBjoern A. Zeeb 	    inta_fh & MSIX_FH_INT_CAUSES_Q1) {
2334*6b627f88SBjoern A. Zeeb 		local_bh_disable();
2335*6b627f88SBjoern A. Zeeb 		if (napi_schedule_prep(&trans_pcie->rxq[1].napi)) {
2336*6b627f88SBjoern A. Zeeb 			polling = true;
2337*6b627f88SBjoern A. Zeeb 			__napi_schedule(&trans_pcie->rxq[1].napi);
2338*6b627f88SBjoern A. Zeeb 		}
2339*6b627f88SBjoern A. Zeeb 		local_bh_enable();
2340*6b627f88SBjoern A. Zeeb 	}
2341*6b627f88SBjoern A. Zeeb 
2342*6b627f88SBjoern A. Zeeb 	/* This "Tx" DMA channel is used only for loading uCode */
2343*6b627f88SBjoern A. Zeeb 	if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM &&
2344*6b627f88SBjoern A. Zeeb 	    trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2345*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "IMR Complete interrupt\n");
2346*6b627f88SBjoern A. Zeeb 		isr_stats->tx++;
2347*6b627f88SBjoern A. Zeeb 
2348*6b627f88SBjoern A. Zeeb 		/* Wake up IMR routine once write to SRAM is complete */
2349*6b627f88SBjoern A. Zeeb 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2350*6b627f88SBjoern A. Zeeb 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2351*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->ucode_write_waitq);
2352*6b627f88SBjoern A. Zeeb 		}
2353*6b627f88SBjoern A. Zeeb 	} else if (inta_fh & MSIX_FH_INT_CAUSES_D2S_CH0_NUM) {
2354*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "uCode load interrupt\n");
2355*6b627f88SBjoern A. Zeeb 		isr_stats->tx++;
2356*6b627f88SBjoern A. Zeeb 		/*
2357*6b627f88SBjoern A. Zeeb 		 * Wake up uCode load routine,
2358*6b627f88SBjoern A. Zeeb 		 * now that load is complete
2359*6b627f88SBjoern A. Zeeb 		 */
2360*6b627f88SBjoern A. Zeeb 		trans_pcie->ucode_write_complete = true;
2361*6b627f88SBjoern A. Zeeb 		wake_up(&trans_pcie->ucode_write_waitq);
2362*6b627f88SBjoern A. Zeeb 
2363*6b627f88SBjoern A. Zeeb 		/* Wake up IMR routine once write to SRAM is complete */
2364*6b627f88SBjoern A. Zeeb 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2365*6b627f88SBjoern A. Zeeb 			trans_pcie->imr_status = IMR_D2S_COMPLETED;
2366*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->ucode_write_waitq);
2367*6b627f88SBjoern A. Zeeb 		}
2368*6b627f88SBjoern A. Zeeb 	}
2369*6b627f88SBjoern A. Zeeb 
2370*6b627f88SBjoern A. Zeeb 	if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ)
2371*6b627f88SBjoern A. Zeeb 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_BZ;
2372*6b627f88SBjoern A. Zeeb 	else
2373*6b627f88SBjoern A. Zeeb 		sw_err = inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR;
2374*6b627f88SBjoern A. Zeeb 
2375*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_TOP_FATAL_ERR) {
2376*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "TOP Fatal error detected, inta_hw=0x%x.\n",
2377*6b627f88SBjoern A. Zeeb 			inta_hw);
2378*6b627f88SBjoern A. Zeeb 		if (trans->mac_cfg->device_family >= IWL_DEVICE_FAMILY_BZ) {
2379*6b627f88SBjoern A. Zeeb 			trans->request_top_reset = 1;
2380*6b627f88SBjoern A. Zeeb 			iwl_op_mode_nic_error(trans->op_mode,
2381*6b627f88SBjoern A. Zeeb 					      IWL_ERR_TYPE_TOP_FATAL_ERROR);
2382*6b627f88SBjoern A. Zeeb 			iwl_trans_schedule_reset(trans,
2383*6b627f88SBjoern A. Zeeb 						 IWL_ERR_TYPE_TOP_FATAL_ERROR);
2384*6b627f88SBjoern A. Zeeb 		}
2385*6b627f88SBjoern A. Zeeb 	}
2386*6b627f88SBjoern A. Zeeb 
2387*6b627f88SBjoern A. Zeeb 	/* Error detected by uCode */
2388*6b627f88SBjoern A. Zeeb 	if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || sw_err) {
2389*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans,
2390*6b627f88SBjoern A. Zeeb 			"Microcode SW error detected. Restarting 0x%X.\n",
2391*6b627f88SBjoern A. Zeeb 			inta_fh);
2392*6b627f88SBjoern A. Zeeb 		isr_stats->sw++;
2393*6b627f88SBjoern A. Zeeb 		/* during the FW reset flow, report errors from there */
2394*6b627f88SBjoern A. Zeeb 		if (trans_pcie->imr_status == IMR_D2S_REQUESTED) {
2395*6b627f88SBjoern A. Zeeb 			trans_pcie->imr_status = IMR_D2S_ERROR;
2396*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->imr_waitq);
2397*6b627f88SBjoern A. Zeeb 		} else if (trans_pcie->fw_reset_state == FW_RESET_REQUESTED) {
2398*6b627f88SBjoern A. Zeeb 			trans_pcie->fw_reset_state = FW_RESET_ERROR;
2399*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->fw_reset_waitq);
2400*6b627f88SBjoern A. Zeeb 		} else {
2401*6b627f88SBjoern A. Zeeb 			iwl_pcie_irq_handle_error(trans);
2402*6b627f88SBjoern A. Zeeb 		}
2403*6b627f88SBjoern A. Zeeb 
2404*6b627f88SBjoern A. Zeeb 		if (trans_pcie->sx_state == IWL_SX_WAITING) {
2405*6b627f88SBjoern A. Zeeb 			trans_pcie->sx_state = IWL_SX_ERROR;
2406*6b627f88SBjoern A. Zeeb 			wake_up(&trans_pcie->sx_waitq);
2407*6b627f88SBjoern A. Zeeb 		}
2408*6b627f88SBjoern A. Zeeb 	}
2409*6b627f88SBjoern A. Zeeb 
2410*6b627f88SBjoern A. Zeeb 	/* After checking the FH register, check the HW register */
2411*6b627f88SBjoern A. Zeeb #ifdef CONFIG_IWLWIFI_DEBUG
2412*6b627f88SBjoern A. Zeeb 	if (iwl_have_debug_level(IWL_DL_ISR)) {
2413*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans,
2414*6b627f88SBjoern A. Zeeb 			      "ISR[%d] inta_hw 0x%08x, enabled (sw) 0x%08x (hw) 0x%08x\n",
2415*6b627f88SBjoern A. Zeeb 			      entry->entry, inta_hw, trans_pcie->hw_mask,
2416*6b627f88SBjoern A. Zeeb 			      iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD));
2417*6b627f88SBjoern A. Zeeb 		if (inta_hw & ~trans_pcie->hw_mask)
2418*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2419*6b627f88SBjoern A. Zeeb 				      "We got a masked interrupt 0x%08x\n",
2420*6b627f88SBjoern A. Zeeb 				      inta_hw & ~trans_pcie->hw_mask);
2421*6b627f88SBjoern A. Zeeb 	}
2422*6b627f88SBjoern A. Zeeb #endif
2423*6b627f88SBjoern A. Zeeb 
2424*6b627f88SBjoern A. Zeeb 	inta_hw &= trans_pcie->hw_mask;
2425*6b627f88SBjoern A. Zeeb 
2426*6b627f88SBjoern A. Zeeb 	/* Alive notification via Rx interrupt will do the real work */
2427*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_ALIVE) {
2428*6b627f88SBjoern A. Zeeb 		IWL_DEBUG_ISR(trans, "Alive interrupt\n");
2429*6b627f88SBjoern A. Zeeb 		isr_stats->alive++;
2430*6b627f88SBjoern A. Zeeb 		if (trans->mac_cfg->gen2) {
2431*6b627f88SBjoern A. Zeeb 			/* We can restock, since firmware configured the RFH */
2432*6b627f88SBjoern A. Zeeb 			iwl_pcie_rxmq_restock(trans, trans_pcie->rxq);
2433*6b627f88SBjoern A. Zeeb 		}
2434*6b627f88SBjoern A. Zeeb 	}
2435*6b627f88SBjoern A. Zeeb 
2436*6b627f88SBjoern A. Zeeb 	/*
2437*6b627f88SBjoern A. Zeeb 	 * In some rare cases when the HW is in a bad state, we may
2438*6b627f88SBjoern A. Zeeb 	 * get this interrupt too early, when prph_info is still NULL.
2439*6b627f88SBjoern A. Zeeb 	 * So make sure that it's not NULL to prevent crashing.
2440*6b627f88SBjoern A. Zeeb 	 */
2441*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP && trans_pcie->prph_info) {
2442*6b627f88SBjoern A. Zeeb 		u32 sleep_notif =
2443*6b627f88SBjoern A. Zeeb 			le32_to_cpu(trans_pcie->prph_info->sleep_notif);
2444*6b627f88SBjoern A. Zeeb 
2445*6b627f88SBjoern A. Zeeb 		if (sleep_notif == IWL_D3_SLEEP_STATUS_SUSPEND ||
2446*6b627f88SBjoern A. Zeeb 		    sleep_notif == IWL_D3_SLEEP_STATUS_RESUME) {
2447*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans,
2448*6b627f88SBjoern A. Zeeb 				      "Sx interrupt: sleep notification = 0x%x\n",
2449*6b627f88SBjoern A. Zeeb 				      sleep_notif);
2450*6b627f88SBjoern A. Zeeb 			if (trans_pcie->sx_state == IWL_SX_WAITING) {
2451*6b627f88SBjoern A. Zeeb 				trans_pcie->sx_state = IWL_SX_COMPLETE;
2452*6b627f88SBjoern A. Zeeb 				wake_up(&trans_pcie->sx_waitq);
2453*6b627f88SBjoern A. Zeeb 			} else {
2454*6b627f88SBjoern A. Zeeb 				IWL_ERR(trans,
2455*6b627f88SBjoern A. Zeeb 					"unexpected Sx interrupt (0x%x)\n",
2456*6b627f88SBjoern A. Zeeb 					sleep_notif);
2457*6b627f88SBjoern A. Zeeb 			}
2458*6b627f88SBjoern A. Zeeb 		} else {
2459*6b627f88SBjoern A. Zeeb 			/* uCode wakes up after power-down sleep */
2460*6b627f88SBjoern A. Zeeb 			IWL_DEBUG_ISR(trans, "Wakeup interrupt\n");
2461*6b627f88SBjoern A. Zeeb 			iwl_pcie_rxq_check_wrptr(trans);
2462*6b627f88SBjoern A. Zeeb 			iwl_pcie_txq_check_wrptrs(trans);
2463*6b627f88SBjoern A. Zeeb 
2464*6b627f88SBjoern A. Zeeb 			isr_stats->wakeup++;
2465*6b627f88SBjoern A. Zeeb 		}
2466*6b627f88SBjoern A. Zeeb 	}
2467*6b627f88SBjoern A. Zeeb 
2468*6b627f88SBjoern A. Zeeb 	/* Chip got too hot and stopped itself */
2469*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_CT_KILL) {
2470*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans, "Microcode CT kill error detected.\n");
2471*6b627f88SBjoern A. Zeeb 		isr_stats->ctkill++;
2472*6b627f88SBjoern A. Zeeb 	}
2473*6b627f88SBjoern A. Zeeb 
2474*6b627f88SBjoern A. Zeeb 	/* HW RF KILL switch toggled */
2475*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RF_KILL)
2476*6b627f88SBjoern A. Zeeb 		iwl_pcie_handle_rfkill_irq(trans, true);
2477*6b627f88SBjoern A. Zeeb 
2478*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_HW_ERR) {
2479*6b627f88SBjoern A. Zeeb 		IWL_ERR(trans,
2480*6b627f88SBjoern A. Zeeb 			"Hardware error detected. Restarting.\n");
2481*6b627f88SBjoern A. Zeeb 
2482*6b627f88SBjoern A. Zeeb 		isr_stats->hw++;
2483*6b627f88SBjoern A. Zeeb 		trans->dbg.hw_error = true;
2484*6b627f88SBjoern A. Zeeb 		iwl_pcie_irq_handle_error(trans);
2485*6b627f88SBjoern A. Zeeb 	}
2486*6b627f88SBjoern A. Zeeb 
2487*6b627f88SBjoern A. Zeeb 	if (inta_hw & MSIX_HW_INT_CAUSES_REG_RESET_DONE)
2488*6b627f88SBjoern A. Zeeb 		iwl_trans_pcie_handle_reset_interrupt(trans);
2489*6b627f88SBjoern A. Zeeb 
2490*6b627f88SBjoern A. Zeeb 	if (!polling)
2491*6b627f88SBjoern A. Zeeb 		iwl_pcie_clear_irq(trans, entry->entry);
2492*6b627f88SBjoern A. Zeeb 
2493*6b627f88SBjoern A. Zeeb 	lock_map_release(&trans->sync_cmd_lockdep_map);
2494*6b627f88SBjoern A. Zeeb 
2495*6b627f88SBjoern A. Zeeb 	return IRQ_HANDLED;
2496*6b627f88SBjoern A. Zeeb }
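
/*
 * Minimal sketch of the NAPI hand-off used by both interrupt handlers in
 * this file (this helper does not exist in the driver): schedule the Rx
 * queue's NAPI poll and report whether polling took over, which is how the
 * "polling" flag above decides to skip re-enabling/clearing the interrupt.
 */
static bool __maybe_unused
iwl_pcie_example_schedule_napi(struct napi_struct *napi)
{
	bool polling = false;

	local_bh_disable();
	/* napi_schedule_prep() returns false if the poll is already
	 * scheduled or running
	 */
	if (napi_schedule_prep(napi)) {
		polling = true;
		__napi_schedule(napi);
	}
	local_bh_enable();

	return polling;
}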
2497