// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
 * Copyright (C) 2007-2015, 2018-2020 Intel Corporation
 * Copyright (C) 2013-2015 Intel Mobile Communications GmbH
 * Copyright (C) 2016-2017 Intel Deutschland GmbH
 */
#include <linux/pci.h>
#include <linux/interrupt.h>
#include <linux/debugfs.h>
#include <linux/sched.h>
#include <linux/bitops.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/module.h>
#include <linux/wait.h>
#include <linux/seq_file.h>

#include "iwl-drv.h"
#include "iwl-trans.h"
#include "iwl-csr.h"
#include "iwl-prph.h"
#include "iwl-scd.h"
#include "iwl-agn-hw.h"
#include "fw/error-dump.h"
#include "fw/dbg.h"
#include "fw/api/tx.h"
#include "internal.h"
#include "iwl-fh.h"
#include "iwl-context-info-gen3.h"

/* extended range in FW SRAM */
#define IWL_FW_MEM_EXTENDED_START	0x40000
#define IWL_FW_MEM_EXTENDED_END		0x57FFF

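/*
 * Dump the device's PCI config space, the first PCI_MEM_DUMP_SIZE bytes
 * of its memory-mapped registers, and the AER capability of the device
 * and the root port, plus the parent bridge's config space.  This runs
 * at most once per driver lifetime (pcie_dbg_dumped_once) so a storm of
 * failed transactions can't flood the log.
 */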
void iwl_trans_pcie_dump_regs(struct iwl_trans *trans)
{
#define PCI_DUMP_SIZE		352
#define PCI_MEM_DUMP_SIZE	64
#define PCI_PARENT_DUMP_SIZE	524
#define PREFIX_LEN		32
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	struct pci_dev *pdev = trans_pcie->pci_dev;
	u32 i, pos, alloc_size, *ptr, *buf;
	char *prefix;

	if (trans_pcie->pcie_dbg_dumped_once)
		return;

	/* Should be a multiple of 4 */
	BUILD_BUG_ON(PCI_DUMP_SIZE > 4096 || PCI_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_MEM_DUMP_SIZE > 4096 || PCI_MEM_DUMP_SIZE & 0x3);
	BUILD_BUG_ON(PCI_PARENT_DUMP_SIZE > 4096 || PCI_PARENT_DUMP_SIZE & 0x3);

	/* Alloc a max size buffer */
	alloc_size = PCI_ERR_ROOT_ERR_SRC + 4 + PREFIX_LEN;
	alloc_size = max_t(u32, alloc_size, PCI_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_MEM_DUMP_SIZE + PREFIX_LEN);
	alloc_size = max_t(u32, alloc_size, PCI_PARENT_DUMP_SIZE + PREFIX_LEN);

	buf = kmalloc(alloc_size, GFP_ATOMIC);
	if (!buf)
		return;
	prefix = (char *)buf + alloc_size - PREFIX_LEN;

	IWL_ERR(trans, "iwlwifi transaction failed, dumping registers\n");

	/* Print wifi device registers */
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
	IWL_ERR(trans, "iwlwifi device config registers:\n");
	for (i = 0, ptr = buf; i < PCI_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	IWL_ERR(trans, "iwlwifi device memory mapped registers:\n");
	for (i = 0, ptr = buf; i < PCI_MEM_DUMP_SIZE; i += 4, ptr++)
		*ptr = iwl_read32(trans, i);
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi device AER capability structure:\n");
		for (i = 0, ptr = buf; i < PCI_ERR_ROOT_COMMAND; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET,
			       32, 4, buf, i, 0);
	}

	/* Print parent device registers next */
	if (!pdev->bus->self)
		goto out;

	pdev = pdev->bus->self;
	sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));

	IWL_ERR(trans, "iwlwifi parent port (%s) config registers:\n",
		pci_name(pdev));
	for (i = 0, ptr = buf; i < PCI_PARENT_DUMP_SIZE; i += 4, ptr++)
		if (pci_read_config_dword(pdev, i, ptr))
			goto err_read;
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);

	/* Print root port AER registers */
	pos = 0;
	pdev = pcie_find_root_port(pdev);
	if (pdev)
		pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_ERR);
	if (pos) {
		IWL_ERR(trans, "iwlwifi root port (%s) AER cap structure:\n",
			pci_name(pdev));
		sprintf(prefix, "iwlwifi %s: ", pci_name(pdev));
		for (i = 0, ptr = buf; i <= PCI_ERR_ROOT_ERR_SRC; i += 4, ptr++)
			if (pci_read_config_dword(pdev, pos + i, ptr))
				goto err_read;
		print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32,
			       4, buf, i, 0);
	}
	goto out;

err_read:
	print_hex_dump(KERN_ERR, prefix, DUMP_PREFIX_OFFSET, 32, 4, buf, i, 0);
	IWL_ERR(trans, "Read failed at 0x%X\n", i);
out:
	trans_pcie->pcie_dbg_dumped_once = 1;
	kfree(buf);
}

static void iwl_trans_pcie_sw_reset(struct iwl_trans *trans)
{
	/* Reset entire device - do controller reset (results in SHRD_HW_RST) */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
	usleep_range(5000, 6000);
}

static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;

	if (!fw_mon->size)
		return;

	dma_free_coherent(trans->dev, fw_mon->size, fw_mon->block,
			  fw_mon->physical);

	fw_mon->block = NULL;
	fw_mon->physical = 0;
	fw_mon->size = 0;
}

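/*
 * Allocate a DMA-coherent buffer for the firmware monitor, trying
 * power-of-two sizes from BIT(max_power) down to BIT(min_power) until
 * one succeeds.  __GFP_NOWARN keeps the expected failures of the larger
 * orders from spamming the allocator warning.
 */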
static void iwl_pcie_alloc_fw_monitor_block(struct iwl_trans *trans,
					    u8 max_power, u8 min_power)
{
	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	void *block = NULL;
	dma_addr_t physical = 0;
	u32 size = 0;
	u8 power;

	if (fw_mon->size)
		return;

	for (power = max_power; power >= min_power; power--) {
		size = BIT(power);
		block = dma_alloc_coherent(trans->dev, size, &physical,
					   GFP_KERNEL | __GFP_NOWARN);
		if (!block)
			continue;

		IWL_INFO(trans,
			 "Allocated 0x%08x bytes for firmware monitor.\n",
			 size);
		break;
	}

	if (WARN_ON_ONCE(!block))
		return;

	if (power != max_power)
		IWL_ERR(trans,
			"Sorry - debug buffer is only %luK while you requested %luK\n",
			(unsigned long)BIT(power - 10),
			(unsigned long)BIT(max_power - 10));

	fw_mon->block = block;
	fw_mon->physical = physical;
	fw_mon->size = size;
}

void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power)
{
	if (!max_power) {
		/* default max_power is maximum */
		max_power = 26;
	} else {
		max_power += 11;
	}

	if (WARN(max_power > 26,
		 "External buffer size for monitor is too big %d, check the FW TLV\n",
		 max_power))
		return;

	if (trans->dbg.fw_mon.size)
		return;

	iwl_pcie_alloc_fw_monitor_block(trans, max_power, 11);
}

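/*
 * Shared-block (SHR) registers are not memory-mapped directly; they are
 * reached through the HEEP control/data register pair.  The low 16 bits
 * of the control word carry the register address and the top nibble the
 * opcode: 2 latches a read into the data register, 3 commits a write of
 * the value previously placed there.
 */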
static u32 iwl_trans_pcie_read_shr(struct iwl_trans *trans, u32 reg)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (2 << 28)));
	return iwl_read32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG);
}

static void iwl_trans_pcie_write_shr(struct iwl_trans *trans, u32 reg, u32 val)
{
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_DATA_REG, val);
	iwl_write32(trans, HEEP_CTRL_WRD_PCIEX_CTRL_REG,
		    ((reg & 0x0000ffff) | (3 << 28)));
}

static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
	if (trans->cfg->apmg_not_supported)
		return;

	if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
	else
		iwl_set_bits_mask_prph(trans, APMG_PS_CTRL_REG,
				       APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
				       ~APMG_PS_CTRL_MSK_PWR_SRC);
}

/* PCI registers */
#define PCI_CFG_RETRY_TIMEOUT	0x041

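/*
 * Record the platform's ASPM/LTR link configuration: L0s is always
 * force-disabled in the device, pm_support is set when the BIOS left
 * L0s off on the link, and ltr_enabled mirrors the LTR enable bit in
 * the Device Control 2 capability.
 */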
void iwl_pcie_apm_config(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u16 lctl;
	u16 cap;

	/*
	 * L0S states have been found to be unstable with our devices
	 * and in newer hardware they are not officially supported at
	 * all, so we must always set the L0S_DISABLED bit.
	 */
	iwl_set_bit(trans, CSR_GIO_REG, CSR_GIO_REG_VAL_L0S_DISABLED);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_LNKCTL, &lctl);
	trans->pm_support = !(lctl & PCI_EXP_LNKCTL_ASPM_L0S);

	pcie_capability_read_word(trans_pcie->pci_dev, PCI_EXP_DEVCTL2, &cap);
	trans->ltr_enabled = cap & PCI_EXP_DEVCTL2_LTR_EN;
	IWL_DEBUG_POWER(trans, "L1 %sabled - LTR %sabled\n",
			(lctl & PCI_EXP_LNKCTL_ASPM_L1) ? "En" : "Dis",
			trans->ltr_enabled ? "En" : "Dis");
}

/*
 * Start up NIC's basic functionality after it has been reset
 * (e.g. after platform boot, or shutdown via iwl_pcie_apm_stop())
 * NOTE:  This does not load uCode nor start the embedded processor
 */
static int iwl_pcie_apm_init(struct iwl_trans *trans)
{
	int ret;

	IWL_DEBUG_INFO(trans, "Init card's basic functions\n");

	/*
	 * Use "set_bit" below rather than "write", to preserve any hardware
	 * bits already set by default after reset.
	 */

	/* Disable L0S exit timer (platform NMI Work/Around) */
	if (trans->trans_cfg->device_family < IWL_DEVICE_FAMILY_8000)
		iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
			    CSR_GIO_CHICKEN_BITS_REG_BIT_DIS_L0S_EXIT_TIMER);

	/*
	 * Disable L0s without affecting L1;
	 *  don't wait for ICH L0s (ICH bug W/A)
	 */
	iwl_set_bit(trans, CSR_GIO_CHICKEN_BITS,
		    CSR_GIO_CHICKEN_BITS_REG_BIT_L1A_NO_L0S_RX);

	/* Set FH wait threshold to maximum (HW error during stress W/A) */
	iwl_set_bit(trans, CSR_DBG_HPET_MEM_REG, CSR_DBG_HPET_MEM_REG_VAL);

	/*
	 * Enable HAP INTA (interrupt from management bus) to
	 * wake device's PCI Express link L1a -> L0s
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_HAP_WAKE_L1A);

	iwl_pcie_apm_config(trans);

	/* Configure analog phase-lock-loop before activating to D0A */
	if (trans->trans_cfg->base_params->pll_cfg)
		iwl_set_bit(trans, CSR_ANA_PLL_CFG, CSR50_ANA_PLL_CFG_VAL);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	if (trans->cfg->host_interrupt_operation_mode) {
		/*
		 * This is a bit of an abuse: the workaround below is needed
		 * for 7260 / 3160 only, so we key it off
		 * host_interrupt_operation_mode even though it is not really
		 * related to the host interrupt operation mode.
		 *
		 * Enable the oscillator to count wake up time for L1 exit. This
		 * consumes slightly more power (100uA) - but allows to be sure
		 * that we wake up from L1 on time.
		 *
		 * This looks weird: read twice the same register, discard the
		 * value, set a bit, and yet again, read that same register
		 * just to discard the value. But that's the way the hardware
		 * seems to like it.
		 */
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
		iwl_set_bits_prph(trans, OSC_CLK, OSC_CLK_FORCE_CONTROL);
		iwl_read_prph(trans, OSC_CLK);
		iwl_read_prph(trans, OSC_CLK);
	}

	/*
	 * Enable DMA clock and wait for it to stabilize.
	 *
	 * Write to "CLK_EN_REG"; "1" bits enable clocks, while "0"
	 * bits do not disable clocks.  This preserves any hardware
	 * bits already set by default in "CLK_CTRL_REG" after reset.
	 */
	if (!trans->cfg->apmg_not_supported) {
		iwl_write_prph(trans, APMG_CLK_EN_REG,
			       APMG_CLK_VAL_DMA_CLK_RQT);
		udelay(20);

		/* Disable L1-Active */
		iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
				  APMG_PCIDEV_STT_VAL_L1_ACT_DIS);

		/* Clear the interrupt in APMG if the NIC is in RFKILL */
		iwl_write_prph(trans, APMG_RTC_INT_STT_REG,
			       APMG_RTC_INT_STT_RFKILL);
	}

	set_bit(STATUS_DEVICE_ENABLED, &trans->status);

	return 0;
}

/*
 * Enable LP XTAL to avoid HW bug where device may consume much power if
 * FW is not loaded after device reset. LP XTAL is disabled by default
 * after device HW reset. Do it only if XTAL is fed by internal source.
 * Configure device's "persistence" mode to avoid resetting XTAL again when
 * SHRD_HW_RST occurs in S3.
 */
static void iwl_pcie_apm_lp_xtal_enable(struct iwl_trans *trans)
{
	int ret;
	u32 apmg_gp1_reg;
	u32 apmg_xtal_cfg_reg;
	u32 dl_cfg_reg;

	/* Force XTAL ON */
	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
				 CSR_GP_CNTRL_REG_FLAG_XTAL_ON);

	iwl_trans_pcie_sw_reset(trans);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (WARN_ON(ret)) {
		/* Release XTAL ON request */
		__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
					   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
		return;
	}

	/*
	 * Clear "disable persistence" to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_clear_bits_prph(trans, APMG_PCIDEV_STT_REG,
				    APMG_PCIDEV_STT_VAL_PERSIST_DIS);

	/*
	 * Force APMG XTAL to be active to prevent its disabling by HW
	 * caused by APMG idle state.
	 */
	apmg_xtal_cfg_reg = iwl_trans_pcie_read_shr(trans,
						    SHR_APMG_XTAL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg |
				 SHR_APMG_XTAL_CFG_XTAL_ON_REQ);

	iwl_trans_pcie_sw_reset(trans);

	/* Enable LP XTAL by indirect access through CSR */
	apmg_gp1_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_GP1_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_GP1_REG, apmg_gp1_reg |
				 SHR_APMG_GP1_WF_XTAL_LP_EN |
				 SHR_APMG_GP1_CHICKEN_BIT_SELECT);

	/* Clear delay line clock power up */
	dl_cfg_reg = iwl_trans_pcie_read_shr(trans, SHR_APMG_DL_CFG_REG);
	iwl_trans_pcie_write_shr(trans, SHR_APMG_DL_CFG_REG, dl_cfg_reg &
				 ~SHR_APMG_DL_CFG_DL_CLOCK_POWER_UP);

	/*
	 * Enable persistence mode to avoid LP XTAL resetting when
	 * SHRD_HW_RST is applied in S3.
	 */
	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	/* Activates XTAL resources monitor */
	__iwl_trans_pcie_set_bit(trans, CSR_MONITOR_CFG_REG,
				 CSR_MONITOR_XTAL_RESOURCES);

	/* Release XTAL ON request */
	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
				   CSR_GP_CNTRL_REG_FLAG_XTAL_ON);
	udelay(10);

	/* Release APMG XTAL */
	iwl_trans_pcie_write_shr(trans, SHR_APMG_XTAL_CFG_REG,
				 apmg_xtal_cfg_reg &
				 ~SHR_APMG_XTAL_CFG_XTAL_ON_REQ);
}

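/*
 * Ask the device to stop bus-master DMA and poll CSR_RESET (for up to
 * 100 usec) until the hardware acknowledges with MASTER_DISABLED.
 */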
void iwl_pcie_apm_stop_master(struct iwl_trans *trans)
{
	int ret;

	/* stop device's busmaster DMA activity */
	iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_STOP_MASTER);

	ret = iwl_poll_bit(trans, CSR_RESET,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED,
			   CSR_RESET_REG_FLAG_MASTER_DISABLED, 100);
	if (ret < 0)
		IWL_WARN(trans, "Master Disable Timed Out, 100 usec\n");

	IWL_DEBUG_INFO(trans, "stop master\n");
}

static void iwl_pcie_apm_stop(struct iwl_trans *trans, bool op_mode_leave)
{
	IWL_DEBUG_INFO(trans, "Stop card, put in low power state\n");

	if (op_mode_leave) {
		if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_pcie_apm_init(trans);

		/* inform ME that we are leaving */
		if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000)
			iwl_set_bits_prph(trans, APMG_PCIDEV_STT_REG,
					  APMG_PCIDEV_STT_VAL_WAKE_ME);
		else if (trans->trans_cfg->device_family >=
			 IWL_DEVICE_FAMILY_8000) {
			iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				    CSR_RESET_LINK_PWR_MGMT_DISABLED);
			iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
				    CSR_HW_IF_CONFIG_REG_PREPARE |
				    CSR_HW_IF_CONFIG_REG_ENABLE_PME);
			mdelay(1);
			iwl_clear_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
				      CSR_RESET_LINK_PWR_MGMT_DISABLED);
		}
		mdelay(5);
	}

	clear_bit(STATUS_DEVICE_ENABLED, &trans->status);

	/* Stop device's DMA activity */
	iwl_pcie_apm_stop_master(trans);

	if (trans->cfg->lp_xtal_workaround) {
		iwl_pcie_apm_lp_xtal_enable(trans);
		return;
	}

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Clear "initialization complete" bit to move adapter from
	 * D0A* (powered-up Active) --> D0U* (Uninitialized) state.
	 */
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);
}

static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	/* nic_init */
	spin_lock_bh(&trans_pcie->irq_lock);
	ret = iwl_pcie_apm_init(trans);
	spin_unlock_bh(&trans_pcie->irq_lock);

	if (ret)
		return ret;

	iwl_pcie_set_pwr(trans, false);

	iwl_op_mode_nic_config(trans->op_mode);

	/* Allocate the RX queue, or reset if it is already allocated */
	ret = iwl_pcie_rx_init(trans);
	if (ret)
		return ret;

	/* Allocate or reset and init all Tx and Command queues */
	if (iwl_pcie_tx_init(trans)) {
		iwl_pcie_rx_free(trans);
		return -ENOMEM;
	}

	if (trans->trans_cfg->base_params->shadow_reg_enable) {
		/* enable shadow regs in HW */
		iwl_set_bit(trans, CSR_MAC_SHADOW_REG_CTRL, 0x800FFFFF);
		IWL_DEBUG_INFO(trans, "Enabling shadow registers in device\n");
	}

	return 0;
}

#define HW_READY_TIMEOUT (50)

/* Note: returns poll_bit return value, which is >= 0 if success */
static int iwl_pcie_set_hw_ready(struct iwl_trans *trans)
{
	int ret;

	iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
		    CSR_HW_IF_CONFIG_REG_BIT_NIC_READY);

	/* See if we got it */
	ret = iwl_poll_bit(trans, CSR_HW_IF_CONFIG_REG,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   CSR_HW_IF_CONFIG_REG_BIT_NIC_READY,
			   HW_READY_TIMEOUT);

	if (ret >= 0)
		iwl_set_bit(trans, CSR_MBOX_SET_REG, CSR_MBOX_SET_REG_OS_ALIVE);

	IWL_DEBUG_INFO(trans, "hardware%s ready\n", ret < 0 ? " not" : "");
	return ret;
}

/* Note: returns standard 0/-ERROR code */
int iwl_pcie_prepare_card_hw(struct iwl_trans *trans)
{
	int ret;
	int t = 0;
	int iter;

	IWL_DEBUG_INFO(trans, "iwl_trans_prepare_card_hw enter\n");

	ret = iwl_pcie_set_hw_ready(trans);
	/* If the card is ready, exit 0 */
	if (ret >= 0)
		return 0;

	iwl_set_bit(trans, CSR_DBG_LINK_PWR_MGMT_REG,
		    CSR_RESET_LINK_PWR_MGMT_DISABLED);
	usleep_range(1000, 2000);

	for (iter = 0; iter < 10; iter++) {
		/* If HW is not ready, prepare the conditions to check again */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PREPARE);

		do {
			ret = iwl_pcie_set_hw_ready(trans);
			if (ret >= 0)
				return 0;

			usleep_range(200, 1000);
			t += 200;
		} while (t < 150000);
		msleep(25);
	}

	IWL_ERR(trans, "Couldn't prepare the card\n");

	return ret;
}

/*
 * ucode
 */
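/*
 * Program the FH service channel to DMA one firmware chunk from host
 * memory into device SRAM: pause the channel, set the SRAM destination,
 * the DRAM source address (split into low and high parts) and the byte
 * count, then re-enable the channel.  Completion arrives as an FH_TX
 * interrupt, which the ISR turns into a ucode_write_waitq wake-up.
 */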
static void iwl_pcie_load_firmware_chunk_fh(struct iwl_trans *trans,
					    u32 dst_addr, dma_addr_t phy_addr,
					    u32 byte_cnt)
{
	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_PAUSE);

	iwl_write32(trans, FH_SRVC_CHNL_SRAM_ADDR_REG(FH_SRVC_CHNL),
		    dst_addr);

	iwl_write32(trans, FH_TFDIB_CTRL0_REG(FH_SRVC_CHNL),
		    phy_addr & FH_MEM_TFDIB_DRAM_ADDR_LSB_MSK);

	iwl_write32(trans, FH_TFDIB_CTRL1_REG(FH_SRVC_CHNL),
		    (iwl_get_dma_hi_addr(phy_addr)
			<< FH_MEM_TFDIB_REG1_ADDR_BITSHIFT) | byte_cnt);

	iwl_write32(trans, FH_TCSR_CHNL_TX_BUF_STS_REG(FH_SRVC_CHNL),
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_NUM) |
		    BIT(FH_TCSR_CHNL_TX_BUF_STS_REG_POS_TB_IDX) |
		    FH_TCSR_CHNL_TX_BUF_STS_REG_VAL_TFDB_VALID);

	iwl_write32(trans, FH_TCSR_CHNL_TX_CONFIG_REG(FH_SRVC_CHNL),
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CHNL_ENABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_DMA_CREDIT_DISABLE |
		    FH_TCSR_TX_CONFIG_REG_VAL_CIRQ_HOST_ENDTFD);
}

static int iwl_pcie_load_firmware_chunk(struct iwl_trans *trans,
					u32 dst_addr, dma_addr_t phy_addr,
					u32 byte_cnt)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int ret;

	trans_pcie->ucode_write_complete = false;

	if (!iwl_trans_grab_nic_access(trans))
		return -EIO;

	iwl_pcie_load_firmware_chunk_fh(trans, dst_addr, phy_addr,
					byte_cnt);
	iwl_trans_release_nic_access(trans);

	ret = wait_event_timeout(trans_pcie->ucode_write_waitq,
				 trans_pcie->ucode_write_complete, 5 * HZ);
	if (!ret) {
		IWL_ERR(trans, "Failed to load firmware chunk!\n");
		iwl_trans_pcie_dump_regs(trans);
		return -ETIMEDOUT;
	}

	return 0;
}

static int iwl_pcie_load_section(struct iwl_trans *trans, u8 section_num,
			    const struct fw_desc *section)
{
	u8 *v_addr;
	dma_addr_t p_addr;
	u32 offset, chunk_sz = min_t(u32, FH_MEM_TB_MAX_LENGTH, section->len);
	int ret = 0;

	IWL_DEBUG_FW(trans, "[%d] uCode section being loaded...\n",
		     section_num);

	v_addr = dma_alloc_coherent(trans->dev, chunk_sz, &p_addr,
				    GFP_KERNEL | __GFP_NOWARN);
	if (!v_addr) {
		IWL_DEBUG_INFO(trans, "Falling back to small chunks of DMA\n");
		chunk_sz = PAGE_SIZE;
		v_addr = dma_alloc_coherent(trans->dev, chunk_sz,
					    &p_addr, GFP_KERNEL);
		if (!v_addr)
			return -ENOMEM;
	}

	for (offset = 0; offset < section->len; offset += chunk_sz) {
		u32 copy_size, dst_addr;
		bool extended_addr = false;

		copy_size = min_t(u32, chunk_sz, section->len - offset);
		dst_addr = section->offset + offset;

		if (dst_addr >= IWL_FW_MEM_EXTENDED_START &&
		    dst_addr <= IWL_FW_MEM_EXTENDED_END)
			extended_addr = true;

		if (extended_addr)
			iwl_set_bits_prph(trans, LMPM_CHICK,
					  LMPM_CHICK_EXTENDED_ADDR_SPACE);

		memcpy(v_addr, (u8 *)section->data + offset, copy_size);
		ret = iwl_pcie_load_firmware_chunk(trans, dst_addr, p_addr,
						   copy_size);

		if (extended_addr)
			iwl_clear_bits_prph(trans, LMPM_CHICK,
					    LMPM_CHICK_EXTENDED_ADDR_SPACE);

		if (ret) {
			IWL_ERR(trans,
				"Could not load the [%d] uCode section\n",
				section_num);
			break;
		}
	}

	dma_free_coherent(trans->dev, chunk_sz, v_addr, p_addr);
	return ret;
}

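/*
 * 8000-family loading: after each section is DMA'd, progress is
 * reported to the firmware through FH_UCODE_LOAD_STATUS as a growing
 * bitmask (sec_num), placed in the low 16 bits for CPU1 and the high 16
 * bits for CPU2 (shift_param).  Writing 0xFFFF / 0xFFFFFFFF at the end
 * marks the respective CPU's sections as fully loaded.
 */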
static int iwl_pcie_load_cpu_sections_8000(struct iwl_trans *trans,
					   const struct fw_img *image,
					   int cpu,
					   int *first_ucode_section)
{
	int shift_param;
	int i, ret = 0, sec_num = 0x1;
	u32 val, last_read_idx = 0;

	if (cpu == 1) {
		shift_param = 0;
		*first_ucode_section = 0;
	} else {
		shift_param = 16;
		(*first_ucode_section)++;
	}

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;

		/* Notify ucode of loaded section number and status */
		val = iwl_read_direct32(trans, FH_UCODE_LOAD_STATUS);
		val = val | (sec_num << shift_param);
		iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS, val);

		sec_num = (sec_num << 1) | 0x1;
	}

	*first_ucode_section = last_read_idx;

	iwl_enable_interrupts(trans);

	if (trans->trans_cfg->use_tfh) {
		if (cpu == 1)
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFF);
		else
			iwl_write_prph(trans, UREG_UCODE_LOAD_STATUS,
				       0xFFFFFFFF);
	} else {
		if (cpu == 1)
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFF);
		else
			iwl_write_direct32(trans, FH_UCODE_LOAD_STATUS,
					   0xFFFFFFFF);
	}

	return 0;
}

static int iwl_pcie_load_cpu_sections(struct iwl_trans *trans,
				      const struct fw_img *image,
				      int cpu,
				      int *first_ucode_section)
{
	int i, ret = 0;
	u32 last_read_idx = 0;

	if (cpu == 1)
		*first_ucode_section = 0;
	else
		(*first_ucode_section)++;

	for (i = *first_ucode_section; i < image->num_sec; i++) {
		last_read_idx = i;

		/*
		 * CPU1_CPU2_SEPARATOR_SECTION delimiter - separate between
		 * CPU1 to CPU2.
		 * PAGING_SEPARATOR_SECTION delimiter - separate between
		 * CPU2 non paged to CPU2 paging sec.
		 */
		if (!image->sec[i].data ||
		    image->sec[i].offset == CPU1_CPU2_SEPARATOR_SECTION ||
		    image->sec[i].offset == PAGING_SEPARATOR_SECTION) {
			IWL_DEBUG_FW(trans,
				     "Break since Data not valid or Empty section, sec = %d\n",
				     i);
			break;
		}

		ret = iwl_pcie_load_section(trans, i, &image->sec[i]);
		if (ret)
			return ret;
	}

	*first_ucode_section = last_read_idx;

	return 0;
}

static void iwl_pcie_apply_destination_ini(struct iwl_trans *trans)
{
	enum iwl_fw_ini_allocation_id alloc_id = IWL_FW_INI_ALLOCATION_ID_DBGC1;
	struct iwl_fw_ini_allocation_tlv *fw_mon_cfg =
		&trans->dbg.fw_mon_cfg[alloc_id];
	struct iwl_dram_data *frag;

	if (!iwl_trans_dbg_ini_valid(trans))
		return;

	if (le32_to_cpu(fw_mon_cfg->buf_location) ==
	    IWL_FW_INI_LOCATION_SRAM_PATH) {
		IWL_DEBUG_FW(trans, "WRT: Applying SMEM buffer destination\n");
		/* set sram monitor by enabling bit 7 */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_BIT_MONITOR_SRAM);

		return;
	}

	if (le32_to_cpu(fw_mon_cfg->buf_location) !=
	    IWL_FW_INI_LOCATION_DRAM_PATH ||
	    !trans->dbg.fw_mon_ini[alloc_id].num_frags)
		return;

	frag = &trans->dbg.fw_mon_ini[alloc_id].frags[0];

	IWL_DEBUG_FW(trans, "WRT: Applying DRAM destination (alloc_id=%u)\n",
		     alloc_id);

	iwl_write_umac_prph(trans, MON_BUFF_BASE_ADDR_VER2,
			    frag->physical >> MON_BUFF_SHIFT_VER2);
	iwl_write_umac_prph(trans, MON_BUFF_END_ADDR_VER2,
			    (frag->physical + frag->size - 256) >>
			    MON_BUFF_SHIFT_VER2);
}

void iwl_pcie_apply_destination(struct iwl_trans *trans)
{
	const struct iwl_fw_dbg_dest_tlv_v1 *dest = trans->dbg.dest_tlv;
	const struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
	int i;

	if (iwl_trans_dbg_ini_valid(trans)) {
		iwl_pcie_apply_destination_ini(trans);
		return;
	}

	IWL_INFO(trans, "Applying debug destination %s\n",
		 get_fw_dbg_mode_string(dest->monitor_mode));

	if (dest->monitor_mode == EXTERNAL_MODE)
		iwl_pcie_alloc_fw_monitor(trans, dest->size_power);
	else
		IWL_WARN(trans, "PCI should have external buffer debug\n");

	for (i = 0; i < trans->dbg.n_dest_reg; i++) {
		u32 addr = le32_to_cpu(dest->reg_ops[i].addr);
		u32 val = le32_to_cpu(dest->reg_ops[i].val);

		switch (dest->reg_ops[i].op) {
		case CSR_ASSIGN:
			iwl_write32(trans, addr, val);
			break;
		case CSR_SETBIT:
			iwl_set_bit(trans, addr, BIT(val));
			break;
		case CSR_CLEARBIT:
			iwl_clear_bit(trans, addr, BIT(val));
			break;
		case PRPH_ASSIGN:
			iwl_write_prph(trans, addr, val);
			break;
		case PRPH_SETBIT:
			iwl_set_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_CLEARBIT:
			iwl_clear_bits_prph(trans, addr, BIT(val));
			break;
		case PRPH_BLOCKBIT:
			if (iwl_read_prph(trans, addr) & BIT(val)) {
				IWL_ERR(trans,
					"BIT(%u) in address 0x%x is 1, stopping FW configuration\n",
					val, addr);
				goto monitor;
			}
			break;
		default:
			IWL_ERR(trans, "FW debug - unknown OP %d\n",
				dest->reg_ops[i].op);
			break;
		}
	}

monitor:
	if (dest->monitor_mode == EXTERNAL_MODE && fw_mon->size) {
		iwl_write_prph(trans, le32_to_cpu(dest->base_reg),
			       fw_mon->physical >> dest->base_shift);
		if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size -
					256) >> dest->end_shift);
		else
			iwl_write_prph(trans, le32_to_cpu(dest->end_reg),
				       (fw_mon->physical + fw_mon->size) >>
				       dest->end_shift);
	}
}

static int iwl_pcie_load_given_ucode(struct iwl_trans *trans,
				const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	/* load to FW the binary non secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections(trans, image, 1, &first_ucode_section);
	if (ret)
		return ret;

	if (image->is_dual_cpus) {
		/* set CPU2 header address */
		iwl_write_prph(trans,
			       LMPM_SECURE_UCODE_LOAD_CPU2_HDR_ADDR,
			       LMPM_SECURE_CPU2_HDR_MEM_SPACE);

		/* load to FW the binary sections of CPU2 */
		ret = iwl_pcie_load_cpu_sections(trans, image, 2,
						 &first_ucode_section);
		if (ret)
			return ret;
	}

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	iwl_enable_interrupts(trans);

	/* release CPU reset */
	iwl_write32(trans, CSR_RESET, 0);

	return 0;
}

static int iwl_pcie_load_given_ucode_8000(struct iwl_trans *trans,
					  const struct fw_img *image)
{
	int ret = 0;
	int first_ucode_section;

	IWL_DEBUG_FW(trans, "working with %s CPU\n",
		     image->is_dual_cpus ? "Dual" : "Single");

	if (iwl_pcie_dbg_on(trans))
		iwl_pcie_apply_destination(trans);

	IWL_DEBUG_POWER(trans, "Original WFPM value = 0x%08X\n",
			iwl_read_prph(trans, WFPM_GP2));

	/*
	 * Set a default value. On resume, reading back the values that
	 * were zeroed provides debug data on the resume flow.
	 * This is for debugging only and has no functional impact.
	 */
	iwl_write_prph(trans, WFPM_GP2, 0x01010101);

	/* configure the ucode to be ready to get the secured image */
	/* release CPU reset */
	iwl_write_prph(trans, RELEASE_CPU_RESET, RELEASE_CPU_RESET_BIT);

	/* load to FW the binary Secured sections of CPU1 */
	ret = iwl_pcie_load_cpu_sections_8000(trans, image, 1,
					      &first_ucode_section);
	if (ret)
		return ret;

	/* load to FW the binary sections of CPU2 */
	return iwl_pcie_load_cpu_sections_8000(trans, image, 2,
					       &first_ucode_section);
}

bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill = iwl_is_rfkill_set(trans);
	bool prev = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	bool report;

	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		if (trans_pcie->opmode_down)
			clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}

	report = test_bit(STATUS_RFKILL_OPMODE, &trans->status);

	if (prev != report)
		iwl_trans_pcie_rf_kill(trans, report);

	return hw_rfkill;
}

struct iwl_causes_list {
	u32 cause_num;
	u32 mask_reg;
	u8 addr;
};

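/*
 * Each entry maps one non-RX interrupt cause to its byte offset (addr)
 * in the IVAR table and names the mask register used to unmask it; the
 * offsets are hardware-defined.  iwl_pcie_map_non_rx_causes() below
 * walks this table to bind every cause to the default vector.
 */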
static struct iwl_causes_list causes_list[] = {
	{MSIX_FH_INT_CAUSES_D2S_CH0_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0},
	{MSIX_FH_INT_CAUSES_D2S_CH1_NUM,	CSR_MSIX_FH_INT_MASK_AD, 0x1},
	{MSIX_FH_INT_CAUSES_S2D,		CSR_MSIX_FH_INT_MASK_AD, 0x3},
	{MSIX_FH_INT_CAUSES_FH_ERR,		CSR_MSIX_FH_INT_MASK_AD, 0x5},
	{MSIX_HW_INT_CAUSES_REG_ALIVE,		CSR_MSIX_HW_INT_MASK_AD, 0x10},
	{MSIX_HW_INT_CAUSES_REG_WAKEUP,		CSR_MSIX_HW_INT_MASK_AD, 0x11},
	{MSIX_HW_INT_CAUSES_REG_RESET_DONE,	CSR_MSIX_HW_INT_MASK_AD, 0x12},
	{MSIX_HW_INT_CAUSES_REG_CT_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x16},
	{MSIX_HW_INT_CAUSES_REG_RF_KILL,	CSR_MSIX_HW_INT_MASK_AD, 0x17},
	{MSIX_HW_INT_CAUSES_REG_PERIODIC,	CSR_MSIX_HW_INT_MASK_AD, 0x18},
	{MSIX_HW_INT_CAUSES_REG_SW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x29},
	{MSIX_HW_INT_CAUSES_REG_SCD,		CSR_MSIX_HW_INT_MASK_AD, 0x2A},
	{MSIX_HW_INT_CAUSES_REG_FH_TX,		CSR_MSIX_HW_INT_MASK_AD, 0x2B},
	{MSIX_HW_INT_CAUSES_REG_HW_ERR,		CSR_MSIX_HW_INT_MASK_AD, 0x2D},
	{MSIX_HW_INT_CAUSES_REG_HAP,		CSR_MSIX_HW_INT_MASK_AD, 0x2E},
};

static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE;
	int i, arr_size = ARRAY_SIZE(causes_list);
	struct iwl_causes_list *causes = causes_list;

	/*
	 * Access all non RX causes and map them to the default irq.
	 * In case we are missing at least one interrupt vector,
	 * the first interrupt vector will serve non-RX and FBQ causes.
	 */
	for (i = 0; i < arr_size; i++) {
		iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val);
		iwl_clear_bit(trans, causes[i].mask_reg,
			      causes[i].cause_num);
	}
}

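/*
 * Illustrative example (not from the code below): when the first vector
 * also serves RSS (IWL_SHARED_IRQ_FIRST_RSS), offset is 1, so RX queue 1
 * is folded onto cause Q(0) together with the fallback queue 0, while
 * queues 2..N-1 get causes Q(1)..Q(N-2) on their own vectors.
 */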
static void iwl_pcie_map_rx_causes(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 offset =
		trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0;
	u32 val, idx;

	/*
	 * The first RX queue - fallback queue, which is designated for
	 * management frame, command responses etc, is always mapped to the
	 * first interrupt vector. The other RX queues are mapped to
	 * the other (N - 2) interrupt vectors.
	 */
	val = BIT(MSIX_FH_INT_CAUSES_Q(0));
	for (idx = 1; idx < trans->num_rx_queues; idx++) {
		iwl_write8(trans, CSR_MSIX_RX_IVAR(idx),
			   MSIX_FH_INT_CAUSES_Q(idx - offset));
		val |= BIT(MSIX_FH_INT_CAUSES_Q(idx));
	}
	iwl_write32(trans, CSR_MSIX_FH_INT_MASK_AD, ~val);

	val = MSIX_FH_INT_CAUSES_Q(0);
	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_NON_RX)
		val |= MSIX_NON_AUTO_CLEAR_CAUSE;
	iwl_write8(trans, CSR_MSIX_RX_IVAR(0), val);

	if (trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS)
		iwl_write8(trans, CSR_MSIX_RX_IVAR(1), val);
}

void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	if (!trans_pcie->msix_enabled) {
		if (trans->trans_cfg->mq_rx_supported &&
		    test_bit(STATUS_DEVICE_ENABLED, &trans->status))
			iwl_write_umac_prph(trans, UREG_CHICK,
					    UREG_CHICK_MSI_ENABLE);
		return;
	}
	/*
	 * The IVAR table needs to be configured again after reset,
	 * but if the device is disabled, we can't write to
	 * prph.
	 */
	if (test_bit(STATUS_DEVICE_ENABLED, &trans->status))
		iwl_write_umac_prph(trans, UREG_CHICK, UREG_CHICK_MSIX_ENABLE);

	/*
	 * Each cause from the causes list above and the RX causes is
	 * represented as a byte in the IVAR table. The first nibble
	 * represents the bound interrupt vector of the cause, the second
	 * represents no auto clear for this cause. This will be set if its
	 * interrupt vector is bound to serve other causes.
	 */
	iwl_pcie_map_rx_causes(trans);

	iwl_pcie_map_non_rx_causes(trans);
}

static void iwl_pcie_init_msix(struct iwl_trans_pcie *trans_pcie)
{
	struct iwl_trans *trans = trans_pcie->trans;

	iwl_pcie_conf_msix_hw(trans_pcie);

	if (!trans_pcie->msix_enabled)
		return;

	trans_pcie->fh_init_mask = ~iwl_read32(trans, CSR_MSIX_FH_INT_MASK_AD);
	trans_pcie->fh_mask = trans_pcie->fh_init_mask;
	trans_pcie->hw_init_mask = ~iwl_read32(trans, CSR_MSIX_HW_INT_MASK_AD);
	trans_pcie->hw_mask = trans_pcie->hw_init_mask;
}

static void _iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	if (trans_pcie->is_down)
		return;

	trans_pcie->is_down = true;

	/* tell the device to stop sending interrupts */
	iwl_disable_interrupts(trans);

	/* device going down, Stop using ICT table */
	iwl_pcie_disable_ict(trans);

	/*
	 * If a HW restart happens during firmware loading,
	 * then the firmware loading might call this function
	 * and later it might be called again due to the
	 * restart. So don't process again if the device is
	 * already dead.
	 */
	if (test_and_clear_bit(STATUS_DEVICE_ENABLED, &trans->status)) {
		IWL_DEBUG_INFO(trans,
			       "DEVICE_ENABLED bit was set and is now cleared\n");
		iwl_pcie_tx_stop(trans);
		iwl_pcie_rx_stop(trans);

		/* Power-down device's busmaster DMA clocks */
		if (!trans->cfg->apmg_not_supported) {
			iwl_write_prph(trans, APMG_CLK_DIS_REG,
				       APMG_CLK_VAL_DMA_CLK_RQT);
			udelay(5);
		}
	}

	/* Make sure (redundant) we've released our request to stay awake */
	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	/* Stop the device, and put it in low power state */
	iwl_pcie_apm_stop(trans, false);

	iwl_trans_pcie_sw_reset(trans);

	/*
	 * Upon stop, the IVAR table gets erased, so msi-x won't
	 * work. This causes a bug in RF-KILL flows, since the interrupt
	 * that enables radio won't fire on the correct irq, and the
	 * driver won't be able to handle the interrupt.
	 * Configure the IVAR table again after reset.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);

	/*
	 * Upon stop, the APM issues an interrupt if HW RF kill is set.
	 * This is a bug in certain versions of the hardware.
	 * Certain devices also keep sending the HW RF kill interrupt all
	 * the time unless it is ACKed, even when the interrupt should be
	 * masked. Re-ACK all the interrupts here.
	 */
	iwl_disable_interrupts(trans);

	/* clear all status bits */
	clear_bit(STATUS_SYNC_HCMD_ACTIVE, &trans->status);
	clear_bit(STATUS_INT_ENABLED, &trans->status);
	clear_bit(STATUS_TPOWER_PMI, &trans->status);

	/*
	 * Even if we stop the HW, we still want the RF kill
	 * interrupt
	 */
	iwl_enable_rfkill_int(trans);

	/* re-take ownership to prevent other users from stealing the device */
	iwl_pcie_prepare_card_hw(trans);
}

void iwl_pcie_synchronize_irqs(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (trans_pcie->msix_enabled) {
		int i;

		for (i = 0; i < trans_pcie->alloc_vecs; i++)
			synchronize_irq(trans_pcie->msix_entries[i].vector);
	} else {
		synchronize_irq(trans_pcie->pci_dev->irq);
	}
}

static int iwl_trans_pcie_start_fw(struct iwl_trans *trans,
				   const struct fw_img *fw, bool run_in_rfkill)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool hw_rfkill;
	int ret;

	/* This may fail if AMT took ownership of the device */
	if (iwl_pcie_prepare_card_hw(trans)) {
		IWL_WARN(trans, "Exit HW not ready\n");
		ret = -EIO;
		goto out;
	}

	iwl_enable_rfkill_int(trans);

	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	/*
	 * We enabled the RF-Kill interrupt and the handler may very
	 * well be running. Disable the interrupts to make sure no other
	 * interrupt can be fired.
	 */
	iwl_disable_interrupts(trans);

	/* Make sure it finished running */
	iwl_pcie_synchronize_irqs(trans);

	mutex_lock(&trans_pcie->mutex);

	/* If platform's RF_KILL switch is NOT set to KILL */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill) {
		ret = -ERFKILL;
		goto out;
	}

	/* Someone called stop_device, don't try to start_fw */
	if (trans_pcie->is_down) {
		IWL_WARN(trans,
			 "Can't start_fw since the HW hasn't been started\n");
		ret = -EIO;
		goto out;
	}

	/* make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR,
		    CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);

	/* clear (again), then enable host interrupts */
	iwl_write32(trans, CSR_INT, 0xFFFFFFFF);

	ret = iwl_pcie_nic_init(trans);
	if (ret) {
		IWL_ERR(trans, "Unable to init nic\n");
		goto out;
	}

	/*
	 * Now, we load the firmware and don't want to be interrupted, even
	 * by the RF-Kill interrupt (hence mask all the interrupt besides the
	 * FH_TX interrupt which is needed to load the firmware). If the
	 * RF-Kill switch is toggled, we will find out after having loaded
	 * the firmware and return the proper value to the caller.
	 */
	iwl_enable_fw_load_int(trans);

	/* really make sure rfkill handshake bits are cleared */
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
	iwl_write32(trans, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);

	/* Load the given image to the HW */
	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
		ret = iwl_pcie_load_given_ucode_8000(trans, fw);
	else
		ret = iwl_pcie_load_given_ucode(trans, fw);

	/* re-check RF-Kill state since we may have missed the interrupt */
	hw_rfkill = iwl_pcie_check_hw_rf_kill(trans);
	if (hw_rfkill && !run_in_rfkill)
		ret = -ERFKILL;

out:
	mutex_unlock(&trans_pcie->mutex);
	return ret;
}

static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
{
	iwl_pcie_reset_ict(trans);
	iwl_pcie_tx_start(trans, scd_addr);
}

void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans,
				       bool was_in_rfkill)
{
	bool hw_rfkill;

	/*
	 * Check again since the RF kill state may have changed while
	 * all the interrupts were disabled, in this case we couldn't
	 * receive the RF kill interrupt and update the state in the
	 * op_mode.
	 * Don't call the op_mode if the rfkill state hasn't changed.
	 * This allows the op_mode to call stop_device from the rfkill
	 * notification without endless recursion. Under very rare
	 * circumstances, we might have a small recursion if the rfkill
	 * state changed exactly now while we were called from stop_device.
	 * This is very unlikely but can happen and is supported.
	 */
	hw_rfkill = iwl_is_rfkill_set(trans);
	if (hw_rfkill) {
		set_bit(STATUS_RFKILL_HW, &trans->status);
		set_bit(STATUS_RFKILL_OPMODE, &trans->status);
	} else {
		clear_bit(STATUS_RFKILL_HW, &trans->status);
		clear_bit(STATUS_RFKILL_OPMODE, &trans->status);
	}
	if (hw_rfkill != was_in_rfkill)
		iwl_trans_pcie_rf_kill(trans, hw_rfkill);
}

static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	bool was_in_rfkill;

	iwl_op_mode_time_point(trans->op_mode,
			       IWL_FW_INI_TIME_POINT_HOST_DEVICE_DISABLE,
			       NULL);

	mutex_lock(&trans_pcie->mutex);
	trans_pcie->opmode_down = true;
	was_in_rfkill = test_bit(STATUS_RFKILL_OPMODE, &trans->status);
	_iwl_trans_pcie_stop_device(trans);
	iwl_trans_pcie_handle_stop_rfkill(trans, was_in_rfkill);
	mutex_unlock(&trans_pcie->mutex);
}

void iwl_trans_pcie_rf_kill(struct iwl_trans *trans, bool state)
{
	struct iwl_trans_pcie __maybe_unused *trans_pcie =
		IWL_TRANS_GET_PCIE_TRANS(trans);

	lockdep_assert_held(&trans_pcie->mutex);

	IWL_WARN(trans, "reporting RF_KILL (radio %s)\n",
		 state ? "disabled" : "enabled");
	if (iwl_op_mode_hw_rf_kill(trans->op_mode, state)) {
		if (trans->trans_cfg->gen2)
			_iwl_trans_pcie_gen2_stop_device(trans);
		else
			_iwl_trans_pcie_stop_device(trans);
	}
}

void iwl_pcie_d3_complete_suspend(struct iwl_trans *trans,
				  bool test, bool reset)
{
	iwl_disable_interrupts(trans);

	/*
	 * in testing mode, the host stays awake and the
	 * hardware won't be reset (not even partially)
	 */
	if (test)
		return;

	iwl_pcie_disable_ict(trans);

	iwl_pcie_synchronize_irqs(trans);

	iwl_clear_bit(trans, CSR_GP_CNTRL,
		      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	iwl_clear_bit(trans, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_INIT_DONE);

	if (reset) {
		/*
		 * reset TX queues -- some of their registers reset during S3
		 * so if we don't reset everything here the D3 image would try
		 * to execute some invalid memory upon resume
		 */
		iwl_trans_pcie_tx_reset(trans);
	}

	iwl_pcie_set_pwr(trans, true);
}

static int iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test,
				     bool reset)
{
	int ret;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	if (!reset)
		/* Enable persistence mode to avoid reset */
		iwl_set_bit(trans, CSR_HW_IF_CONFIG_REG,
			    CSR_HW_IF_CONFIG_REG_PERSIST_MODE);

	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_SUSPEND);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate the flag in preparation for resume.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout entering D3\n");
			return -ETIMEDOUT;
		}
	}
	iwl_pcie_d3_complete_suspend(trans, test, reset);

	return 0;
}

static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans,
				    enum iwl_d3_status *status,
				    bool test, bool reset)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	u32 val;
	int ret;

	if (test) {
		iwl_enable_interrupts(trans);
		*status = IWL_D3_STATUS_ALIVE;
		goto out;
	}

	iwl_set_bit(trans, CSR_GP_CNTRL,
		    CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);

	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
	if (ret)
		return ret;

	/*
	 * Reconfigure the IVAR table in MSI-X mode, or reset the ICT
	 * table in MSI mode, since the HW reset erased it.
	 * This also enables interrupts - none will fire yet, since the
	 * device doesn't know we're waking it up; they start only once
	 * the opmode actually tells it, after this call.
	 */
	iwl_pcie_conf_msix_hw(trans_pcie);
	if (!trans_pcie->msix_enabled)
		iwl_pcie_reset_ict(trans);
	iwl_enable_interrupts(trans);

	iwl_pcie_set_pwr(trans, false);

	if (!reset) {
		iwl_clear_bit(trans, CSR_GP_CNTRL,
			      CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
	} else {
		iwl_trans_pcie_tx_reset(trans);

		ret = iwl_pcie_rx_init(trans);
		if (ret) {
			IWL_ERR(trans,
				"Failed to resume the device (RX reset)\n");
			return ret;
		}
	}

	IWL_DEBUG_POWER(trans, "WFPM value upon resume = 0x%08X\n",
			iwl_read_umac_prph(trans, WFPM_GP2));

	val = iwl_read32(trans, CSR_RESET);
	if (val & CSR_RESET_REG_FLAG_NEVO_RESET)
		*status = IWL_D3_STATUS_RESET;
	else
		*status = IWL_D3_STATUS_ALIVE;

out:
	if (*status == IWL_D3_STATUS_ALIVE &&
	    trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
		trans_pcie->sx_complete = false;
		iwl_write_umac_prph(trans, UREG_DOORBELL_TO_ISR6,
				    UREG_DOORBELL_TO_ISR6_RESUME);

		ret = wait_event_timeout(trans_pcie->sx_waitq,
					 trans_pcie->sx_complete, 2 * HZ);
		/*
		 * Invalidate the flag in preparation for the next suspend.
		 */
		trans_pcie->sx_complete = false;

		if (!ret) {
			IWL_ERR(trans, "Timeout exiting D3\n");
			return -ETIMEDOUT;
		}
	}
	return 0;
}

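/*
 * Vector budgeting: max_irqs = min(num_online_cpus() + 2, max_rx_queues),
 * i.e. (illustratively) one RSS RX queue per CPU plus one vector for the
 * fallback RX queue and one for the non-RX causes.  If the MSI-X range
 * allocation fails we fall back to plain MSI and, failing that, to INTx
 * via the workaround below.
 */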
static void
iwl_pcie_set_interrupt_capa(struct pci_dev *pdev,
			    struct iwl_trans *trans,
			    const struct iwl_cfg_trans_params *cfg_trans)
{
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
	int max_irqs, num_irqs, i, ret;
	u16 pci_cmd;
	u32 max_rx_queues = IWL_MAX_RX_HW_QUEUES;

	if (!cfg_trans->mq_rx_supported)
		goto enable_msi;

	if (cfg_trans->device_family <= IWL_DEVICE_FAMILY_9000)
		max_rx_queues = IWL_9000_MAX_RX_HW_QUEUES;

	max_irqs = min_t(u32, num_online_cpus() + 2, max_rx_queues);
	for (i = 0; i < max_irqs; i++)
		trans_pcie->msix_entries[i].entry = i;

	num_irqs = pci_enable_msix_range(pdev, trans_pcie->msix_entries,
					 MSIX_MIN_INTERRUPT_VECTORS,
					 max_irqs);
	if (num_irqs < 0) {
		IWL_DEBUG_INFO(trans,
			       "Failed to enable msi-x mode (ret %d). Moving to msi mode.\n",
			       num_irqs);
		goto enable_msi;
	}
	trans_pcie->def_irq = (num_irqs == max_irqs) ? num_irqs - 1 : 0;

	IWL_DEBUG_INFO(trans,
		       "MSI-X enabled. %d interrupt vectors were allocated\n",
		       num_irqs);

	/*
	 * In case the OS provides fewer interrupts than requested, different
	 * causes will share the same interrupt vector as follows:
	 * One interrupt less: non rx causes shared with FBQ.
	 * Two interrupts less: non rx causes shared with FBQ and RSS.
	 * More than two interrupts: we will use fewer RSS queues.
	 */
	if (num_irqs <= max_irqs - 2) {
		trans_pcie->trans->num_rx_queues = num_irqs + 1;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX |
			IWL_SHARED_IRQ_FIRST_RSS;
	} else if (num_irqs == max_irqs - 1) {
		trans_pcie->trans->num_rx_queues = num_irqs;
		trans_pcie->shared_vec_mask = IWL_SHARED_IRQ_NON_RX;
	} else {
		trans_pcie->trans->num_rx_queues = num_irqs - 1;
	}
	WARN_ON(trans_pcie->trans->num_rx_queues > IWL_MAX_RX_HW_QUEUES);

	trans_pcie->alloc_vecs = num_irqs;
	trans_pcie->msix_enabled = true;
	return;

enable_msi:
	ret = pci_enable_msi(pdev);
	if (ret) {
		dev_err(&pdev->dev, "pci_enable_msi failed - %d\n", ret);
		/* enable rfkill interrupt: hw bug w/a */
		pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
		if (pci_cmd & PCI_COMMAND_INTX_DISABLE) {
			pci_cmd &= ~PCI_COMMAND_INTX_DISABLE;
			pci_write_config_word(pdev, PCI_COMMAND, pci_cmd);
		}
	}
}

static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans)
{
	int iter_rx_q, i, ret, cpu, offset;
	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);

	i = trans_pcie->shared_vec_mask & IWL_SHARED_IRQ_FIRST_RSS ? 0 : 1;
	iter_rx_q = trans_pcie->trans->num_rx_queues - 1 + i;
	offset = 1 + i;
	for (; i < iter_rx_q ; i++) {
		/*
		 * Get the cpu prior to the place to search
		 * (i.e. return will be > i - 1).
		 */
		cpu = cpumask_next(i - offset, cpu_online_mask);
		cpumask_set_cpu(cpu, &trans_pcie->affinity_mask[i]);
		ret = irq_set_affinity_hint(trans_pcie->msix_entries[i].vector,
					    &trans_pcie->affinity_mask[i]);
		if (ret)
			IWL_ERR(trans_pcie->trans,
				"Failed to set affinity mask for IRQ %d\n",
				i);
	}
}

static int iwl_pcie_init_msix_handler(struct pci_dev *pdev,
				      struct iwl_trans_pcie *trans_pcie)
{
	int i;

	for (i = 0; i < trans_pcie->alloc_vecs; i++) {
		int ret;
		struct msix_entry *msix_entry;
		const char *qname = queue_name(&pdev->dev, trans_pcie, i);

		if (!qname)
			return -ENOMEM;

		msix_entry = &trans_pcie->msix_entries[i];
		ret = devm_request_threaded_irq(&pdev->dev,
						msix_entry->vector,
						iwl_pcie_msix_isr,
						(i == trans_pcie->def_irq) ?
						iwl_pcie_irq_msix_handler :
						iwl_pcie_irq_rx_msix_handler,
						IRQF_SHARED,
						qname,
						msix_entry);
		if (ret) {
			IWL_ERR(trans_pcie->trans,
				"Error allocating IRQ %d\n", i);

			return ret;
		}
	}
	iwl_pcie_irq_set_affinity(trans_pcie->trans);

	return 0;
}

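/*
 * On 9000/22000 family devices, a set persistence bit in HPM_DEBUG
 * keeps parts of the device state across reset; clear it unless the
 * WFPM write protection is armed, in which case the write would be
 * ignored and we fail with -EPERM.  The 0xa5a5a5a0 check (an assumed
 * "register read failed" marker) skips invalid reads.
 */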
1685 static int iwl_trans_pcie_clear_persistence_bit(struct iwl_trans *trans)
1686 {
1687 	u32 hpm, wprot;
1688 
1689 	switch (trans->trans_cfg->device_family) {
1690 	case IWL_DEVICE_FAMILY_9000:
1691 		wprot = PREG_PRPH_WPROT_9000;
1692 		break;
1693 	case IWL_DEVICE_FAMILY_22000:
1694 		wprot = PREG_PRPH_WPROT_22000;
1695 		break;
1696 	default:
1697 		return 0;
1698 	}
1699 
1700 	hpm = iwl_read_umac_prph_no_grab(trans, HPM_DEBUG);
1701 	if (hpm != 0xa5a5a5a0 && (hpm & PERSISTENCE_BIT)) {
1702 		u32 wprot_val = iwl_read_umac_prph_no_grab(trans, wprot);
1703 
1704 		if (wprot_val & PREG_WFPM_ACCESS) {
1705 			IWL_ERR(trans,
1706 				"Error, can not clear persistence bit\n");
1707 			return -EPERM;
1708 		}
1709 		iwl_write_umac_prph_no_grab(trans, HPM_DEBUG,
1710 					    hpm & ~PERSISTENCE_BIT);
1711 	}
1712 
1713 	return 0;
1714 }
1715 
1716 static int iwl_pcie_gen2_force_power_gating(struct iwl_trans *trans)
1717 {
1718 	int ret;
1719 
1720 	ret = iwl_finish_nic_init(trans, trans->trans_cfg);
1721 	if (ret < 0)
1722 		return ret;
1723 
1724 	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1725 			  HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1726 	udelay(20);
1727 	iwl_set_bits_prph(trans, HPM_HIPM_GEN_CFG,
1728 			  HPM_HIPM_GEN_CFG_CR_PG_EN |
1729 			  HPM_HIPM_GEN_CFG_CR_SLP_EN);
1730 	udelay(20);
1731 	iwl_clear_bits_prph(trans, HPM_HIPM_GEN_CFG,
1732 			    HPM_HIPM_GEN_CFG_CR_FORCE_ACTIVE);
1733 
1734 	iwl_trans_pcie_sw_reset(trans);
1735 
1736 	return 0;
1737 }
1738 
1739 static int _iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1740 {
1741 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1742 	int err;
1743 
1744 	lockdep_assert_held(&trans_pcie->mutex);
1745 
1746 	err = iwl_pcie_prepare_card_hw(trans);
1747 	if (err) {
1748 		IWL_ERR(trans, "Error while preparing HW: %d\n", err);
1749 		return err;
1750 	}
1751 
1752 	err = iwl_trans_pcie_clear_persistence_bit(trans);
1753 	if (err)
1754 		return err;
1755 
1756 	iwl_trans_pcie_sw_reset(trans);
1757 
1758 	if (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_22000 &&
1759 	    trans->trans_cfg->integrated) {
1760 		err = iwl_pcie_gen2_force_power_gating(trans);
1761 		if (err)
1762 			return err;
1763 	}
1764 
1765 	err = iwl_pcie_apm_init(trans);
1766 	if (err)
1767 		return err;
1768 
1769 	iwl_pcie_init_msix(trans_pcie);
1770 
1771 	/* From now on, the op_mode will be kept updated about RF kill state */
1772 	iwl_enable_rfkill_int(trans);
1773 
1774 	trans_pcie->opmode_down = false;
1775 
1776 	/* Set is_down to false here so that...*/
1777 	trans_pcie->is_down = false;
1778 
1779 	/* ...rfkill can call stop_device and set it false if needed */
1780 	iwl_pcie_check_hw_rf_kill(trans);
1781 
1782 	return 0;
1783 }
1784 
1785 static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
1786 {
1787 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1788 	int ret;
1789 
1790 	mutex_lock(&trans_pcie->mutex);
1791 	ret = _iwl_trans_pcie_start_hw(trans);
1792 	mutex_unlock(&trans_pcie->mutex);
1793 
1794 	return ret;
1795 }
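
/*
 * Illustrative sketch, not driver code: the two functions above use the
 * common "__locked helper plus locking wrapper" idiom, where
 * lockdep_assert_held() documents (and, with lockdep enabled, enforces)
 * the helper's locking contract. In generic, hypothetical form:
 */
struct example_obj {
	struct mutex lock;
	int state;
};

static int _example_do_work(struct example_obj *obj)
{
	lockdep_assert_held(&obj->lock); /* caller must hold obj->lock */
	obj->state++;
	return 0;
}

static int example_do_work(struct example_obj *obj)
{
	int ret;

	mutex_lock(&obj->lock);
	ret = _example_do_work(obj);
	mutex_unlock(&obj->lock);

	return ret;
}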
1796 
1797 static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
1798 {
1799 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1800 
1801 	mutex_lock(&trans_pcie->mutex);
1802 
1803 	/* disable interrupts - don't enable HW RF kill interrupt */
1804 	iwl_disable_interrupts(trans);
1805 
1806 	iwl_pcie_apm_stop(trans, true);
1807 
1808 	iwl_disable_interrupts(trans);
1809 
1810 	iwl_pcie_disable_ict(trans);
1811 
1812 	mutex_unlock(&trans_pcie->mutex);
1813 
1814 	iwl_pcie_synchronize_irqs(trans);
1815 }
1816 
1817 static void iwl_trans_pcie_write8(struct iwl_trans *trans, u32 ofs, u8 val)
1818 {
1819 	writeb(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1820 }
1821 
1822 static void iwl_trans_pcie_write32(struct iwl_trans *trans, u32 ofs, u32 val)
1823 {
1824 	writel(val, IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1825 }
1826 
1827 static u32 iwl_trans_pcie_read32(struct iwl_trans *trans, u32 ofs)
1828 {
1829 	return readl(IWL_TRANS_GET_PCIE_TRANS(trans)->hw_base + ofs);
1830 }
1831 
1832 static u32 iwl_trans_pcie_prph_msk(struct iwl_trans *trans)
1833 {
1834 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1835 		return 0x00FFFFFF;
1836 	else
1837 		return 0x000FFFFF;
1838 }
1839 
1840 static u32 iwl_trans_pcie_read_prph(struct iwl_trans *trans, u32 reg)
1841 {
1842 	u32 mask = iwl_trans_pcie_prph_msk(trans);
1843 
1844 	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_RADDR,
1845 			       ((reg & mask) | (3 << 24)));
1846 	return iwl_trans_pcie_read32(trans, HBUS_TARG_PRPH_RDAT);
1847 }
1848 
1849 static void iwl_trans_pcie_write_prph(struct iwl_trans *trans, u32 addr,
1850 				      u32 val)
1851 {
1852 	u32 mask = iwl_trans_pcie_prph_msk(trans);
1853 
1854 	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WADDR,
1855 			       ((addr & mask) | (3 << 24)));
1856 	iwl_trans_pcie_write32(trans, HBUS_TARG_PRPH_WDAT, val);
1857 }
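
/*
 * Illustrative sketch, not driver code: the pair above implements
 * windowed (indirect) register access - write the target periphery
 * address into one MMIO register, then move the data through a second
 * one. A read-modify-write composed from these primitives could look
 * like the hypothetical helper below; note that real callers must also
 * hold NIC access (see iwl_trans_pcie_grab_nic_access()) around such
 * sequences:
 */
static void example_prph_set_bits(struct iwl_trans *trans, u32 reg, u32 bits)
{
	u32 val = iwl_trans_pcie_read_prph(trans, reg);

	iwl_trans_pcie_write_prph(trans, reg, val | bits);
}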
1858 
1859 static void iwl_trans_pcie_configure(struct iwl_trans *trans,
1860 				     const struct iwl_trans_config *trans_cfg)
1861 {
1862 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1863 
1864 	trans->txqs.cmd.q_id = trans_cfg->cmd_queue;
1865 	trans->txqs.cmd.fifo = trans_cfg->cmd_fifo;
1866 	trans->txqs.cmd.wdg_timeout = trans_cfg->cmd_q_wdg_timeout;
1867 	trans->txqs.page_offs = trans_cfg->cb_data_offs;
1868 	trans->txqs.dev_cmd_offs = trans_cfg->cb_data_offs + sizeof(void *);
1869 
1870 	if (WARN_ON(trans_cfg->n_no_reclaim_cmds > MAX_NO_RECLAIM_CMDS))
1871 		trans_pcie->n_no_reclaim_cmds = 0;
1872 	else
1873 		trans_pcie->n_no_reclaim_cmds = trans_cfg->n_no_reclaim_cmds;
1874 	if (trans_pcie->n_no_reclaim_cmds)
1875 		memcpy(trans_pcie->no_reclaim_cmds, trans_cfg->no_reclaim_cmds,
1876 		       trans_pcie->n_no_reclaim_cmds * sizeof(u8));
1877 
1878 	trans_pcie->rx_buf_size = trans_cfg->rx_buf_size;
1879 	trans_pcie->rx_page_order =
1880 		iwl_trans_get_rb_size_order(trans_pcie->rx_buf_size);
1881 	trans_pcie->rx_buf_bytes =
1882 		iwl_trans_get_rb_size(trans_pcie->rx_buf_size);
1883 	trans_pcie->supported_dma_mask = DMA_BIT_MASK(12);
1884 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210)
1885 		trans_pcie->supported_dma_mask = DMA_BIT_MASK(11);
1886 
1887 	trans->txqs.bc_table_dword = trans_cfg->bc_table_dword;
1888 	trans_pcie->scd_set_active = trans_cfg->scd_set_active;
1889 
1890 	trans->command_groups = trans_cfg->command_groups;
1891 	trans->command_groups_size = trans_cfg->command_groups_size;
1892 
1893 	/* Initialize NAPI here - it should be before registering to mac80211
1894 	 * in the opmode but after the HW struct is allocated.
1895 	 * As this function may be called again in some corner cases don't
1896 	 * do anything if NAPI was already initialized.
1897 	 */
1898 	if (trans_pcie->napi_dev.reg_state != NETREG_DUMMY)
1899 		init_dummy_netdev(&trans_pcie->napi_dev);
1900 
1901 	trans_pcie->fw_reset_handshake = trans_cfg->fw_reset_handshake;
1902 }
1903 
1904 void iwl_trans_pcie_free(struct iwl_trans *trans)
1905 {
1906 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1907 	int i;
1908 
1909 	iwl_pcie_synchronize_irqs(trans);
1910 
1911 	if (trans->trans_cfg->gen2)
1912 		iwl_txq_gen2_tx_free(trans);
1913 	else
1914 		iwl_pcie_tx_free(trans);
1915 	iwl_pcie_rx_free(trans);
1916 
1917 	if (trans_pcie->rba.alloc_wq) {
1918 		destroy_workqueue(trans_pcie->rba.alloc_wq);
1919 		trans_pcie->rba.alloc_wq = NULL;
1920 	}
1921 
1922 	if (trans_pcie->msix_enabled) {
1923 		for (i = 0; i < trans_pcie->alloc_vecs; i++) {
1924 			irq_set_affinity_hint(
1925 				trans_pcie->msix_entries[i].vector,
1926 				NULL);
1927 		}
1928 
1929 		trans_pcie->msix_enabled = false;
1930 	} else {
1931 		iwl_pcie_free_ict(trans);
1932 	}
1933 
1934 	iwl_pcie_free_fw_monitor(trans);
1935 
1936 	if (trans_pcie->pnvm_dram.size)
1937 		dma_free_coherent(trans->dev, trans_pcie->pnvm_dram.size,
1938 				  trans_pcie->pnvm_dram.block,
1939 				  trans_pcie->pnvm_dram.physical);
1940 
1941 	mutex_destroy(&trans_pcie->mutex);
1942 	iwl_trans_free(trans);
1943 }
1944 
1945 static void iwl_trans_pcie_set_pmi(struct iwl_trans *trans, bool state)
1946 {
1947 	if (state)
1948 		set_bit(STATUS_TPOWER_PMI, &trans->status);
1949 	else
1950 		clear_bit(STATUS_TPOWER_PMI, &trans->status);
1951 }
1952 
1953 struct iwl_trans_pcie_removal {
1954 	struct pci_dev *pdev;
1955 	struct work_struct work;
1956 };
1957 
1958 static void iwl_trans_pcie_removal_wk(struct work_struct *wk)
1959 {
1960 	struct iwl_trans_pcie_removal *removal =
1961 		container_of(wk, struct iwl_trans_pcie_removal, work);
1962 	struct pci_dev *pdev = removal->pdev;
1963 	static char *prop[] = {"EVENT=INACCESSIBLE", NULL};
1964 
1965 	dev_err(&pdev->dev, "Device gone - attempting removal\n");
1966 	kobject_uevent_env(&pdev->dev.kobj, KOBJ_CHANGE, prop);
1967 	pci_lock_rescan_remove();
1968 	pci_dev_put(pdev);
1969 	pci_stop_and_remove_bus_device(pdev);
1970 	pci_unlock_rescan_remove();
1971 
1972 	kfree(removal);
1973 	module_put(THIS_MODULE);
1974 }
1975 
1976 static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans)
1977 {
1978 	int ret;
1979 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
1980 
1981 	spin_lock_bh(&trans_pcie->reg_lock);
1982 
1983 	if (trans_pcie->cmd_hold_nic_awake)
1984 		goto out;
1985 
1986 	/* this bit wakes up the NIC */
1987 	__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
1988 				 CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
1989 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_8000)
1990 		udelay(2);
1991 
1992 	/*
1993 	 * These bits say the device is running, and should keep running for
1994 	 * at least a short while (at least as long as MAC_ACCESS_REQ stays 1),
1995 	 * but they do not indicate that embedded SRAM is restored yet;
1996 	 * HW with volatile SRAM must save/restore contents to/from
1997 	 * host DRAM when sleeping/waking for power-saving.
1998 	 * Each direction takes approximately 1/4 millisecond; with this
1999 	 * overhead, it's a good idea to grab and hold MAC_ACCESS_REQUEST if a
2000 	 * series of register accesses are expected (e.g. reading Event Log),
2001 	 * to keep device from sleeping.
2002 	 *
2003 	 * CSR_UCODE_DRV_GP1 register bit MAC_SLEEP == 0 indicates that
2004 	 * SRAM is okay/restored.  We don't check that here because this call
2005 	 * is just for hardware register access; but GP1 MAC_SLEEP
2006 	 * check is a good idea before accessing the SRAM of HW with
2007 	 * volatile SRAM (e.g. reading Event Log).
2008 	 *
2009 	 * 5000 series and later (including 1000 series) have non-volatile SRAM,
2010 	 * and do not save/restore SRAM when power cycling.
2011 	 */
2012 	ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
2013 			   CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
2014 			   (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
2015 			    CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP), 15000);
2016 	if (unlikely(ret < 0)) {
2017 		u32 cntrl = iwl_read32(trans, CSR_GP_CNTRL);
2018 
2019 		WARN_ONCE(1,
2020 			  "Timeout waiting for hardware access (CSR_GP_CNTRL 0x%08x)\n",
2021 			  cntrl);
2022 
2023 		iwl_trans_pcie_dump_regs(trans);
2024 
2025 		if (iwlwifi_mod_params.remove_when_gone && cntrl == ~0U) {
2026 			struct iwl_trans_pcie_removal *removal;
2027 
2028 			if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2029 				goto err;
2030 
2031 			IWL_ERR(trans, "Device gone - scheduling removal!\n");
2032 
2033 			/*
2034 			 * Take a module reference to avoid doing this
2035 			 * while the module is being unloaded, and to
2036 			 * avoid scheduling a work item whose code is
2037 			 * being removed.
2038 			 */
2039 			if (!try_module_get(THIS_MODULE)) {
2040 				IWL_ERR(trans,
2041 					"Module is being unloaded - abort\n");
2042 				goto err;
2043 			}
2044 
2045 			removal = kzalloc(sizeof(*removal), GFP_ATOMIC);
2046 			if (!removal) {
2047 				module_put(THIS_MODULE);
2048 				goto err;
2049 			}
2050 			/*
2051 			 * We don't need to clear this flag, because
2052 			 * the trans will be freed and reallocated.
2053 			 */
2054 			set_bit(STATUS_TRANS_DEAD, &trans->status);
2055 
2056 			removal->pdev = to_pci_dev(trans->dev);
2057 			INIT_WORK(&removal->work, iwl_trans_pcie_removal_wk);
2058 			pci_dev_get(removal->pdev);
2059 			schedule_work(&removal->work);
2060 		} else {
2061 			iwl_write32(trans, CSR_RESET,
2062 				    CSR_RESET_REG_FLAG_FORCE_NMI);
2063 		}
2064 
2065 err:
2066 		spin_unlock_bh(&trans_pcie->reg_lock);
2067 		return false;
2068 	}
2069 
2070 out:
2071 	/*
2072 	 * Fool sparse by pretending to release the lock - sparse will
2073 	 * track nic_access anyway.
2074 	 */
2075 	__release(&trans_pcie->reg_lock);
2076 	return true;
2077 }
2078 
2079 static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans)
2080 {
2081 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2082 
2083 	lockdep_assert_held(&trans_pcie->reg_lock);
2084 
2085 	/*
2086 	 * Fool sparse by pretending to acquire the lock - sparse will
2087 	 * track nic_access anyway.
2088 	 */
2089 	__acquire(&trans_pcie->reg_lock);
2090 
2091 	if (trans_pcie->cmd_hold_nic_awake)
2092 		goto out;
2093 
2094 	__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
2095 				   CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
2096 	/*
2097 	 * Above we read the CSR_GP_CNTRL register, which will flush
2098 	 * any previous writes, but we need the write that clears the
2099 	 * MAC_ACCESS_REQ bit to be performed before any other writes
2100 	 * scheduled on different CPUs (after we drop reg_lock).
2101 	 */
2102 out:
2103 	spin_unlock_bh(&trans_pcie->reg_lock);
2104 }
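
/*
 * Illustrative sketch, not driver code: all direct register bursts are
 * bracketed by the grab/release pair above, usually through the
 * iwl_trans_grab_nic_access() wrapper:
 *
 *	if (iwl_trans_grab_nic_access(trans)) {
 *		val = iwl_read32(trans, SOME_REG);
 *		iwl_write32(trans, SOME_REG, val | SOME_BIT);
 *		iwl_trans_release_nic_access(trans);
 *	} else {
 *		ret = -EBUSY;
 *	}
 *
 * The -EBUSY leg runs when the NIC never woke up. SOME_REG and SOME_BIT
 * are placeholders; iwl_trans_pcie_read_mem() below is a real user of
 * exactly this pattern.
 */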
2105 
2106 static int iwl_trans_pcie_read_mem(struct iwl_trans *trans, u32 addr,
2107 				   void *buf, int dwords)
2108 {
2109 	int offs = 0;
2110 	u32 *vals = buf;
2111 
2112 	while (offs < dwords) {
2113 		/* limit the time we spin here under lock to 1/2s */
2114 		unsigned long end = jiffies + HZ / 2;
2115 		bool resched = false;
2116 
2117 		if (iwl_trans_grab_nic_access(trans)) {
2118 			iwl_write32(trans, HBUS_TARG_MEM_RADDR,
2119 				    addr + 4 * offs);
2120 
2121 			while (offs < dwords) {
2122 				vals[offs] = iwl_read32(trans,
2123 							HBUS_TARG_MEM_RDAT);
2124 				offs++;
2125 
2126 				if (time_after(jiffies, end)) {
2127 					resched = true;
2128 					break;
2129 				}
2130 			}
2131 			iwl_trans_release_nic_access(trans);
2132 
2133 			if (resched)
2134 				cond_resched();
2135 		} else {
2136 			return -EBUSY;
2137 		}
2138 	}
2139 
2140 	return 0;
2141 }
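
/*
 * Illustrative sketch, not driver code: a hypothetical caller of the
 * helper above, highlighting that the length argument counts dwords,
 * not bytes:
 */
static int example_read_sram(struct iwl_trans *trans, u32 addr,
			     u32 *out, size_t bytes)
{
	if (WARN_ON(bytes & 3)) /* the helper copies whole dwords only */
		return -EINVAL;

	return iwl_trans_pcie_read_mem(trans, addr, out, bytes / sizeof(u32));
}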
2142 
2143 static int iwl_trans_pcie_write_mem(struct iwl_trans *trans, u32 addr,
2144 				    const void *buf, int dwords)
2145 {
2146 	int offs, ret = 0;
2147 	const u32 *vals = buf;
2148 
2149 	if (iwl_trans_grab_nic_access(trans)) {
2150 		iwl_write32(trans, HBUS_TARG_MEM_WADDR, addr);
2151 		for (offs = 0; offs < dwords; offs++)
2152 			iwl_write32(trans, HBUS_TARG_MEM_WDAT,
2153 				    vals ? vals[offs] : 0);
2154 		iwl_trans_release_nic_access(trans);
2155 	} else {
2156 		ret = -EBUSY;
2157 	}
2158 	return ret;
2159 }
2160 
2161 static int iwl_trans_pcie_read_config32(struct iwl_trans *trans, u32 ofs,
2162 					u32 *val)
2163 {
2164 	return pci_read_config_dword(IWL_TRANS_GET_PCIE_TRANS(trans)->pci_dev,
2165 				     ofs, val);
2166 }
2167 
2168 static void iwl_trans_pcie_block_txq_ptrs(struct iwl_trans *trans, bool block)
2169 {
2170 	int i;
2171 
2172 	for (i = 0; i < trans->trans_cfg->base_params->num_of_queues; i++) {
2173 		struct iwl_txq *txq = trans->txqs.txq[i];
2174 
2175 		if (i == trans->txqs.cmd.q_id)
2176 			continue;
2177 
2178 		spin_lock_bh(&txq->lock);
2179 
2180 		if (!block && !(WARN_ON_ONCE(!txq->block))) {
2181 			txq->block--;
2182 			if (!txq->block) {
2183 				iwl_write32(trans, HBUS_TARG_WRPTR,
2184 					    txq->write_ptr | (i << 8));
2185 			}
2186 		} else if (block) {
2187 			txq->block++;
2188 		}
2189 
2190 		spin_unlock_bh(&txq->lock);
2191 	}
2192 }
2193 
2194 #define IWL_FLUSH_WAIT_MS	2000
2195 
2196 static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue,
2197 				       struct iwl_trans_rxq_dma_data *data)
2198 {
2199 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2200 
2201 	if (queue >= trans->num_rx_queues || !trans_pcie->rxq)
2202 		return -EINVAL;
2203 
2204 	data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma;
2205 	data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma;
2206 	data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma;
2207 	data->fr_bd_wid = 0;
2208 
2209 	return 0;
2210 }
2211 
2212 static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx)
2213 {
2214 	struct iwl_txq *txq;
2215 	unsigned long now = jiffies;
2216 	bool overflow_tx;
2217 	u8 wr_ptr;
2218 
2219 	/* Make sure the NIC is still alive in the bus */
2220 	if (test_bit(STATUS_TRANS_DEAD, &trans->status))
2221 		return -ENODEV;
2222 
2223 	if (!test_bit(txq_idx, trans->txqs.queue_used))
2224 		return -EINVAL;
2225 
2226 	IWL_DEBUG_TX_QUEUES(trans, "Emptying queue %d...\n", txq_idx);
2227 	txq = trans->txqs.txq[txq_idx];
2228 
2229 	spin_lock_bh(&txq->lock);
2230 	overflow_tx = txq->overflow_tx ||
2231 		      !skb_queue_empty(&txq->overflow_q);
2232 	spin_unlock_bh(&txq->lock);
2233 
2234 	wr_ptr = READ_ONCE(txq->write_ptr);
2235 
2236 	while ((txq->read_ptr != READ_ONCE(txq->write_ptr) ||
2237 		overflow_tx) &&
2238 	       !time_after(jiffies,
2239 			   now + msecs_to_jiffies(IWL_FLUSH_WAIT_MS))) {
2240 		u8 write_ptr = READ_ONCE(txq->write_ptr);
2241 
2242 		/*
2243 		 * If write pointer moved during the wait, warn only
2244 		 * if the TX came from op mode. In case TX came from
2245 		 * trans layer (overflow TX) don't warn.
2246 		 */
2247 		if (WARN_ONCE(wr_ptr != write_ptr && !overflow_tx,
2248 			      "WR pointer moved while flushing %d -> %d\n",
2249 			      wr_ptr, write_ptr))
2250 			return -ETIMEDOUT;
2251 		wr_ptr = write_ptr;
2252 
2253 		usleep_range(1000, 2000);
2254 
2255 		spin_lock_bh(&txq->lock);
2256 		overflow_tx = txq->overflow_tx ||
2257 			      !skb_queue_empty(&txq->overflow_q);
2258 		spin_unlock_bh(&txq->lock);
2259 	}
2260 
2261 	if (txq->read_ptr != txq->write_ptr) {
2262 		IWL_ERR(trans,
2263 			"failed to flush TX FIFO queue %d\n", txq_idx);
2264 		iwl_txq_log_scd_error(trans, txq);
2265 		return -ETIMEDOUT;
2266 	}
2267 
2268 	IWL_DEBUG_TX_QUEUES(trans, "Queue %d is now empty.\n", txq_idx);
2269 
2270 	return 0;
2271 }
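
/*
 * Illustrative sketch, not driver code: the wait loop above is the
 * classic jiffies-based poll - take a deadline, re-check the condition,
 * sleep briefly, and give up with -ETIMEDOUT once time_after() fires.
 * Reduced to its skeleton (cond() being a hypothetical predicate):
 */
static int example_poll_until(bool (*cond)(void *data), void *data,
			      unsigned int timeout_ms)
{
	unsigned long deadline = jiffies + msecs_to_jiffies(timeout_ms);

	while (!cond(data)) {
		if (time_after(jiffies, deadline))
			return -ETIMEDOUT;
		usleep_range(1000, 2000); /* sleeps - no atomic context */
	}

	return 0;
}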
2272 
2273 static int iwl_trans_pcie_wait_txqs_empty(struct iwl_trans *trans, u32 txq_bm)
2274 {
2275 	int cnt;
2276 	int ret = 0;
2277 
2278 	/* waiting for all the TX frames to complete might take a while */
2279 	for (cnt = 0;
2280 	     cnt < trans->trans_cfg->base_params->num_of_queues;
2281 	     cnt++) {
2282 
2283 		if (cnt == trans->txqs.cmd.q_id)
2284 			continue;
2285 		if (!test_bit(cnt, trans->txqs.queue_used))
2286 			continue;
2287 		if (!(BIT(cnt) & txq_bm))
2288 			continue;
2289 
2290 		ret = iwl_trans_pcie_wait_txq_empty(trans, cnt);
2291 		if (ret)
2292 			break;
2293 	}
2294 
2295 	return ret;
2296 }
2297 
2298 static void iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans, u32 reg,
2299 					 u32 mask, u32 value)
2300 {
2301 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2302 
2303 	spin_lock_bh(&trans_pcie->reg_lock);
2304 	__iwl_trans_pcie_set_bits_mask(trans, reg, mask, value);
2305 	spin_unlock_bh(&trans_pcie->reg_lock);
2306 }
2307 
2308 static const char *get_csr_string(int cmd)
2309 {
2310 #define IWL_CMD(x) case x: return #x
2311 	switch (cmd) {
2312 	IWL_CMD(CSR_HW_IF_CONFIG_REG);
2313 	IWL_CMD(CSR_INT_COALESCING);
2314 	IWL_CMD(CSR_INT);
2315 	IWL_CMD(CSR_INT_MASK);
2316 	IWL_CMD(CSR_FH_INT_STATUS);
2317 	IWL_CMD(CSR_GPIO_IN);
2318 	IWL_CMD(CSR_RESET);
2319 	IWL_CMD(CSR_GP_CNTRL);
2320 	IWL_CMD(CSR_HW_REV);
2321 	IWL_CMD(CSR_EEPROM_REG);
2322 	IWL_CMD(CSR_EEPROM_GP);
2323 	IWL_CMD(CSR_OTP_GP_REG);
2324 	IWL_CMD(CSR_GIO_REG);
2325 	IWL_CMD(CSR_GP_UCODE_REG);
2326 	IWL_CMD(CSR_GP_DRIVER_REG);
2327 	IWL_CMD(CSR_UCODE_DRV_GP1);
2328 	IWL_CMD(CSR_UCODE_DRV_GP2);
2329 	IWL_CMD(CSR_LED_REG);
2330 	IWL_CMD(CSR_DRAM_INT_TBL_REG);
2331 	IWL_CMD(CSR_GIO_CHICKEN_BITS);
2332 	IWL_CMD(CSR_ANA_PLL_CFG);
2333 	IWL_CMD(CSR_HW_REV_WA_REG);
2334 	IWL_CMD(CSR_MONITOR_STATUS_REG);
2335 	IWL_CMD(CSR_DBG_HPET_MEM_REG);
2336 	default:
2337 		return "UNKNOWN";
2338 	}
2339 #undef IWL_CMD
2340 }
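
/*
 * Illustrative sketch, not driver code: get_csr_string() leans on the
 * preprocessor stringize operator - "#x" expands a macro argument into
 * a string literal, so one line per register both matches the value and
 * names it. The same trick in a tiny, hypothetical form:
 */
static const char *example_errno_name(int err)
{
#define EXAMPLE_CASE(x) case x: return #x
	switch (err) {
	EXAMPLE_CASE(EINVAL);
	EXAMPLE_CASE(ENOMEM);
	EXAMPLE_CASE(EBUSY);
	default:
		return "UNKNOWN";
	}
#undef EXAMPLE_CASE
}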
2341 
2342 void iwl_pcie_dump_csr(struct iwl_trans *trans)
2343 {
2344 	int i;
2345 	static const u32 csr_tbl[] = {
2346 		CSR_HW_IF_CONFIG_REG,
2347 		CSR_INT_COALESCING,
2348 		CSR_INT,
2349 		CSR_INT_MASK,
2350 		CSR_FH_INT_STATUS,
2351 		CSR_GPIO_IN,
2352 		CSR_RESET,
2353 		CSR_GP_CNTRL,
2354 		CSR_HW_REV,
2355 		CSR_EEPROM_REG,
2356 		CSR_EEPROM_GP,
2357 		CSR_OTP_GP_REG,
2358 		CSR_GIO_REG,
2359 		CSR_GP_UCODE_REG,
2360 		CSR_GP_DRIVER_REG,
2361 		CSR_UCODE_DRV_GP1,
2362 		CSR_UCODE_DRV_GP2,
2363 		CSR_LED_REG,
2364 		CSR_DRAM_INT_TBL_REG,
2365 		CSR_GIO_CHICKEN_BITS,
2366 		CSR_ANA_PLL_CFG,
2367 		CSR_MONITOR_STATUS_REG,
2368 		CSR_HW_REV_WA_REG,
2369 		CSR_DBG_HPET_MEM_REG
2370 	};
2371 	IWL_ERR(trans, "CSR values:\n");
2372 	IWL_ERR(trans, "(2nd byte of CSR_INT_COALESCING is "
2373 		"CSR_INT_PERIODIC_REG)\n");
2374 	for (i = 0; i < ARRAY_SIZE(csr_tbl); i++) {
2375 		IWL_ERR(trans, "  %25s: 0X%08x\n",
2376 			get_csr_string(csr_tbl[i]),
2377 			iwl_read32(trans, csr_tbl[i]));
2378 	}
2379 }
2380 
2381 #ifdef CONFIG_IWLWIFI_DEBUGFS
2382 /* creation and removal of debugfs files */
2383 #define DEBUGFS_ADD_FILE(name, parent, mode) do {			\
2384 	debugfs_create_file(#name, mode, parent, trans,			\
2385 			    &iwl_dbgfs_##name##_ops);			\
2386 } while (0)
2387 
2388 /* file operations */
2389 #define DEBUGFS_READ_FILE_OPS(name)					\
2390 static const struct file_operations iwl_dbgfs_##name##_ops = {		\
2391 	.read = iwl_dbgfs_##name##_read,				\
2392 	.open = simple_open,						\
2393 	.llseek = generic_file_llseek,					\
2394 };
2395 
2396 #define DEBUGFS_WRITE_FILE_OPS(name)                                    \
2397 static const struct file_operations iwl_dbgfs_##name##_ops = {          \
2398 	.write = iwl_dbgfs_##name##_write,                              \
2399 	.open = simple_open,						\
2400 	.llseek = generic_file_llseek,					\
2401 };
2402 
2403 #define DEBUGFS_READ_WRITE_FILE_OPS(name)				\
2404 static const struct file_operations iwl_dbgfs_##name##_ops = {		\
2405 	.write = iwl_dbgfs_##name##_write,				\
2406 	.read = iwl_dbgfs_##name##_read,				\
2407 	.open = simple_open,						\
2408 	.llseek = generic_file_llseek,					\
2409 };
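
/*
 * Illustrative sketch, not driver code: with the macros above, wiring a
 * new debugfs file takes a handler named after the file, one
 * *_FILE_OPS() invocation, and one DEBUGFS_ADD_FILE() in the register
 * function. A hypothetical read-only "foo" file would need:
 *
 *	static ssize_t iwl_dbgfs_foo_read(struct file *file,
 *					  char __user *user_buf,
 *					  size_t count, loff_t *ppos)
 *	{
 *		char buf[16];
 *		int pos = scnprintf(buf, sizeof(buf), "foo\n");
 *
 *		return simple_read_from_buffer(user_buf, count, ppos,
 *					       buf, pos);
 *	}
 *	DEBUGFS_READ_FILE_OPS(foo);
 *
 * plus DEBUGFS_ADD_FILE(foo, dir, 0400) in
 * iwl_trans_pcie_dbgfs_register().
 */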
2410 
2411 struct iwl_dbgfs_tx_queue_priv {
2412 	struct iwl_trans *trans;
2413 };
2414 
2415 struct iwl_dbgfs_tx_queue_state {
2416 	loff_t pos;
2417 };
2418 
2419 static void *iwl_dbgfs_tx_queue_seq_start(struct seq_file *seq, loff_t *pos)
2420 {
2421 	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2422 	struct iwl_dbgfs_tx_queue_state *state;
2423 
2424 	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2425 		return NULL;
2426 
2427 	state = kmalloc(sizeof(*state), GFP_KERNEL);
2428 	if (!state)
2429 		return NULL;
2430 	state->pos = *pos;
2431 	return state;
2432 }
2433 
2434 static void *iwl_dbgfs_tx_queue_seq_next(struct seq_file *seq,
2435 					 void *v, loff_t *pos)
2436 {
2437 	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2438 	struct iwl_dbgfs_tx_queue_state *state = v;
2439 
2440 	*pos = ++state->pos;
2441 
2442 	if (*pos >= priv->trans->trans_cfg->base_params->num_of_queues)
2443 		return NULL;
2444 
2445 	return state;
2446 }
2447 
2448 static void iwl_dbgfs_tx_queue_seq_stop(struct seq_file *seq, void *v)
2449 {
2450 	kfree(v);
2451 }
2452 
2453 static int iwl_dbgfs_tx_queue_seq_show(struct seq_file *seq, void *v)
2454 {
2455 	struct iwl_dbgfs_tx_queue_priv *priv = seq->private;
2456 	struct iwl_dbgfs_tx_queue_state *state = v;
2457 	struct iwl_trans *trans = priv->trans;
2458 	struct iwl_txq *txq = trans->txqs.txq[state->pos];
2459 
2460 	seq_printf(seq, "hwq %.3u: used=%d stopped=%d ",
2461 		   (unsigned int)state->pos,
2462 		   !!test_bit(state->pos, trans->txqs.queue_used),
2463 		   !!test_bit(state->pos, trans->txqs.queue_stopped));
2464 	if (txq)
2465 		seq_printf(seq,
2466 			   "read=%u write=%u need_update=%d frozen=%d n_window=%d ampdu=%d",
2467 			   txq->read_ptr, txq->write_ptr,
2468 			   txq->need_update, txq->frozen,
2469 			   txq->n_window, txq->ampdu);
2470 	else
2471 		seq_puts(seq, "(unallocated)");
2472 
2473 	if (state->pos == trans->txqs.cmd.q_id)
2474 		seq_puts(seq, " (HCMD)");
2475 	seq_puts(seq, "\n");
2476 
2477 	return 0;
2478 }
2479 
2480 static const struct seq_operations iwl_dbgfs_tx_queue_seq_ops = {
2481 	.start = iwl_dbgfs_tx_queue_seq_start,
2482 	.next = iwl_dbgfs_tx_queue_seq_next,
2483 	.stop = iwl_dbgfs_tx_queue_seq_stop,
2484 	.show = iwl_dbgfs_tx_queue_seq_show,
2485 };
2486 
2487 static int iwl_dbgfs_tx_queue_open(struct inode *inode, struct file *filp)
2488 {
2489 	struct iwl_dbgfs_tx_queue_priv *priv;
2490 
2491 	priv = __seq_open_private(filp, &iwl_dbgfs_tx_queue_seq_ops,
2492 				  sizeof(*priv));
2493 
2494 	if (!priv)
2495 		return -ENOMEM;
2496 
2497 	priv->trans = inode->i_private;
2498 	return 0;
2499 }
2500 
2501 static ssize_t iwl_dbgfs_rx_queue_read(struct file *file,
2502 				       char __user *user_buf,
2503 				       size_t count, loff_t *ppos)
2504 {
2505 	struct iwl_trans *trans = file->private_data;
2506 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2507 	char *buf;
2508 	int pos = 0, i, ret;
2509 	size_t bufsz;
2510 
2511 	bufsz = 121 * trans->num_rx_queues;
2512 
2513 	if (!trans_pcie->rxq)
2514 		return -EAGAIN;
2515 
2516 	buf = kzalloc(bufsz, GFP_KERNEL);
2517 	if (!buf)
2518 		return -ENOMEM;
2519 
2520 	for (i = 0; i < trans->num_rx_queues && pos < bufsz; i++) {
2521 		struct iwl_rxq *rxq = &trans_pcie->rxq[i];
2522 
2523 		pos += scnprintf(buf + pos, bufsz - pos, "queue#: %2d\n",
2524 				 i);
2525 		pos += scnprintf(buf + pos, bufsz - pos, "\tread: %u\n",
2526 				 rxq->read);
2527 		pos += scnprintf(buf + pos, bufsz - pos, "\twrite: %u\n",
2528 				 rxq->write);
2529 		pos += scnprintf(buf + pos, bufsz - pos, "\twrite_actual: %u\n",
2530 				 rxq->write_actual);
2531 		pos += scnprintf(buf + pos, bufsz - pos, "\tneed_update: %2d\n",
2532 				 rxq->need_update);
2533 		pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n",
2534 				 rxq->free_count);
2535 		if (rxq->rb_stts) {
2536 			u32 r =	__le16_to_cpu(iwl_get_closed_rb_stts(trans,
2537 								     rxq));
2538 			pos += scnprintf(buf + pos, bufsz - pos,
2539 					 "\tclosed_rb_num: %u\n",
2540 					 r & 0x0FFF);
2541 		} else {
2542 			pos += scnprintf(buf + pos, bufsz - pos,
2543 					 "\tclosed_rb_num: Not Allocated\n");
2544 		}
2545 	}
2546 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2547 	kfree(buf);
2548 
2549 	return ret;
2550 }
2551 
2552 static ssize_t iwl_dbgfs_interrupt_read(struct file *file,
2553 					char __user *user_buf,
2554 					size_t count, loff_t *ppos)
2555 {
2556 	struct iwl_trans *trans = file->private_data;
2557 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2558 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2559 
2560 	int pos = 0;
2561 	char *buf;
2562 	int bufsz = 24 * 64; /* 24 items * 64 char per item */
2563 	ssize_t ret;
2564 
2565 	buf = kzalloc(bufsz, GFP_KERNEL);
2566 	if (!buf)
2567 		return -ENOMEM;
2568 
2569 	pos += scnprintf(buf + pos, bufsz - pos,
2570 			"Interrupt Statistics Report:\n");
2571 
2572 	pos += scnprintf(buf + pos, bufsz - pos, "HW Error:\t\t\t %u\n",
2573 		isr_stats->hw);
2574 	pos += scnprintf(buf + pos, bufsz - pos, "SW Error:\t\t\t %u\n",
2575 		isr_stats->sw);
2576 	if (isr_stats->sw || isr_stats->hw) {
2577 		pos += scnprintf(buf + pos, bufsz - pos,
2578 			"\tLast Restarting Code:  0x%X\n",
2579 			isr_stats->err_code);
2580 	}
2581 #ifdef CONFIG_IWLWIFI_DEBUG
2582 	pos += scnprintf(buf + pos, bufsz - pos, "Frame transmitted:\t\t %u\n",
2583 		isr_stats->sch);
2584 	pos += scnprintf(buf + pos, bufsz - pos, "Alive interrupt:\t\t %u\n",
2585 		isr_stats->alive);
2586 #endif
2587 	pos += scnprintf(buf + pos, bufsz - pos,
2588 		"HW RF KILL switch toggled:\t %u\n", isr_stats->rfkill);
2589 
2590 	pos += scnprintf(buf + pos, bufsz - pos, "CT KILL:\t\t\t %u\n",
2591 		isr_stats->ctkill);
2592 
2593 	pos += scnprintf(buf + pos, bufsz - pos, "Wakeup Interrupt:\t\t %u\n",
2594 		isr_stats->wakeup);
2595 
2596 	pos += scnprintf(buf + pos, bufsz - pos,
2597 		"Rx command responses:\t\t %u\n", isr_stats->rx);
2598 
2599 	pos += scnprintf(buf + pos, bufsz - pos, "Tx/FH interrupt:\t\t %u\n",
2600 		isr_stats->tx);
2601 
2602 	pos += scnprintf(buf + pos, bufsz - pos, "Unexpected INTA:\t\t %u\n",
2603 		isr_stats->unhandled);
2604 
2605 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2606 	kfree(buf);
2607 	return ret;
2608 }
2609 
2610 static ssize_t iwl_dbgfs_interrupt_write(struct file *file,
2611 					 const char __user *user_buf,
2612 					 size_t count, loff_t *ppos)
2613 {
2614 	struct iwl_trans *trans = file->private_data;
2615 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2616 	struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
2617 	u32 reset_flag;
2618 	int ret;
2619 
2620 	ret = kstrtou32_from_user(user_buf, count, 16, &reset_flag);
2621 	if (ret)
2622 		return ret;
2623 	if (reset_flag == 0)
2624 		memset(isr_stats, 0, sizeof(*isr_stats));
2625 
2626 	return count;
2627 }
2628 
2629 static ssize_t iwl_dbgfs_csr_write(struct file *file,
2630 				   const char __user *user_buf,
2631 				   size_t count, loff_t *ppos)
2632 {
2633 	struct iwl_trans *trans = file->private_data;
2634 
2635 	iwl_pcie_dump_csr(trans);
2636 
2637 	return count;
2638 }
2639 
2640 static ssize_t iwl_dbgfs_fh_reg_read(struct file *file,
2641 				     char __user *user_buf,
2642 				     size_t count, loff_t *ppos)
2643 {
2644 	struct iwl_trans *trans = file->private_data;
2645 	char *buf = NULL;
2646 	ssize_t ret;
2647 
2648 	ret = iwl_dump_fh(trans, &buf);
2649 	if (ret < 0)
2650 		return ret;
2651 	if (!buf)
2652 		return -EINVAL;
2653 	ret = simple_read_from_buffer(user_buf, count, ppos, buf, ret);
2654 	kfree(buf);
2655 	return ret;
2656 }
2657 
2658 static ssize_t iwl_dbgfs_rfkill_read(struct file *file,
2659 				     char __user *user_buf,
2660 				     size_t count, loff_t *ppos)
2661 {
2662 	struct iwl_trans *trans = file->private_data;
2663 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2664 	char buf[100];
2665 	int pos;
2666 
2667 	pos = scnprintf(buf, sizeof(buf), "debug: %d\nhw: %d\n",
2668 			trans_pcie->debug_rfkill,
2669 			!(iwl_read32(trans, CSR_GP_CNTRL) &
2670 				CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW));
2671 
2672 	return simple_read_from_buffer(user_buf, count, ppos, buf, pos);
2673 }
2674 
2675 static ssize_t iwl_dbgfs_rfkill_write(struct file *file,
2676 				      const char __user *user_buf,
2677 				      size_t count, loff_t *ppos)
2678 {
2679 	struct iwl_trans *trans = file->private_data;
2680 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2681 	bool new_value;
2682 	int ret;
2683 
2684 	ret = kstrtobool_from_user(user_buf, count, &new_value);
2685 	if (ret)
2686 		return ret;
2687 	if (new_value == trans_pcie->debug_rfkill)
2688 		return count;
2689 	IWL_WARN(trans, "changing debug rfkill %d->%d\n",
2690 		 trans_pcie->debug_rfkill, new_value);
2691 	trans_pcie->debug_rfkill = new_value;
2692 	iwl_pcie_handle_rfkill_irq(trans);
2693 
2694 	return count;
2695 }
2696 
2697 static int iwl_dbgfs_monitor_data_open(struct inode *inode,
2698 				       struct file *file)
2699 {
2700 	struct iwl_trans *trans = inode->i_private;
2701 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2702 
2703 	if (!trans->dbg.dest_tlv ||
2704 	    trans->dbg.dest_tlv->monitor_mode != EXTERNAL_MODE) {
2705 		IWL_ERR(trans, "Debug destination is not set to DRAM\n");
2706 		return -ENOENT;
2707 	}
2708 
2709 	if (trans_pcie->fw_mon_data.state != IWL_FW_MON_DBGFS_STATE_CLOSED)
2710 		return -EBUSY;
2711 
2712 	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_OPEN;
2713 	return simple_open(inode, file);
2714 }
2715 
2716 static int iwl_dbgfs_monitor_data_release(struct inode *inode,
2717 					  struct file *file)
2718 {
2719 	struct iwl_trans_pcie *trans_pcie =
2720 		IWL_TRANS_GET_PCIE_TRANS(inode->i_private);
2721 
2722 	if (trans_pcie->fw_mon_data.state == IWL_FW_MON_DBGFS_STATE_OPEN)
2723 		trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
2724 	return 0;
2725 }
2726 
2727 static bool iwl_write_to_user_buf(char __user *user_buf, ssize_t count,
2728 				  void *buf, ssize_t *size,
2729 				  ssize_t *bytes_copied)
2730 {
2731 	int buf_size_left = count - *bytes_copied;
2732 
2733 	buf_size_left = buf_size_left - (buf_size_left % sizeof(u32));
2734 	if (*size > buf_size_left)
2735 		*size = buf_size_left;
2736 
2737 	*size -= copy_to_user(user_buf, buf, *size);
2738 	*bytes_copied += *size;
2739 
2740 	if (buf_size_left == *size)
2741 		return true;
2742 	return false;
2743 }
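
/*
 * Illustrative sketch, not driver code: the "*size -= copy_to_user(...)"
 * line above works because copy_to_user() returns the number of bytes
 * it could NOT copy (0 on complete success). A hypothetical helper
 * making that convention explicit:
 */
static ssize_t example_copy_out(char __user *to, const void *from, size_t len)
{
	size_t not_copied = copy_to_user(to, from, len);

	if (not_copied == len)
		return -EFAULT; /* nothing reached userspace */

	return len - not_copied; /* bytes actually delivered */
}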
2744 
2745 static ssize_t iwl_dbgfs_monitor_data_read(struct file *file,
2746 					   char __user *user_buf,
2747 					   size_t count, loff_t *ppos)
2748 {
2749 	struct iwl_trans *trans = file->private_data;
2750 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2751 	void *cpu_addr = (void *)trans->dbg.fw_mon.block, *curr_buf;
2752 	struct cont_rec *data = &trans_pcie->fw_mon_data;
2753 	u32 write_ptr_addr, wrap_cnt_addr, write_ptr, wrap_cnt;
2754 	ssize_t size, bytes_copied = 0;
2755 	bool b_full;
2756 
2757 	if (trans->dbg.dest_tlv) {
2758 		write_ptr_addr =
2759 			le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
2760 		wrap_cnt_addr = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
2761 	} else {
2762 		write_ptr_addr = MON_BUFF_WRPTR;
2763 		wrap_cnt_addr = MON_BUFF_CYCLE_CNT;
2764 	}
2765 
2766 	if (unlikely(!trans->dbg.rec_on))
2767 		return 0;
2768 
2769 	mutex_lock(&data->mutex);
2770 	if (data->state ==
2771 	    IWL_FW_MON_DBGFS_STATE_DISABLED) {
2772 		mutex_unlock(&data->mutex);
2773 		return 0;
2774 	}
2775 
2776 	/* write_ptr position in bytes rather than in DWs */
2777 	write_ptr = iwl_read_prph(trans, write_ptr_addr) * sizeof(u32);
2778 	wrap_cnt = iwl_read_prph(trans, wrap_cnt_addr);
2779 
2780 	if (data->prev_wrap_cnt == wrap_cnt) {
2781 		size = write_ptr - data->prev_wr_ptr;
2782 		curr_buf = cpu_addr + data->prev_wr_ptr;
2783 		b_full = iwl_write_to_user_buf(user_buf, count,
2784 					       curr_buf, &size,
2785 					       &bytes_copied);
2786 		data->prev_wr_ptr += size;
2787 
2788 	} else if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2789 		   write_ptr < data->prev_wr_ptr) {
2790 		size = trans->dbg.fw_mon.size - data->prev_wr_ptr;
2791 		curr_buf = cpu_addr + data->prev_wr_ptr;
2792 		b_full = iwl_write_to_user_buf(user_buf, count,
2793 					       curr_buf, &size,
2794 					       &bytes_copied);
2795 		data->prev_wr_ptr += size;
2796 
2797 		if (!b_full) {
2798 			size = write_ptr;
2799 			b_full = iwl_write_to_user_buf(user_buf, count,
2800 						       cpu_addr, &size,
2801 						       &bytes_copied);
2802 			data->prev_wr_ptr = size;
2803 			data->prev_wrap_cnt++;
2804 		}
2805 	} else {
2806 		if (data->prev_wrap_cnt == wrap_cnt - 1 &&
2807 		    write_ptr > data->prev_wr_ptr)
2808 			IWL_WARN(trans,
2809 				 "write pointer passed previous write pointer, start copying from the beginning\n");
2810 		else if (!unlikely(data->prev_wrap_cnt == 0 &&
2811 				   data->prev_wr_ptr == 0))
2812 			IWL_WARN(trans,
2813 				 "monitor data is out of sync, start copying from the beginning\n");
2814 
2815 		size = write_ptr;
2816 		b_full = iwl_write_to_user_buf(user_buf, count,
2817 					       cpu_addr, &size,
2818 					       &bytes_copied);
2819 		data->prev_wr_ptr = size;
2820 		data->prev_wrap_cnt = wrap_cnt;
2821 	}
2822 
2823 	mutex_unlock(&data->mutex);
2824 
2825 	return bytes_copied;
2826 }
2827 
2828 DEBUGFS_READ_WRITE_FILE_OPS(interrupt);
2829 DEBUGFS_READ_FILE_OPS(fh_reg);
2830 DEBUGFS_READ_FILE_OPS(rx_queue);
2831 DEBUGFS_WRITE_FILE_OPS(csr);
2832 DEBUGFS_READ_WRITE_FILE_OPS(rfkill);
2833 static const struct file_operations iwl_dbgfs_tx_queue_ops = {
2834 	.owner = THIS_MODULE,
2835 	.open = iwl_dbgfs_tx_queue_open,
2836 	.read = seq_read,
2837 	.llseek = seq_lseek,
2838 	.release = seq_release_private,
2839 };
2840 
2841 static const struct file_operations iwl_dbgfs_monitor_data_ops = {
2842 	.read = iwl_dbgfs_monitor_data_read,
2843 	.open = iwl_dbgfs_monitor_data_open,
2844 	.release = iwl_dbgfs_monitor_data_release,
2845 };
2846 
2847 /* Create the debugfs files and directories */
2848 void iwl_trans_pcie_dbgfs_register(struct iwl_trans *trans)
2849 {
2850 	struct dentry *dir = trans->dbgfs_dir;
2851 
2852 	DEBUGFS_ADD_FILE(rx_queue, dir, 0400);
2853 	DEBUGFS_ADD_FILE(tx_queue, dir, 0400);
2854 	DEBUGFS_ADD_FILE(interrupt, dir, 0600);
2855 	DEBUGFS_ADD_FILE(csr, dir, 0200);
2856 	DEBUGFS_ADD_FILE(fh_reg, dir, 0400);
2857 	DEBUGFS_ADD_FILE(rfkill, dir, 0600);
2858 	DEBUGFS_ADD_FILE(monitor_data, dir, 0400);
2859 }
2860 
2861 static void iwl_trans_pcie_debugfs_cleanup(struct iwl_trans *trans)
2862 {
2863 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2864 	struct cont_rec *data = &trans_pcie->fw_mon_data;
2865 
2866 	mutex_lock(&data->mutex);
2867 	data->state = IWL_FW_MON_DBGFS_STATE_DISABLED;
2868 	mutex_unlock(&data->mutex);
2869 }
2870 #endif /* CONFIG_IWLWIFI_DEBUGFS */
2871 
2872 static u32 iwl_trans_pcie_get_cmdlen(struct iwl_trans *trans, void *tfd)
2873 {
2874 	u32 cmdlen = 0;
2875 	int i;
2876 
2877 	for (i = 0; i < trans->txqs.tfd.max_tbs; i++)
2878 		cmdlen += iwl_txq_gen1_tfd_tb_get_len(trans, tfd, i);
2879 
2880 	return cmdlen;
2881 }
2882 
2883 static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans,
2884 				   struct iwl_fw_error_dump_data **data,
2885 				   int allocated_rb_nums)
2886 {
2887 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
2888 	int max_len = trans_pcie->rx_buf_bytes;
2889 	/* Dumping RBs is supported only for pre-9000 devices (1 RX queue) */
2890 	struct iwl_rxq *rxq = &trans_pcie->rxq[0];
2891 	u32 i, r, j, rb_len = 0;
2892 
2893 	spin_lock(&rxq->lock);
2894 
2895 	r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF;
2896 
2897 	for (i = rxq->read, j = 0;
2898 	     i != r && j < allocated_rb_nums;
2899 	     i = (i + 1) & RX_QUEUE_MASK, j++) {
2900 		struct iwl_rx_mem_buffer *rxb = rxq->queue[i];
2901 		struct iwl_fw_error_dump_rb *rb;
2902 
2903 		dma_unmap_page(trans->dev, rxb->page_dma, max_len,
2904 			       DMA_FROM_DEVICE);
2905 
2906 		rb_len += sizeof(**data) + sizeof(*rb) + max_len;
2907 
2908 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_RB);
2909 		(*data)->len = cpu_to_le32(sizeof(*rb) + max_len);
2910 		rb = (void *)(*data)->data;
2911 		rb->index = cpu_to_le32(i);
2912 		memcpy(rb->data, page_address(rxb->page), max_len);
2913 		/* remap the page - the RX queue keeps using this buffer */
2914 		rxb->page_dma = dma_map_page(trans->dev, rxb->page,
2915 					     rxb->offset, max_len,
2916 					     DMA_FROM_DEVICE);
2917 
2918 		*data = iwl_fw_error_next_data(*data);
2919 	}
2920 
2921 	spin_unlock(&rxq->lock);
2922 
2923 	return rb_len;
2924 }
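
/*
 * Illustrative sketch, not driver code: the unmap/copy/remap sequence in
 * iwl_trans_pcie_dump_rbs() is the standard way for the CPU to read a
 * DMA_FROM_DEVICE buffer - unmap so device writes become visible, touch
 * the data, then map the page again for continued device use. In
 * skeletal, hypothetical form (error handling trimmed):
 */
static void example_peek_dma_page(struct device *dev, struct page *page,
				  dma_addr_t *dma, size_t len, void *out)
{
	dma_unmap_page(dev, *dma, len, DMA_FROM_DEVICE);
	memcpy(out, page_address(page), len); /* CPU owns the buffer here */
	*dma = dma_map_page(dev, page, 0, len, DMA_FROM_DEVICE);
	/* real code must check dma_mapping_error(dev, *dma) */
}
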
2925 #define IWL_CSR_TO_DUMP (0x250)
2926 
2927 static u32 iwl_trans_pcie_dump_csr(struct iwl_trans *trans,
2928 				   struct iwl_fw_error_dump_data **data)
2929 {
2930 	u32 csr_len = sizeof(**data) + IWL_CSR_TO_DUMP;
2931 	__le32 *val;
2932 	int i;
2933 
2934 	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_CSR);
2935 	(*data)->len = cpu_to_le32(IWL_CSR_TO_DUMP);
2936 	val = (void *)(*data)->data;
2937 
2938 	for (i = 0; i < IWL_CSR_TO_DUMP; i += 4)
2939 		*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2940 
2941 	*data = iwl_fw_error_next_data(*data);
2942 
2943 	return csr_len;
2944 }
2945 
2946 static u32 iwl_trans_pcie_fh_regs_dump(struct iwl_trans *trans,
2947 				       struct iwl_fw_error_dump_data **data)
2948 {
2949 	u32 fh_regs_len = FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND;
2950 	__le32 *val;
2951 	int i;
2952 
2953 	if (!iwl_trans_grab_nic_access(trans))
2954 		return 0;
2955 
2956 	(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FH_REGS);
2957 	(*data)->len = cpu_to_le32(fh_regs_len);
2958 	val = (void *)(*data)->data;
2959 
2960 	if (!trans->trans_cfg->gen2)
2961 		for (i = FH_MEM_LOWER_BOUND; i < FH_MEM_UPPER_BOUND;
2962 		     i += sizeof(u32))
2963 			*val++ = cpu_to_le32(iwl_trans_pcie_read32(trans, i));
2964 	else
2965 		for (i = iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2);
2966 		     i < iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2);
2967 		     i += sizeof(u32))
2968 			*val++ = cpu_to_le32(iwl_trans_pcie_read_prph(trans,
2969 								      i));
2970 
2971 	iwl_trans_release_nic_access(trans);
2972 
2973 	*data = iwl_fw_error_next_data(*data);
2974 
2975 	return sizeof(**data) + fh_regs_len;
2976 }
2977 
2978 static u32
2979 iwl_trans_pci_dump_marbh_monitor(struct iwl_trans *trans,
2980 				 struct iwl_fw_error_dump_fw_mon *fw_mon_data,
2981 				 u32 monitor_len)
2982 {
2983 	u32 buf_size_in_dwords = (monitor_len >> 2);
2984 	u32 *buffer = (u32 *)fw_mon_data->data;
2985 	u32 i;
2986 
2987 	if (!iwl_trans_grab_nic_access(trans))
2988 		return 0;
2989 
2990 	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x1);
2991 	for (i = 0; i < buf_size_in_dwords; i++)
2992 		buffer[i] = iwl_read_umac_prph_no_grab(trans,
2993 						       MON_DMARB_RD_DATA_ADDR);
2994 	iwl_write_umac_prph_no_grab(trans, MON_DMARB_RD_CTL_ADDR, 0x0);
2995 
2996 	iwl_trans_release_nic_access(trans);
2997 
2998 	return monitor_len;
2999 }
3000 
3001 static void
3002 iwl_trans_pcie_dump_pointers(struct iwl_trans *trans,
3003 			     struct iwl_fw_error_dump_fw_mon *fw_mon_data)
3004 {
3005 	u32 base, base_high, write_ptr, write_ptr_val, wrap_cnt;
3006 
3007 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3008 		base = DBGC_CUR_DBGBUF_BASE_ADDR_LSB;
3009 		base_high = DBGC_CUR_DBGBUF_BASE_ADDR_MSB;
3010 		write_ptr = DBGC_CUR_DBGBUF_STATUS;
3011 		wrap_cnt = DBGC_DBGBUF_WRAP_AROUND;
3012 	} else if (trans->dbg.dest_tlv) {
3013 		write_ptr = le32_to_cpu(trans->dbg.dest_tlv->write_ptr_reg);
3014 		wrap_cnt = le32_to_cpu(trans->dbg.dest_tlv->wrap_count);
3015 		base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3016 	} else {
3017 		base = MON_BUFF_BASE_ADDR;
3018 		write_ptr = MON_BUFF_WRPTR;
3019 		wrap_cnt = MON_BUFF_CYCLE_CNT;
3020 	}
3021 
3022 	write_ptr_val = iwl_read_prph(trans, write_ptr);
3023 	fw_mon_data->fw_mon_cycle_cnt =
3024 		cpu_to_le32(iwl_read_prph(trans, wrap_cnt));
3025 	fw_mon_data->fw_mon_base_ptr =
3026 		cpu_to_le32(iwl_read_prph(trans, base));
3027 	if (trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210) {
3028 		fw_mon_data->fw_mon_base_high_ptr =
3029 			cpu_to_le32(iwl_read_prph(trans, base_high));
3030 		write_ptr_val &= DBGC_CUR_DBGBUF_STATUS_OFFSET_MSK;
3031 		/* convert the write pointer to DWs, to align with all HWs */
3032 		write_ptr_val >>= 2;
3033 	}
3034 	fw_mon_data->fw_mon_wr_ptr = cpu_to_le32(write_ptr_val);
3035 }
3036 
3037 static u32
3038 iwl_trans_pcie_dump_monitor(struct iwl_trans *trans,
3039 			    struct iwl_fw_error_dump_data **data,
3040 			    u32 monitor_len)
3041 {
3042 	struct iwl_dram_data *fw_mon = &trans->dbg.fw_mon;
3043 	u32 len = 0;
3044 
3045 	if (trans->dbg.dest_tlv ||
3046 	    (fw_mon->size &&
3047 	     (trans->trans_cfg->device_family == IWL_DEVICE_FAMILY_7000 ||
3048 	      trans->trans_cfg->device_family >= IWL_DEVICE_FAMILY_AX210))) {
3049 		struct iwl_fw_error_dump_fw_mon *fw_mon_data;
3050 
3051 		(*data)->type = cpu_to_le32(IWL_FW_ERROR_DUMP_FW_MONITOR);
3052 		fw_mon_data = (void *)(*data)->data;
3053 
3054 		iwl_trans_pcie_dump_pointers(trans, fw_mon_data);
3055 
3056 		len += sizeof(**data) + sizeof(*fw_mon_data);
3057 		if (fw_mon->size) {
3058 			memcpy(fw_mon_data->data, fw_mon->block, fw_mon->size);
3059 			monitor_len = fw_mon->size;
3060 		} else if (trans->dbg.dest_tlv->monitor_mode == SMEM_MODE) {
3061 			u32 base = le32_to_cpu(fw_mon_data->fw_mon_base_ptr);
3062 			/*
3063 			 * Update pointers to reflect actual values after
3064 			 * shifting
3065 			 */
3066 			if (trans->dbg.dest_tlv->version) {
3067 				base = (iwl_read_prph(trans, base) &
3068 					IWL_LDBG_M2S_BUF_BA_MSK) <<
3069 				       trans->dbg.dest_tlv->base_shift;
3070 				base *= IWL_M2S_UNIT_SIZE;
3071 				base += trans->cfg->smem_offset;
3072 			} else {
3073 				base = iwl_read_prph(trans, base) <<
3074 				       trans->dbg.dest_tlv->base_shift;
3075 			}
3076 
3077 			iwl_trans_read_mem(trans, base, fw_mon_data->data,
3078 					   monitor_len / sizeof(u32));
3079 		} else if (trans->dbg.dest_tlv->monitor_mode == MARBH_MODE) {
3080 			monitor_len =
3081 				iwl_trans_pci_dump_marbh_monitor(trans,
3082 								 fw_mon_data,
3083 								 monitor_len);
3084 		} else {
3085 			/* Didn't match anything - output no monitor data */
3086 			monitor_len = 0;
3087 		}
3088 
3089 		len += monitor_len;
3090 		(*data)->len = cpu_to_le32(monitor_len + sizeof(*fw_mon_data));
3091 	}
3092 
3093 	return len;
3094 }
3095 
3096 static int iwl_trans_get_fw_monitor_len(struct iwl_trans *trans, u32 *len)
3097 {
3098 	if (trans->dbg.fw_mon.size) {
3099 		*len += sizeof(struct iwl_fw_error_dump_data) +
3100 			sizeof(struct iwl_fw_error_dump_fw_mon) +
3101 			trans->dbg.fw_mon.size;
3102 		return trans->dbg.fw_mon.size;
3103 	} else if (trans->dbg.dest_tlv) {
3104 		u32 base, end, cfg_reg, monitor_len;
3105 
3106 		if (trans->dbg.dest_tlv->version == 1) {
3107 			cfg_reg = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3108 			cfg_reg = iwl_read_prph(trans, cfg_reg);
3109 			base = (cfg_reg & IWL_LDBG_M2S_BUF_BA_MSK) <<
3110 				trans->dbg.dest_tlv->base_shift;
3111 			base *= IWL_M2S_UNIT_SIZE;
3112 			base += trans->cfg->smem_offset;
3113 
3114 			monitor_len =
3115 				(cfg_reg & IWL_LDBG_M2S_BUF_SIZE_MSK) >>
3116 				trans->dbg.dest_tlv->end_shift;
3117 			monitor_len *= IWL_M2S_UNIT_SIZE;
3118 		} else {
3119 			base = le32_to_cpu(trans->dbg.dest_tlv->base_reg);
3120 			end = le32_to_cpu(trans->dbg.dest_tlv->end_reg);
3121 
3122 			base = iwl_read_prph(trans, base) <<
3123 			       trans->dbg.dest_tlv->base_shift;
3124 			end = iwl_read_prph(trans, end) <<
3125 			      trans->dbg.dest_tlv->end_shift;
3126 
3127 			/* Make "end" point to the actual end */
3128 			if (trans->trans_cfg->device_family >=
3129 			    IWL_DEVICE_FAMILY_8000 ||
3130 			    trans->dbg.dest_tlv->monitor_mode == MARBH_MODE)
3131 				end += (1 << trans->dbg.dest_tlv->end_shift);
3132 			monitor_len = end - base;
3133 		}
3134 		*len += sizeof(struct iwl_fw_error_dump_data) +
3135 			sizeof(struct iwl_fw_error_dump_fw_mon) +
3136 			monitor_len;
3137 		return monitor_len;
3138 	}
3139 	return 0;
3140 }
3141 
3142 static struct iwl_trans_dump_data
3143 *iwl_trans_pcie_dump_data(struct iwl_trans *trans,
3144 			  u32 dump_mask)
3145 {
3146 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3147 	struct iwl_fw_error_dump_data *data;
3148 	struct iwl_txq *cmdq = trans->txqs.txq[trans->txqs.cmd.q_id];
3149 	struct iwl_fw_error_dump_txcmd *txcmd;
3150 	struct iwl_trans_dump_data *dump_data;
3151 	u32 len, num_rbs = 0, monitor_len = 0;
3152 	int i, ptr;
3153 	bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) &&
3154 			!trans->trans_cfg->mq_rx_supported &&
3155 			dump_mask & BIT(IWL_FW_ERROR_DUMP_RB);
3156 
3157 	if (!dump_mask)
3158 		return NULL;
3159 
3160 	/* transport dump header */
3161 	len = sizeof(*dump_data);
3162 
3163 	/* host commands */
3164 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq)
3165 		len += sizeof(*data) +
3166 			cmdq->n_window * (sizeof(*txcmd) +
3167 					  TFD_MAX_PAYLOAD_SIZE);
3168 
3169 	/* FW monitor */
3170 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3171 		monitor_len = iwl_trans_get_fw_monitor_len(trans, &len);
3172 
3173 	/* CSR registers */
3174 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3175 		len += sizeof(*data) + IWL_CSR_TO_DUMP;
3176 
3177 	/* FH registers */
3178 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) {
3179 		if (trans->trans_cfg->gen2)
3180 			len += sizeof(*data) +
3181 			       (iwl_umac_prph(trans, FH_MEM_UPPER_BOUND_GEN2) -
3182 				iwl_umac_prph(trans, FH_MEM_LOWER_BOUND_GEN2));
3183 		else
3184 			len += sizeof(*data) +
3185 			       (FH_MEM_UPPER_BOUND -
3186 				FH_MEM_LOWER_BOUND);
3187 	}
3188 
3189 	if (dump_rbs) {
3190 		/* Dumping RBs is supported only for pre-9000 devices (1 RX queue) */
3191 		struct iwl_rxq *rxq = &trans_pcie->rxq[0];
3192 		/* RBs */
3193 		num_rbs =
3194 			le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq))
3195 			& 0x0FFF;
3196 		num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK;
3197 		len += num_rbs * (sizeof(*data) +
3198 				  sizeof(struct iwl_fw_error_dump_rb) +
3199 				  (PAGE_SIZE << trans_pcie->rx_page_order));
3200 	}
3201 
3202 	/* Paged memory for gen2 HW */
3203 	if (trans->trans_cfg->gen2 && dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING))
3204 		for (i = 0; i < trans->init_dram.paging_cnt; i++)
3205 			len += sizeof(*data) +
3206 			       sizeof(struct iwl_fw_error_dump_paging) +
3207 			       trans->init_dram.paging[i].size;
3208 
3209 	dump_data = vzalloc(len);
3210 	if (!dump_data)
3211 		return NULL;
3212 
3213 	len = 0;
3214 	data = (void *)dump_data->data;
3215 
3216 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD) && cmdq) {
3217 		u16 tfd_size = trans->txqs.tfd.size;
3218 
3219 		data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD);
3220 		txcmd = (void *)data->data;
3221 		spin_lock_bh(&cmdq->lock);
3222 		ptr = cmdq->write_ptr;
3223 		for (i = 0; i < cmdq->n_window; i++) {
3224 			u8 idx = iwl_txq_get_cmd_index(cmdq, ptr);
3225 			u8 tfdidx;
3226 			u32 caplen, cmdlen;
3227 
3228 			if (trans->trans_cfg->use_tfh)
3229 				tfdidx = idx;
3230 			else
3231 				tfdidx = ptr;
3232 
3233 			cmdlen = iwl_trans_pcie_get_cmdlen(trans,
3234 							   (u8 *)cmdq->tfds +
3235 							   tfd_size * tfdidx);
3236 			caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen);
3237 
3238 			if (cmdlen) {
3239 				len += sizeof(*txcmd) + caplen;
3240 				txcmd->cmdlen = cpu_to_le32(cmdlen);
3241 				txcmd->caplen = cpu_to_le32(caplen);
3242 				memcpy(txcmd->data, cmdq->entries[idx].cmd,
3243 				       caplen);
3244 				txcmd = (void *)((u8 *)txcmd->data + caplen);
3245 			}
3246 
3247 			ptr = iwl_txq_dec_wrap(trans, ptr);
3248 		}
3249 		spin_unlock_bh(&cmdq->lock);
3250 
3251 		data->len = cpu_to_le32(len);
3252 		len += sizeof(*data);
3253 		data = iwl_fw_error_next_data(data);
3254 	}
3255 
3256 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR))
3257 		len += iwl_trans_pcie_dump_csr(trans, &data);
3258 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS))
3259 		len += iwl_trans_pcie_fh_regs_dump(trans, &data);
3260 	if (dump_rbs)
3261 		len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs);
3262 
3263 	/* Paged memory for gen2 HW */
3264 	if (trans->trans_cfg->gen2 &&
3265 	    dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) {
3266 		for (i = 0; i < trans->init_dram.paging_cnt; i++) {
3267 			struct iwl_fw_error_dump_paging *paging;
3268 			u32 page_len = trans->init_dram.paging[i].size;
3269 
3270 			data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_PAGING);
3271 			data->len = cpu_to_le32(sizeof(*paging) + page_len);
3272 			paging = (void *)data->data;
3273 			paging->index = cpu_to_le32(i);
3274 			memcpy(paging->data,
3275 			       trans->init_dram.paging[i].block, page_len);
3276 			data = iwl_fw_error_next_data(data);
3277 
3278 			len += sizeof(*data) + sizeof(*paging) + page_len;
3279 		}
3280 	}
3281 	if (dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))
3282 		len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len);
3283 
3284 	dump_data->len = len;
3285 
3286 	return dump_data;
3287 }
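
/*
 * Illustrative sketch, not driver code: iwl_trans_pcie_dump_data() uses
 * the common two-pass scheme - pass one adds up the worst-case length of
 * every requested chunk, a single vzalloc() sizes the blob, and pass two
 * fills it in chunk order. The skeleton, with hypothetical helpers:
 *
 *	len = sizeof(*dump);
 *	if (dump_mask & BIT(CHUNK_A))
 *		len += chunk_a_len(trans);
 *	...
 *	dump = vzalloc(len);
 *	if (!dump)
 *		return NULL;
 *	if (dump_mask & BIT(CHUNK_A))
 *		pos += chunk_a_fill(trans, dump->data + pos);
 *	...
 *
 * vzalloc() is used rather than kzalloc() because dumps can reach many
 * megabytes and need no physical contiguity.
 */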
3288 
3289 static void iwl_trans_pci_interrupts(struct iwl_trans *trans, bool enable)
3290 {
3291 	if (enable)
3292 		iwl_enable_interrupts(trans);
3293 	else
3294 		iwl_disable_interrupts(trans);
3295 }
3296 
3297 static void iwl_trans_pcie_sync_nmi(struct iwl_trans *trans)
3298 {
3299 	u32 inta_addr, sw_err_bit;
3300 	struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3301 
3302 	if (trans_pcie->msix_enabled) {
3303 		inta_addr = CSR_MSIX_HW_INT_CAUSES_AD;
3304 		sw_err_bit = MSIX_HW_INT_CAUSES_REG_SW_ERR;
3305 	} else {
3306 		inta_addr = CSR_INT;
3307 		sw_err_bit = CSR_INT_BIT_SW_ERR;
3308 	}
3309 
3310 	iwl_trans_sync_nmi_with_addr(trans, inta_addr, sw_err_bit);
3311 }
3312 
3313 #define IWL_TRANS_COMMON_OPS						\
3314 	.op_mode_leave = iwl_trans_pcie_op_mode_leave,			\
3315 	.write8 = iwl_trans_pcie_write8,				\
3316 	.write32 = iwl_trans_pcie_write32,				\
3317 	.read32 = iwl_trans_pcie_read32,				\
3318 	.read_prph = iwl_trans_pcie_read_prph,				\
3319 	.write_prph = iwl_trans_pcie_write_prph,			\
3320 	.read_mem = iwl_trans_pcie_read_mem,				\
3321 	.write_mem = iwl_trans_pcie_write_mem,				\
3322 	.read_config32 = iwl_trans_pcie_read_config32,			\
3323 	.configure = iwl_trans_pcie_configure,				\
3324 	.set_pmi = iwl_trans_pcie_set_pmi,				\
3325 	.sw_reset = iwl_trans_pcie_sw_reset,				\
3326 	.grab_nic_access = iwl_trans_pcie_grab_nic_access,		\
3327 	.release_nic_access = iwl_trans_pcie_release_nic_access,	\
3328 	.set_bits_mask = iwl_trans_pcie_set_bits_mask,			\
3329 	.dump_data = iwl_trans_pcie_dump_data,				\
3330 	.d3_suspend = iwl_trans_pcie_d3_suspend,			\
3331 	.d3_resume = iwl_trans_pcie_d3_resume,				\
3332 	.interrupts = iwl_trans_pci_interrupts,				\
3333 	.sync_nmi = iwl_trans_pcie_sync_nmi				\
3334 
3335 static const struct iwl_trans_ops trans_ops_pcie = {
3336 	IWL_TRANS_COMMON_OPS,
3337 	.start_hw = iwl_trans_pcie_start_hw,
3338 	.fw_alive = iwl_trans_pcie_fw_alive,
3339 	.start_fw = iwl_trans_pcie_start_fw,
3340 	.stop_device = iwl_trans_pcie_stop_device,
3341 
3342 	.send_cmd = iwl_pcie_enqueue_hcmd,
3343 
3344 	.tx = iwl_trans_pcie_tx,
3345 	.reclaim = iwl_txq_reclaim,
3346 
3347 	.txq_disable = iwl_trans_pcie_txq_disable,
3348 	.txq_enable = iwl_trans_pcie_txq_enable,
3349 
3350 	.txq_set_shared_mode = iwl_trans_pcie_txq_set_shared_mode,
3351 
3352 	.wait_tx_queues_empty = iwl_trans_pcie_wait_txqs_empty,
3353 
3354 	.freeze_txq_timer = iwl_trans_txq_freeze_timer,
3355 	.block_txq_ptrs = iwl_trans_pcie_block_txq_ptrs,
3356 #ifdef CONFIG_IWLWIFI_DEBUGFS
3357 	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3358 #endif
3359 };
3360 
3361 static const struct iwl_trans_ops trans_ops_pcie_gen2 = {
3362 	IWL_TRANS_COMMON_OPS,
3363 	.start_hw = iwl_trans_pcie_start_hw,
3364 	.fw_alive = iwl_trans_pcie_gen2_fw_alive,
3365 	.start_fw = iwl_trans_pcie_gen2_start_fw,
3366 	.stop_device = iwl_trans_pcie_gen2_stop_device,
3367 
3368 	.send_cmd = iwl_pcie_gen2_enqueue_hcmd,
3369 
3370 	.tx = iwl_txq_gen2_tx,
3371 	.reclaim = iwl_txq_reclaim,
3372 
3373 	.set_q_ptrs = iwl_txq_set_q_ptrs,
3374 
3375 	.txq_alloc = iwl_txq_dyn_alloc,
3376 	.txq_free = iwl_txq_dyn_free,
3377 	.wait_txq_empty = iwl_trans_pcie_wait_txq_empty,
3378 	.rxq_dma_data = iwl_trans_pcie_rxq_dma_data,
3379 	.set_pnvm = iwl_trans_pcie_ctx_info_gen3_set_pnvm,
3380 #ifdef CONFIG_IWLWIFI_DEBUGFS
3381 	.debugfs_cleanup = iwl_trans_pcie_debugfs_cleanup,
3382 #endif
3383 };
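
/*
 * Illustrative sketch, not driver code: IWL_TRANS_COMMON_OPS above shows
 * how designated initializers compose with a macro - the shared
 * callbacks are pasted into every ops table, and each table then adds
 * its own members. The same idea in miniature, with hypothetical ops:
 */
struct example_ops {
	int (*open)(void *priv);
	int (*tx)(void *priv, const void *buf, size_t len);
};

static int example_open(void *priv);		/* definitions elided */
static int example_tx_v1(void *priv, const void *buf, size_t len);
static int example_tx_v2(void *priv, const void *buf, size_t len);

#define EXAMPLE_COMMON_OPS	\
	.open = example_open

static const struct example_ops example_ops_v1 = {
	EXAMPLE_COMMON_OPS,
	.tx = example_tx_v1,
};

static const struct example_ops example_ops_v2 = {
	EXAMPLE_COMMON_OPS,
	.tx = example_tx_v2,
};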
3384 
3385 struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
3386 			       const struct pci_device_id *ent,
3387 			       const struct iwl_cfg_trans_params *cfg_trans)
3388 {
3389 	struct iwl_trans_pcie *trans_pcie;
3390 	struct iwl_trans *trans;
3391 	int ret, addr_size;
3392 	const struct iwl_trans_ops *ops = &trans_ops_pcie_gen2;
3393 
3394 	if (!cfg_trans->gen2)
3395 		ops = &trans_ops_pcie;
3396 
3397 	ret = pcim_enable_device(pdev);
3398 	if (ret)
3399 		return ERR_PTR(ret);
3400 
3401 	trans = iwl_trans_alloc(sizeof(struct iwl_trans_pcie), &pdev->dev, ops,
3402 				cfg_trans);
3403 	if (!trans)
3404 		return ERR_PTR(-ENOMEM);
3405 
3406 	trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
3407 
3408 	trans_pcie->trans = trans;
3409 	trans_pcie->opmode_down = true;
3410 	spin_lock_init(&trans_pcie->irq_lock);
3411 	spin_lock_init(&trans_pcie->reg_lock);
3412 	spin_lock_init(&trans_pcie->alloc_page_lock);
3413 	mutex_init(&trans_pcie->mutex);
3414 	init_waitqueue_head(&trans_pcie->ucode_write_waitq);
3415 	init_waitqueue_head(&trans_pcie->fw_reset_waitq);
3416 
3417 	trans_pcie->rba.alloc_wq = alloc_workqueue("rb_allocator",
3418 						   WQ_HIGHPRI | WQ_UNBOUND, 1);
3419 	if (!trans_pcie->rba.alloc_wq) {
3420 		ret = -ENOMEM;
3421 		goto out_free_trans;
3422 	}
3423 	INIT_WORK(&trans_pcie->rba.rx_alloc, iwl_pcie_rx_allocator_work);
3424 
3425 	trans_pcie->debug_rfkill = -1;
3426 
3427 	if (!cfg_trans->base_params->pcie_l1_allowed) {
3428 		/*
3429 		 * W/A - seems to solve weird behavior. We need to remove this
3430 		 * if we don't want to stay in L1 all the time. This wastes a
3431 		 * lot of power.
3432 		 */
3433 		pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S |
3434 				       PCIE_LINK_STATE_L1 |
3435 				       PCIE_LINK_STATE_CLKPM);
3436 	}
3437 
3438 	trans_pcie->def_rx_queue = 0;
3439 
3440 	pci_set_master(pdev);
3441 
3442 	addr_size = trans->txqs.tfd.addr_size;
3443 	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(addr_size));
3444 	if (!ret)
3445 		ret = pci_set_consistent_dma_mask(pdev,
3446 						  DMA_BIT_MASK(addr_size));
3447 	if (ret) {
3448 		ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
3449 		if (!ret)
3450 			ret = pci_set_consistent_dma_mask(pdev,
3451 							  DMA_BIT_MASK(32));
3452 		/* both attempts failed: */
3453 		if (ret) {
3454 			dev_err(&pdev->dev, "No suitable DMA available\n");
3455 			goto out_no_pci;
3456 		}
3457 	}
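
	/*
	 * Illustrative sketch, not driver code: this four-call fallback
	 * predates the combined DMA-mask helper; on kernels that have it,
	 * the same policy condenses to:
	 *
	 *	ret = dma_set_mask_and_coherent(&pdev->dev,
	 *					DMA_BIT_MASK(addr_size));
	 *	if (ret)
	 *		ret = dma_set_mask_and_coherent(&pdev->dev,
	 *						DMA_BIT_MASK(32));
	 */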
3458 
3459 	ret = pcim_iomap_regions_request_all(pdev, BIT(0), DRV_NAME);
3460 	if (ret) {
3461 		dev_err(&pdev->dev, "pcim_iomap_regions_request_all failed\n");
3462 		goto out_no_pci;
3463 	}
3464 
3465 	trans_pcie->hw_base = pcim_iomap_table(pdev)[0];
3466 	if (!trans_pcie->hw_base) {
3467 		dev_err(&pdev->dev, "pcim_iomap_table failed\n");
3468 		ret = -ENODEV;
3469 		goto out_no_pci;
3470 	}
3471 
3472 	/* We disable the RETRY_TIMEOUT register (0x41) to keep
3473 	 * PCI Tx retries from interfering with C3 CPU state */
3474 	pci_write_config_byte(pdev, PCI_CFG_RETRY_TIMEOUT, 0x00);
3475 
3476 	trans_pcie->pci_dev = pdev;
3477 	iwl_disable_interrupts(trans);
3478 
3479 	trans->hw_rev = iwl_read32(trans, CSR_HW_REV);
3480 	if (trans->hw_rev == 0xffffffff) {
3481 		dev_err(&pdev->dev, "HW_REV=0xFFFFFFFF, PCI issues?\n");
3482 		ret = -EIO;
3483 		goto out_no_pci;
3484 	}
3485 
3486 	/*
3487 	 * In the 8000 HW family the format of the 4 bytes of CSR_HW_REV have
3488 	 * changed, and now the revision step also includes bit 0-1 (no more
3489 	 * "dash" value). To keep hw_rev backwards compatible - we'll store it
3490 	 * in the old format.
3491 	 */
3492 	if (cfg_trans->device_family >= IWL_DEVICE_FAMILY_8000)
3493 		trans->hw_rev = (trans->hw_rev & 0xfff0) |
3494 				(CSR_HW_REV_STEP(trans->hw_rev << 2) << 2);
3495 
3496 	IWL_DEBUG_INFO(trans, "HW REV: 0x%0x\n", trans->hw_rev);
3497 
3498 	iwl_pcie_set_interrupt_capa(pdev, trans, cfg_trans);
3499 	trans->hw_id = (pdev->device << 16) + pdev->subsystem_device;
3500 	snprintf(trans->hw_id_str, sizeof(trans->hw_id_str),
3501 		 "PCI ID: 0x%04X:0x%04X", pdev->device, pdev->subsystem_device);
3502 
3503 	init_waitqueue_head(&trans_pcie->sx_waitq);
3504 
3506 	if (trans_pcie->msix_enabled) {
3507 		ret = iwl_pcie_init_msix_handler(pdev, trans_pcie);
3508 		if (ret)
3509 			goto out_no_pci;
3510 	} else {
3511 		ret = iwl_pcie_alloc_ict(trans);
3512 		if (ret)
3513 			goto out_no_pci;
3514 
3515 		ret = devm_request_threaded_irq(&pdev->dev, pdev->irq,
3516 						iwl_pcie_isr,
3517 						iwl_pcie_irq_handler,
3518 						IRQF_SHARED, DRV_NAME, trans);
3519 		if (ret) {
3520 			IWL_ERR(trans, "Error allocating IRQ %d\n", pdev->irq);
3521 			goto out_free_ict;
3522 		}
3523 	}
3524 
3525 #ifdef CONFIG_IWLWIFI_DEBUGFS
3526 	trans_pcie->fw_mon_data.state = IWL_FW_MON_DBGFS_STATE_CLOSED;
3527 	mutex_init(&trans_pcie->fw_mon_data.mutex);
3528 #endif
3529 
3530 	iwl_dbg_tlv_init(trans);
3531 
3532 	return trans;
3533 
3534 out_free_ict:
3535 	iwl_pcie_free_ict(trans);
3536 out_no_pci:
3537 	destroy_workqueue(trans_pcie->rba.alloc_wq);
3538 out_free_trans:
3539 	iwl_trans_free(trans);
3540 	return ERR_PTR(ret);
3541 }
3542