xref: /linux/drivers/net/wireless/ath/ath12k/pci.c (revision 8f7aa3d3c7323f4ca2768a9e74ebbe359c4f8f88)
// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#include <linux/time.h>
#include <linux/vmalloc.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		36

#define ATH12K_PCI_IRQ_CE0_OFFSET		3

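/* Registers beyond the first 512 KiB of BAR0 are reached through a sliding
 * window: the value taken from bits [24:19] of the target offset
 * (WINDOW_VALUE_MASK) is programmed into WINDOW_REG_ADDRESS to select a
 * 512 KiB page, which then appears at BAR0 + WINDOW_START and is indexed by
 * bits [18:0] (WINDOW_RANGE_MASK). For example, TCSR_SOC_HW_VERSION
 * (0x1B00000) selects window 0x36 with in-window offset 0x0. See
 * ath12k_pci_select_window() and ath12k_pci_read32() below.
 */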
#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

#define TCSR_SOC_HW_VERSION		0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)

/* Registers below BAR0 + 4K are always accessible, so there is
 * no need to force a wakeup for them.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

#define QCN9274_DEVICE_ID		0x1109
#define WCN7850_DEVICE_ID		0x1107

#define PCIE_LOCAL_REG_QRTR_NODE_ID	0x1E03164
#define DOMAIN_NUMBER_MASK		GENMASK(7, 4)
#define BUS_NUMBER_MASK			GENMASK(3, 0)

static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);

/* TODO: revisit IRQ mapping for new SRNGs */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};

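/* Fallback used when only a single MSI vector could be allocated: every
 * user shares vector 0 (base_vector is 0 throughout), so one hardware
 * interrupt fans out to MHI, CE, WAKE and DP, whereas the table above
 * carves the 16 vectors into the disjoint ranges 0-2 (MHI), 3-7 (CE) and
 * 8-15 (DP).
 */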
static const struct ath12k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath12k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};

static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}

static void ath12k_pci_bus_release(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

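/* QCN9274 provides no wakeup/release hooks, so register access never forces
 * an MHI wakeup; WCN7850 must bring the MHI device out of suspend before
 * touching registers at or beyond ACCESS_ALWAYS_OFF (see
 * ath12k_pci_read32()/ath12k_pci_write32()).
 */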
static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
	.wakeup = NULL,
	.release = NULL,
};

static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
	.wakeup = ath12k_pci_bus_wake_up,
	.release = ath12k_pci_bus_release,
};

static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only the dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
		ab_pci->register_window = window;
	}
}

static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
	u32 window;

	window = (umac_window << 12) | (ce_window << 6);

	spin_lock_bh(&ab_pci->window_lock);
	ab_pci->register_window = window;
	spin_unlock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
}

static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
				       u32 offset)
{
	u32 window_start;

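	/* (offset ^ base) < WINDOW_RANGE_MASK effectively checks that offset
	 * and base agree on all bits above bit 18, i.e. that the offset
	 * falls inside the same 512 KiB register block as the base address.
	 */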
	/* If offset lies within DP register range, use 3rd window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
		window_start = 3 * WINDOW_START;
	/* If offset lies within CE register range, use 2nd window */
	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
		window_start = 2 * WINDOW_START;
	else
		window_start = WINDOW_START;

	return window_start;
}

static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
{
	return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
}

static void ath12k_pci_restore_window(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	spin_lock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | ab_pci->register_window,
		  ab->mem + WINDOW_REG_ADDRESS);
	ioread32(ab->mem + WINDOW_REG_ADDRESS);

	spin_unlock_bh(&ab_pci->window_lock);
}

static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Toggle the V bit back, otherwise the device stays stuck in reset */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");

	/* Restore the window register, whose content is cleared during a
	 * hardware global reset, so that it stays in sync with the host's
	 * cached value.
	 */
	ath12k_pci_restore_window(ab);
}

static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
	 * down the warm boot path and entering a dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* WLAON_SOC_RESET_CAUSE_REG is a read-to-clear register; clear it
	 * to prevent Q6 from entering the wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}

static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* The PCIe link seems very unstable after Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a workaround for PCIe Hot Reset. The target still
	 * latches an interrupt when it receives a Hot Reset, so when
	 * SBL is downloaded again it unmasks interrupts, receives the
	 * stale one and crashes immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}

static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

static void ath12k_pci_free_irq(struct ath12k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath12k_pci_free_ext_irq(ab);
}

static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_disable(ab, i);
	}
}

static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

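/* Bottom half for a CE interrupt: service the pipe, then re-enable the
 * per-CE IRQ that ath12k_pci_ce_interrupt_handler() disabled before
 * queueing this work.
 */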
static void ath12k_pci_ce_workqueue(struct work_struct *work)
{
	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}

static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;
	struct ath12k_base *ab = ce_pipe->ab;
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	disable_irq_nosync(ab->irq_num[irq_idx]);

	queue_work(system_bh_wq, &ce_pipe->intr_wq);

	return IRQ_HANDLED;
}

static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_pci_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath12k_ext_irq_grp,
						napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

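	/* NAPI poll callbacks must not report more work than the budget
	 * they were given, so clamp before returning.
	 */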
	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;
	struct ath12k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;
	struct ath12k_ext_irq_grp *irq_grp;

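	/* DP group IRQ indices live in ab->irq_num[] right after the CE
	 * range, hence the CE0 offset plus CE_COUNT_MAX base.
	 */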
	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		if (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i] ||
		    ab->hw_params->ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  ab_pci->irq_flags,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				goto fail_request;
			}
		}
		ath12k_pci_ext_grp_disable(irq_grp);
	}

	return 0;

fail_request:
	/* the i-th group's napi_ndev was allocated successfully, so free it too */
	i += 1;
fail_allocate:
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}

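/* The affinity hint is only applied in single-MSI mode, where every user
 * shares the one interrupt and ath12k_pci_msi_alloc() also sets
 * IRQF_NOBALANCING; with multiple vectors nothing is pinned.
 */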
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return 0;

	return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}

static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  ab_pci->irq_flags, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_bus *bus = ab_pci->pdev->bus;

	cfg->tgt_ce = ab->hw_params->target_ce_config;
	cfg->tgt_ce_len = ab->hw_params->target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;

	if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID)) {
		ab_pci->qmi_instance =
			u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
			u32_encode_bits(bus->number, BUS_NUMBER_MASK);
		ab->qmi.service_ins_id += ab_pci->qmi_instance;
	}
}

static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
	int i;

	set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_enable(ab, i);
	}
}

static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
	struct pci_dev *dev = ab_pci->pdev;
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, true);
}

static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, false);
}

static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);

	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->irq_flags = IRQF_SHARED;
	} else {
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->msi_config = &msi_config_one_msi;
		ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
		ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n");
	}

	ath12k_info(ab, "MSI vectors: %d\n", num_vectors);

	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}

static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci)
{
	struct msi_desc *msi_desc;

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
		pci_free_irq_vectors(ab_pci->pdev);
		return -EINVAL;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;

	ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
		   ab_pci->msi_ep_base_data);

	return 0;
}

static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ab_pci->dma_mask = DMA_BIT_MASK(ATH12K_PCI_DMA_MASK);
	dma_set_mask(&pdev->dev, ab_pci->dma_mask);
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}

static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 reg;

	/* On platforms with two or more identical MHI devices, the QMI
	 * services run with identical qrtr-node-ids, and qrtr-lookup cannot
	 * register more than one QMI service with the same node ID.
	 *
	 * Generate a unique instance ID from the PCIe domain and bus number
	 * and write it to the given register, where it is available to the
	 * firmware when the QMI service is spawned.
	 */
	reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
	ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
		   reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
}

static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (ab_pci->ab->hw_params->supports_aspm &&
	    test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}

static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		cancel_work_sync(&ce_pipe->intr_wq);
	}
}

static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_cancel_workqueue(ab);
}

int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		entry = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return pci_irq_vector(pci_dev, vector);
}

int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*base_vector = msi_config->users[idx].base_vector;
			*user_base_data = *base_vector + ab_pci->msi_ep_base_data;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}

void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
				u32 *msi_addr_hi)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_hi);
	} else {
		*msi_addr_hi = 0;
	}
}

void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
			       u32 *msi_idx)
{
	u32 i, msi_data_idx;

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		if (ce_id == i)
			break;

		msi_data_idx++;
	}
	*msi_idx = msi_data_idx;
}

void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}

void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}

void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}

		ath12k_pci_ext_grp_enable(irq_grp);
	}

	set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}

void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;

	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}

int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_suspend(ar_pci);

	return 0;
}

int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_resume(ar_pci);

	return 0;
}

void ath12k_pci_stop(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
		return;

	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}

int ath12k_pci_start(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		ath12k_pci_aspm_restore(ab_pci);
	else
		ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");

	ath12k_pci_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}

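/* Register reads take one of three paths: offsets below WINDOW_START are
 * read directly; on chips with a static window map (QCN9274) the UMAC and
 * CE blocks are reached through fixed windows at 3 * WINDOW_START and
 * 2 * WINDOW_START; everything else goes through the dynamic window, which
 * is reprogrammed under window_lock. ath12k_pci_write32() mirrors the same
 * logic.
 */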
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* For offsets beyond BAR + 4K - 32, we may need to wake
	 * up MHI before the access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				offset = offset - PCI_MHIREGLEN_REG;
				val = ioread32(ab->mem +
					       (offset & WINDOW_RANGE_MASK));
			} else {
				val = ioread32(ab->mem + window_start +
					       (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
	return val;
}

void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* For offsets beyond BAR + 4K - 32, we may need to wake
	 * up MHI before the access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				offset = offset - PCI_MHIREGLEN_REG;
				iowrite32(value, ab->mem +
					  (offset & WINDOW_RANGE_MASK));
			} else {
				iowrite32(value, ab->mem + window_start +
					  (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}

#ifdef CONFIG_ATH12K_COREDUMP
static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct ath12k_tlv_dump_data *dump_tlv;
	enum ath12k_fw_crash_dump_type mem_type;
	u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
	struct ath12k_dump_file_data *file_data;
	int i;

	rddm_img = mhi_ctrl->rddm_image;
	if (!rddm_img) {
		ath12k_err(ab, "No RDDM dump found\n");
		return 0;
	}

	fw_img = mhi_ctrl->fbc_image;

	for (i = 0; i < fw_img->entries; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		paging_tlv_sz += fw_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;

	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		rddm_tlv_sz += rddm_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;

	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
		mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);

		if (mem_type == FW_CRASH_DUMP_NONE)
			continue;

		if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "target mem region type %d not supported",
				   ab->qmi.target_mem[i].type);
			continue;
		}

		if (!ab->qmi.target_mem[i].paddr)
			continue;

		dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
	}

	for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
		if (!dump_seg_sz[i])
			continue;

		len += sizeof(*dump_tlv) + dump_seg_sz[i];
	}

	if (len)
		len += sizeof(*file_data);

	return len;
}

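/* Layout of the resulting devcoredump blob: an ath12k_dump_file_data
 * header, then one TLV each for the paging data, the RDDM data and every
 * populated QMI target memory region, in that order.
 */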
static void ath12k_pci_coredump_download(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct timespec64 timestamp;
	int i, len, mem_idx;
	enum ath12k_fw_crash_dump_type mem_type;
	struct ath12k_dump_file_data *file_data;
	struct ath12k_tlv_dump_data *dump_tlv;
	size_t hdr_len = sizeof(*file_data);
	void *buf;
	u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};

	ath12k_mhi_coredump(mhi_ctrl, false);

	len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz);
	if (!len) {
		ath12k_warn(ab, "No crash dump data found for devcoredump");
		return;
	}

	rddm_img = mhi_ctrl->rddm_image;
	fw_img = mhi_ctrl->fbc_image;

	/* dev_coredumpv() requires vmalloc data */
	buf = vzalloc(len);
	if (!buf)
		return;

	ab->dump_data = buf;
	ab->ath12k_coredump_len = len;
	file_data = ab->dump_data;
	strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
	file_data->len = cpu_to_le32(len);
	file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
	file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
	file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
	file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
	guid_gen(&file_data->guid);
	ktime_get_real_ts64(&timestamp);
	file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
	file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
	buf += hdr_len;
	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single contiguous
	 * block of memory
	 */
	for (i = 0; i < fw_img->entries; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
			      fw_img->mhi_buf[i].len);
		buf += fw_img->mhi_buf[i].len;
	}

	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single contiguous
	 * block of memory
	 */
	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
			      rddm_img->mhi_buf[i].len);
		buf += rddm_img->mhi_buf[i].len;
	}

	mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
	for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
		if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE)
			continue;

		dump_tlv = buf;
		dump_tlv->type = cpu_to_le32(mem_idx);
		dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
		buf += COREDUMP_TLV_HDR_SIZE;

		for (i = 0; i < ab->qmi.mem_seg_count; i++) {
			mem_type = ath12k_coredump_get_dump_type
							(ab->qmi.target_mem[i].type);

			if (mem_type != mem_idx)
				continue;

			if (!ab->qmi.target_mem[i].paddr) {
				ath12k_dbg(ab, ATH12K_DBG_PCI,
					   "Skipping mem region type %d",
					   ab->qmi.target_mem[i].type);
				continue;
			}

			memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
				      ab->qmi.target_mem[i].size);
			buf += ab->qmi.target_mem[i].size;
		}
	}

	queue_work(ab->workqueue, &ab->dump_work);
}
#endif

int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID))
		ath12k_pci_update_qrtr_node_id(ab);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}

void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
		return;

	/* restore ASPM in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci, is_suspend);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}

static int ath12k_pci_panic_handler(struct ath12k_base *ab)
{
	ath12k_pci_sw_reset(ab, false);

	return NOTIFY_OK;
}

static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
	.panic_handler = ath12k_pci_panic_handler,
#ifdef CONFIG_ATH12K_COREDUMP
	.coredump_download = ath12k_pci_coredump_download,
#endif
};

static
void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
{
	u32 soc_hw_version;

	soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
			   soc_hw_version);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
			   soc_hw_version);

	ath12k_dbg(ab, ATH12K_DBG_PCI,
		   "pci tcsr_soc_hw_version major %d minor %d\n",
		   *major, *minor);
}

static int ath12k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath12k_base *ab;
	struct ath12k_pci *ab_pci;
	u32 soc_hw_version_major, soc_hw_version_minor;
	int ret;

	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath12k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath12k_pci_hif_ops;
	ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
	spin_lock_init(&ab_pci->window_lock);

	ret = ath12k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath12k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ab->id.vendor = pdev->vendor;
	ab->id.device = pdev->device;
	ab->id.subsystem_vendor = pdev->subsystem_vendor;
	ab->id.subsystem_device = pdev->subsystem_device;

	switch (pci_dev->device) {
	case QCN9274_DEVICE_ID:
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = true;
		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
		ab->hal_rx_ops = &hal_rx_qcn9274_ops;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		ab->target_mem_mode = ath12k_core_get_memory_mode(ab);
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_QCN9274_HW20;
			break;
		case ATH12K_PCI_SOC_HW_VERSION_1:
			ab->hw_rev = ATH12K_HW_QCN9274_HW10;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for QCN9274: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	case WCN7850_DEVICE_ID:
		ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = false;
		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
		ab->hal_rx_ops = &hal_rx_wcn7850_ops;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		ab->target_mem_mode = ATH12K_QMI_MEMORY_MODE_DEFAULT;
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for WCN7850: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;

	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath12k_pci_msi_alloc(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_pci_msi_free;

	ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
	if (ret) {
		ath12k_err(ab, "failed to set irq affinity %d\n", ret);
		goto err_pci_msi_free;
	}

	ret = ath12k_mhi_register(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_irq_affinity_cleanup;
	}

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_pci_init_qmi_ce_config(ab);

	ret = ath12k_pci_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	/* The kernel may allocate a dummy vector before request_irq() and
	 * then allocate a real vector when request_irq() is called.
	 * So read msi_data again here to avoid spurious interrupts,
	 * as msi_data will be configured for the SRNGs.
	 */
	ret = ath12k_pci_config_msi_data(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to config msi_data: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}
	return 0;

err_free_irq:
	/* __free_irq() expects the caller to have cleared the affinity hint */
	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath12k_pci_free_irq(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath12k_mhi_unregister(ab_pci);

err_irq_affinity_cleanup:
	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

err_pci_msi_free:
	ath12k_pci_msi_free(ab_pci);

err_pci_free_region:
	ath12k_pci_free_region(ab_pci);

err_free_core:
	ath12k_core_free(ab);

	return ret;
}

static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab, false);
		goto qmi_fail;
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	cancel_work_sync(&ab->reset_work);
	cancel_work_sync(&ab->dump_work);
	ath12k_core_hw_group_cleanup(ab->ag);

qmi_fail:
	ath12k_core_deinit(ab);
	ath12k_fw_unmap(ab);
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_core_free(ab);
}

static void ath12k_pci_hw_group_power_down(struct ath12k_hw_group *ag)
{
	struct ath12k_base *ab;
	int i;

	if (!ag)
		return;

	mutex_lock(&ag->mutex);

	for (i = 0; i < ag->num_devices; i++) {
		ab = ag->ab[i];
		if (!ab)
			continue;

		ath12k_pci_power_down(ab, false);
	}

	mutex_unlock(&ag->mutex);
}

static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath12k_pci_hw_group_power_down(ab->ag);
}

static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend(ab);
	if (ret)
		ath12k_warn(ab, "failed to suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume(ab);
	if (ret)
		ath12k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend_late(ab);
	if (ret)
		ath12k_warn(ab, "failed to late suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume_early(ab);
	if (ret)
		ath12k_warn(ab, "failed to early resume core: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
				ath12k_pci_pm_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
				     ath12k_pci_pm_resume_early)
};

static struct pci_driver ath12k_pci_driver = {
	.name = "ath12k_pci",
	.id_table = ath12k_pci_id_table,
	.probe = ath12k_pci_probe,
	.remove = ath12k_pci_remove,
	.shutdown = ath12k_pci_shutdown,
	.driver.pm = &ath12k_pci_pm_ops,
};

int ath12k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath12k_pci_driver);
	if (ret) {
		pr_err("failed to register ath12k pci driver: %d\n",
		       ret);
		return ret;
	}

	return 0;
}

void ath12k_pci_exit(void)
{
	pci_unregister_driver(&ath12k_pci_driver);
}

/* firmware files */
MODULE_FIRMWARE(ATH12K_FW_DIR "/QCN9274/hw2.0/*");
MODULE_FIRMWARE(ATH12K_FW_DIR "/WCN7850/hw2.0/*");