xref: /linux/drivers/net/wireless/ath/ath12k/pci.c (revision a34b0e4e21d6be3c3d620aa7f9dfbf0e9550c19e)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) Qualcomm Technologies, Inc. and/or its subsidiaries.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/msi.h>
9 #include <linux/pci.h>
10 #include <linux/time.h>
11 #include <linux/vmalloc.h>
12 
13 #include "pci.h"
14 #include "core.h"
15 #include "hif.h"
16 #include "mhi.h"
17 #include "debug.h"
18 #include "hal.h"
19 
/* BAR index used for register access and the device DMA addressing width */
#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		36

/* MSI/irq slot of the first Copy Engine interrupt (see irq_name[]) */
#define ATH12K_PCI_IRQ_CE0_OFFSET		3

/* Register window layout: bit 30 enables windowing, bits 24:19 carry the
 * dynamic window value, bits 18:0 address within a window, and bits 31:6
 * of the cached value hold the static (UMAC/CE) window configuration.
 */
#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

/* BAR0 + 4k is always accessible, and no
 * need to force wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

/* Registered sub-drivers, one slot per device family
 * (sized by ATH12K_DEVICE_FAMILY_MAX).
 */
static struct ath12k_pci_driver *ath12k_pci_family_drivers[ATH12K_DEVICE_FAMILY_MAX];
/* Fallback MSI layout used when only a single MSI vector can be
 * allocated: every user (MHI, CE, WAKE, DP) shares vector 0.
 */
static const struct ath12k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath12k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
48 
/* Human-readable names passed to request_irq(), indexed by irq slot:
 * irq_name[ATH12K_PCI_IRQ_CE0_OFFSET + n] is Copy Engine n ("ce<n>").
 */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
107 
/* Program the dynamic register window so @offset becomes reachable through
 * the windowed part of BAR0. The static (UMAC/CE) window bits cached in
 * ab_pci->register_window are preserved; only the dynamic field changes.
 * Caller must hold ab_pci->window_lock.
 */
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	/* Skip the register write when the window is already correct */
	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + ab_pci->window_reg_addr);
		/* readback flushes the posted window update before use */
		ioread32(ab->mem + ab_pci->window_reg_addr);
		ab_pci->register_window = window;
	}
}
128 
129 static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
130 {
131 	u32 umac_window;
132 	u32 ce_window;
133 	u32 window;
134 
135 	umac_window = u32_get_bits(ab_pci->reg_base->umac_base, WINDOW_VALUE_MASK);
136 	ce_window = u32_get_bits(ab_pci->reg_base->ce_reg_base, WINDOW_VALUE_MASK);
137 	window = (umac_window << 12) | (ce_window << 6);
138 
139 	spin_lock_bh(&ab_pci->window_lock);
140 	ab_pci->register_window = window;
141 	spin_unlock_bh(&ab_pci->window_lock);
142 
143 	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + ab_pci->window_reg_addr);
144 }
145 
146 static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
147 				       u32 offset)
148 {
149 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
150 	u32 window_start;
151 
152 	/* If offset lies within DP register range, use 3rd window */
153 	if ((offset ^ ab_pci->reg_base->umac_base) < WINDOW_RANGE_MASK)
154 		window_start = 3 * WINDOW_START;
155 	/* If offset lies within CE register range, use 2nd window */
156 	else if ((offset ^ ab_pci->reg_base->ce_reg_base) < WINDOW_RANGE_MASK)
157 		window_start = 2 * WINDOW_START;
158 	else
159 		window_start = WINDOW_START;
160 
161 	return window_start;
162 }
163 
164 static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
165 {
166 	return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
167 }
168 
/* Rewrite the window register from the cached host copy, used after a
 * reset has cleared the hardware register (see ath12k_pci_soc_global_reset()).
 */
static void ath12k_pci_restore_window(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	spin_lock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | ab_pci->register_window,
		  ab->mem + ab_pci->window_reg_addr);
	/* readback flushes the posted write */
	ioread32(ab->mem + ab_pci->window_reg_addr);

	spin_unlock_bh(&ab_pci->window_lock);
}
181 
/* Pulse the SoC global reset: assert the V bit, wait, de-assert, wait, and
 * verify the device is still reachable. The reset clears the register
 * window in hardware, so the cached window is re-programmed at the end.
 */
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	/* an all-ones readback indicates the PCIe link went down */
	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");

	/* Restore window register as its content is cleared during
	 * hardware global reset, such that it aligns with host cache.
	 */
	ath12k_pci_restore_window(ab);
}
212 
/* Clear warm-boot scratch/debug registers so Q6 firmware does not follow a
 * stale warm-boot path after reset. Reads are logged for debugging.
 */
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	/* read back to confirm the clear took effect */
	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read clear register. clear the register to prevent
	 * Q6 from entering wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}
242 
/* Program the PARF LTSSM register to PARM_LTSSM_VALUE (retrying because the
 * link can be unstable after a hot reset) and then set the hot-reset bit in
 * the GCC register.
 */
static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset*/
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		/* all-ones readback means the link dropped; give it time */
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST(ab), val);
	/* read back the hot-reset register for logging */
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST(ab));

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}
270 
static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* Workaround for PCIe hot reset: the target latches an interrupt
	 * across the reset, so when SBL is downloaded again it unmasks
	 * interrupts, immediately takes the stale one and crashes. Clear
	 * all pending interrupts up front.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
280 
281 static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
282 {
283 	u32 val;
284 
285 	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
286 	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
287 	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
288 }
289 
/* Force the SoC awake via the local wake register, then delay 5 ms for the
 * wake to take effect (exact required time uncertain - see similar TODOs).
 */
static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}
295 
/* Software-reset sequence. On power-on, first stabilize the link (LTSSM),
 * clear stale interrupts and the WLAON power-control bit; then clear MHI
 * vectors and debug registers, pulse the SoC global reset and put MHI back
 * into reset. The step order is part of the reset procedure - do not reorder.
 */
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}
309 
310 static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
311 {
312 	int i, j;
313 
314 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
315 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
316 
317 		for (j = 0; j < irq_grp->num_irq; j++)
318 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
319 
320 		netif_napi_del(&irq_grp->napi);
321 		free_netdev(irq_grp->napi_ndev);
322 	}
323 }
324 
325 static void ath12k_pci_free_irq(struct ath12k_base *ab)
326 {
327 	int i, irq_idx;
328 
329 	for (i = 0; i < ab->hw_params->ce_count; i++) {
330 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
331 			continue;
332 		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
333 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
334 	}
335 
336 	ath12k_pci_free_ext_irq(ab);
337 }
338 
339 static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
340 {
341 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
342 	u32 irq_idx;
343 
344 	/* In case of one MSI vector, we handle irq enable/disable in a
345 	 * uniform way since we only have one irq
346 	 */
347 	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
348 		return;
349 
350 	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
351 	enable_irq(ab->irq_num[irq_idx]);
352 }
353 
354 static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
355 {
356 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
357 	u32 irq_idx;
358 
359 	/* In case of one MSI vector, we handle irq enable/disable in a
360 	 * uniform way since we only have one irq
361 	 */
362 	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
363 		return;
364 
365 	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
366 	disable_irq_nosync(ab->irq_num[irq_idx]);
367 }
368 
369 static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
370 {
371 	int i;
372 
373 	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
374 
375 	for (i = 0; i < ab->hw_params->ce_count; i++) {
376 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
377 			continue;
378 		ath12k_pci_ce_irq_disable(ab, i);
379 	}
380 }
381 
382 static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
383 {
384 	int i;
385 	int irq_idx;
386 
387 	for (i = 0; i < ab->hw_params->ce_count; i++) {
388 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
389 			continue;
390 
391 		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
392 		synchronize_irq(ab->irq_num[irq_idx]);
393 	}
394 }
395 
396 static void ath12k_pci_ce_workqueue(struct work_struct *work)
397 {
398 	struct ath12k_ce_pipe *ce_pipe = from_work(ce_pipe, work, intr_wq);
399 	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
400 
401 	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);
402 
403 	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
404 }
405 
406 static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
407 {
408 	struct ath12k_ce_pipe *ce_pipe = arg;
409 	struct ath12k_base *ab = ce_pipe->ab;
410 	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;
411 
412 	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
413 		return IRQ_HANDLED;
414 
415 	/* last interrupt received for this CE */
416 	ce_pipe->timestamp = jiffies;
417 
418 	disable_irq_nosync(ab->irq_num[irq_idx]);
419 
420 	queue_work(system_bh_wq, &ce_pipe->intr_wq);
421 
422 	return IRQ_HANDLED;
423 }
424 
425 static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
426 {
427 	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
428 	int i;
429 
430 	/* In case of one MSI vector, we handle irq enable/disable
431 	 * in a uniform way since we only have one irq
432 	 */
433 	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
434 		return;
435 
436 	for (i = 0; i < irq_grp->num_irq; i++)
437 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
438 }
439 
440 static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
441 {
442 	int i;
443 
444 	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
445 		return;
446 
447 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
448 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
449 
450 		ath12k_pci_ext_grp_disable(irq_grp);
451 
452 		if (irq_grp->napi_enabled) {
453 			napi_synchronize(&irq_grp->napi);
454 			napi_disable(&irq_grp->napi);
455 			irq_grp->napi_enabled = false;
456 		}
457 	}
458 }
459 
460 static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
461 {
462 	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
463 	int i;
464 
465 	/* In case of one MSI vector, we handle irq enable/disable in a
466 	 * uniform way since we only have one irq
467 	 */
468 	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
469 		return;
470 
471 	for (i = 0; i < irq_grp->num_irq; i++)
472 		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
473 }
474 
475 static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
476 {
477 	int i, j, irq_idx;
478 
479 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
480 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
481 
482 		for (j = 0; j < irq_grp->num_irq; j++) {
483 			irq_idx = irq_grp->irqs[j];
484 			synchronize_irq(ab->irq_num[irq_idx]);
485 		}
486 	}
487 }
488 
489 static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
490 {
491 	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
492 						struct ath12k_ext_irq_grp,
493 						napi);
494 	struct ath12k_base *ab = irq_grp->ab;
495 	struct ath12k_dp *dp = ath12k_ab_to_dp(ab);
496 	int work_done;
497 	int i;
498 
499 	work_done = ath12k_dp_service_srng(dp, irq_grp, budget);
500 	if (work_done < budget) {
501 		napi_complete_done(napi, work_done);
502 		for (i = 0; i < irq_grp->num_irq; i++)
503 			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
504 	}
505 
506 	if (work_done > budget)
507 		work_done = budget;
508 
509 	return work_done;
510 }
511 
512 static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
513 {
514 	struct ath12k_ext_irq_grp *irq_grp = arg;
515 	struct ath12k_base *ab = irq_grp->ab;
516 	int i;
517 
518 	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
519 		return IRQ_HANDLED;
520 
521 	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);
522 
523 	/* last interrupt received for this group */
524 	irq_grp->timestamp = jiffies;
525 
526 	for (i = 0; i < irq_grp->num_irq; i++)
527 		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
528 
529 	napi_schedule(&irq_grp->napi);
530 
531 	return IRQ_HANDLED;
532 }
533 
534 static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
535 {
536 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
537 	int i, j, n, ret, num_vectors = 0;
538 	u32 user_base_data = 0, base_vector = 0, base_idx;
539 	struct ath12k_ext_irq_grp *irq_grp;
540 
541 	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
542 	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
543 						 &num_vectors,
544 						 &user_base_data,
545 						 &base_vector);
546 	if (ret < 0)
547 		return ret;
548 
549 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
550 		irq_grp = &ab->ext_irq_grp[i];
551 		u32 num_irq = 0;
552 
553 		irq_grp->ab = ab;
554 		irq_grp->grp_id = i;
555 		irq_grp->napi_ndev = alloc_netdev_dummy(0);
556 		if (!irq_grp->napi_ndev) {
557 			ret = -ENOMEM;
558 			goto fail_allocate;
559 		}
560 
561 		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
562 			       ath12k_pci_ext_grp_napi_poll);
563 
564 		if (ab->hw_params->ring_mask->tx[i] ||
565 		    ab->hw_params->ring_mask->rx[i] ||
566 		    ab->hw_params->ring_mask->rx_err[i] ||
567 		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
568 		    ab->hw_params->ring_mask->reo_status[i] ||
569 		    ab->hw_params->ring_mask->host2rxdma[i] ||
570 		    ab->hw_params->ring_mask->rx_mon_dest[i] ||
571 		    ab->hw_params->ring_mask->rx_mon_status[i]) {
572 			num_irq = 1;
573 		}
574 
575 		irq_grp->num_irq = num_irq;
576 		irq_grp->irqs[0] = base_idx + i;
577 
578 		for (j = 0; j < irq_grp->num_irq; j++) {
579 			int irq_idx = irq_grp->irqs[j];
580 			int vector = (i % num_vectors) + base_vector;
581 			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);
582 
583 			ab->irq_num[irq_idx] = irq;
584 
585 			ath12k_dbg(ab, ATH12K_DBG_PCI,
586 				   "irq:%d group:%d\n", irq, i);
587 
588 			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
589 			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
590 					  ab_pci->irq_flags,
591 					  "DP_EXT_IRQ", irq_grp);
592 			if (ret) {
593 				ath12k_err(ab, "failed request irq %d: %d\n",
594 					   vector, ret);
595 				goto fail_request;
596 			}
597 		}
598 		ath12k_pci_ext_grp_disable(irq_grp);
599 	}
600 
601 	return 0;
602 
603 fail_request:
604 	/* i ->napi_ndev was properly allocated. Free it also */
605 	i += 1;
606 fail_allocate:
607 	for (n = 0; n < i; n++) {
608 		irq_grp = &ab->ext_irq_grp[n];
609 		free_netdev(irq_grp->napi_ndev);
610 	}
611 	return ret;
612 }
613 
/* Pin the single shared MSI irq (affinity + hint) to cpumask @m.
 * In multi-MSI mode there is nothing to pin, so return success.
 */
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return 0;

	return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
622 
623 static int ath12k_pci_config_irq(struct ath12k_base *ab)
624 {
625 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
626 	struct ath12k_ce_pipe *ce_pipe;
627 	u32 msi_data_start;
628 	u32 msi_data_count, msi_data_idx;
629 	u32 msi_irq_start;
630 	unsigned int msi_data;
631 	int irq, i, ret, irq_idx;
632 
633 	ret = ath12k_pci_get_user_msi_assignment(ab,
634 						 "CE", &msi_data_count,
635 						 &msi_data_start, &msi_irq_start);
636 	if (ret)
637 		return ret;
638 
639 	/* Configure CE irqs */
640 
641 	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
642 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
643 			continue;
644 
645 		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
646 		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
647 		ce_pipe = &ab->ce.ce_pipe[i];
648 
649 		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
650 
651 		INIT_WORK(&ce_pipe->intr_wq, ath12k_pci_ce_workqueue);
652 
653 		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
654 				  ab_pci->irq_flags, irq_name[irq_idx],
655 				  ce_pipe);
656 		if (ret) {
657 			ath12k_err(ab, "failed to request irq %d: %d\n",
658 				   irq_idx, ret);
659 			return ret;
660 		}
661 
662 		ab->irq_num[irq_idx] = irq;
663 		msi_data_idx++;
664 
665 		ath12k_pci_ce_irq_disable(ab, i);
666 	}
667 
668 	ret = ath12k_pci_ext_irq_config(ab);
669 	if (ret)
670 		return ret;
671 
672 	return 0;
673 }
674 
675 static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
676 {
677 	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
678 
679 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
680 	struct pci_bus *bus = ab_pci->pdev->bus;
681 
682 	cfg->tgt_ce = ab->hw_params->target_ce_config;
683 	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
684 
685 	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
686 	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
687 	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
688 
689 	if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID)) {
690 		ab_pci->qmi_instance =
691 			u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
692 			u32_encode_bits(bus->number, BUS_NUMBER_MASK);
693 		ab->qmi.service_ins_id += ab_pci->qmi_instance;
694 	}
695 }
696 
697 static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
698 {
699 	int i;
700 
701 	set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
702 
703 	for (i = 0; i < ab->hw_params->ce_count; i++) {
704 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
705 			continue;
706 		ath12k_pci_ce_irq_enable(ab, i);
707 	}
708 }
709 
710 static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
711 {
712 	struct pci_dev *dev = ab_pci->pdev;
713 	u16 control;
714 
715 	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);
716 
717 	if (enable)
718 		control |= PCI_MSI_FLAGS_ENABLE;
719 	else
720 		control &= ~PCI_MSI_FLAGS_ENABLE;
721 
722 	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
723 }
724 
/* Set the MSI enable bit in PCI config space */
static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, true);
}
729 
/* Clear the MSI enable bit in PCI config space */
static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, false);
}
734 
/* Allocate MSI vectors for the device.
 *
 * First tries the full complement from ab_pci->msi_config; if that fails,
 * falls back to a single vector and switches to msi_config_one_msi. MSI is
 * then turned off again in config space (counterpart: ath12k_pci_msi_enable())
 * and the MSI message data is cached in msi_ep_base_data.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);

	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->irq_flags = IRQF_SHARED;
	} else {
		/* fall back to a single shared vector */
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->msi_config = &msi_config_one_msi;
		/* NOBALANCING keeps the affinity set later from being moved */
		ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
		ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n");
	}

	ath12k_info(ab, "MSI vectors: %d\n", num_vectors);

	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	/* cache MSI message data and whether 64-bit MSI addressing is used */
	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}
791 
/* Release the MSI vectors allocated by ath12k_pci_msi_alloc() */
static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}
796 
797 static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci)
798 {
799 	struct msi_desc *msi_desc;
800 
801 	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
802 	if (!msi_desc) {
803 		ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
804 		pci_free_irq_vectors(ab_pci->pdev);
805 		return -EINVAL;
806 	}
807 
808 	ab_pci->msi_ep_base_data = msi_desc->msg.data;
809 
810 	ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
811 		   ab_pci->msi_ep_base_data);
812 
813 	return 0;
814 }
815 
/* Claim the PCI device: verify the device id, assign and request BAR0,
 * enable the device, set DMA masks, enable bus mastering and iomap BAR0
 * into ab->mem. Uses goto-based unwind on failure.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	/* sanity check that we probed the device we expected */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id)  {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	/* 36-bit streaming DMA, 32-bit coherent DMA */
	ab_pci->dma_mask = DMA_BIT_MASK(ATH12K_PCI_DMA_MASK);
	dma_set_mask(&pdev->dev, ab_pci->dma_mask);
	dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32));

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
872 
873 static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
874 {
875 	struct ath12k_base *ab = ab_pci->ab;
876 	struct pci_dev *pci_dev = ab_pci->pdev;
877 
878 	pci_iounmap(pci_dev, ab->mem);
879 	ab->mem = NULL;
880 	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
881 	if (pci_is_enabled(pci_dev))
882 		pci_disable_device(pci_dev);
883 }
884 
/* Save the current link-control word and disable ASPM L0s/L1. The
 * ATH12K_PCI_ASPM_RESTORE flag marks that ath12k_pci_aspm_restore()
 * should later put the saved bits back.
 */
static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
903 
static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 reg;

	/* On platforms with two or more identical mhi devices, qmi service run
	 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup
	 * cannot register more than one qmi service with identical node ID.
	 *
	 * This generates a unique instance ID from PCIe domain number and bus number,
	 * writes to the given register, it is available for firmware when the QMI service
	 * is spawned.
	 */
	/* mask the register offset into the windowed address range */
	reg = PCIE_LOCAL_REG_QRTR_NODE_ID(ab) & WINDOW_RANGE_MASK;
	ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);

	/* read back for logging, confirming the write landed */
	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
		   reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
}
923 
/* Restore the ASPM bits saved by ath12k_pci_aspm_disable(), but only when
 * the hardware supports ASPM and a restore is actually pending.
 */
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (ab_pci->ab->hw_params->supports_aspm &&
	    test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}
933 
934 static void ath12k_pci_cancel_workqueue(struct ath12k_base *ab)
935 {
936 	int i;
937 
938 	for (i = 0; i < ab->hw_params->ce_count; i++) {
939 		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
940 
941 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
942 			continue;
943 
944 		cancel_work_sync(&ce_pipe->intr_wq);
945 	}
946 }
947 
/* Fully quiesce CE interrupts. Order matters: mask the irqs first so no
 * new work is queued, then wait for running handlers, then flush the
 * queued bottom-half work.
 */
static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_cancel_workqueue(ab);
}
954 
955 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
956 				   u8 *ul_pipe, u8 *dl_pipe)
957 {
958 	const struct service_to_pipe *entry;
959 	bool ul_set = false, dl_set = false;
960 	int i;
961 
962 	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
963 		entry = &ab->hw_params->svc_to_ce_map[i];
964 
965 		if (__le32_to_cpu(entry->service_id) != service_id)
966 			continue;
967 
968 		switch (__le32_to_cpu(entry->pipedir)) {
969 		case PIPEDIR_NONE:
970 			break;
971 		case PIPEDIR_IN:
972 			WARN_ON(dl_set);
973 			*dl_pipe = __le32_to_cpu(entry->pipenum);
974 			dl_set = true;
975 			break;
976 		case PIPEDIR_OUT:
977 			WARN_ON(ul_set);
978 			*ul_pipe = __le32_to_cpu(entry->pipenum);
979 			ul_set = true;
980 			break;
981 		case PIPEDIR_INOUT:
982 			WARN_ON(dl_set);
983 			WARN_ON(ul_set);
984 			*dl_pipe = __le32_to_cpu(entry->pipenum);
985 			*ul_pipe = __le32_to_cpu(entry->pipenum);
986 			dl_set = true;
987 			ul_set = true;
988 			break;
989 		}
990 	}
991 
992 	if (WARN_ON(!ul_set || !dl_set))
993 		return -ENOENT;
994 
995 	return 0;
996 }
997 
/* Translate an MSI vector index into its Linux irq number */
int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	return pci_irq_vector(to_pci_dev(dev), vector);
}
1004 
1005 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
1006 				       int *num_vectors, u32 *user_base_data,
1007 				       u32 *base_vector)
1008 {
1009 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
1010 	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
1011 	int idx;
1012 
1013 	for (idx = 0; idx < msi_config->total_users; idx++) {
1014 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
1015 			*num_vectors = msi_config->users[idx].num_vectors;
1016 			*base_vector =  msi_config->users[idx].base_vector;
1017 			*user_base_data = *base_vector + ab_pci->msi_ep_base_data;
1018 
1019 			ath12k_dbg(ab, ATH12K_DBG_PCI,
1020 				   "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
1021 				   user_name, *num_vectors, *user_base_data,
1022 				   *base_vector);
1023 
1024 			return 0;
1025 		}
1026 	}
1027 
1028 	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
1029 
1030 	return -EINVAL;
1031 }
1032 
1033 void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
1034 				u32 *msi_addr_hi)
1035 {
1036 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
1037 	struct pci_dev *pci_dev = to_pci_dev(ab->dev);
1038 
1039 	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
1040 			      msi_addr_lo);
1041 
1042 	if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
1043 		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
1044 				      msi_addr_hi);
1045 	} else {
1046 		*msi_addr_hi = 0;
1047 	}
1048 }
1049 
1050 void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
1051 			       u32 *msi_idx)
1052 {
1053 	u32 i, msi_data_idx;
1054 
1055 	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
1056 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1057 			continue;
1058 
1059 		if (ce_id == i)
1060 			break;
1061 
1062 		msi_data_idx++;
1063 	}
1064 	*msi_idx = msi_data_idx;
1065 }
1066 
/* HIF op wrapper: enable CE interrupts. */
void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}
1071 
/* HIF op wrapper: disable CE interrupts and wait for in-flight handlers. */
void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}
1076 
1077 void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
1078 {
1079 	int i;
1080 
1081 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
1082 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
1083 
1084 		if (!irq_grp->napi_enabled) {
1085 			napi_enable(&irq_grp->napi);
1086 			irq_grp->napi_enabled = true;
1087 		}
1088 
1089 		ath12k_pci_ext_grp_enable(irq_grp);
1090 	}
1091 
1092 	set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
1093 }
1094 
/* Disable all extended (DP) interrupt groups and synchronize against
 * in-flight handlers. No-op if ext IRQs were never enabled.
 */
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;

	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}
1103 
/* HIF suspend op: put the MHI channel layer into suspend. Always
 * reports success.
 */
int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	ath12k_mhi_suspend(ath12k_pci_priv(ab));

	return 0;
}
1112 
/* HIF resume op: bring the MHI channel layer out of suspend. Always
 * reports success.
 */
int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	ath12k_mhi_resume(ath12k_pci_priv(ab));

	return 0;
}
1121 
/* HIF stop op: quiesce CE interrupts and clean up CE pipes. Skipped
 * entirely unless ath12k_pci_start() has run (INIT_DONE set).
 */
void ath12k_pci_stop(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
		return;

	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}
1132 
1133 int ath12k_pci_start(struct ath12k_base *ab)
1134 {
1135 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
1136 
1137 	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
1138 
1139 	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
1140 		ath12k_pci_aspm_restore(ab_pci);
1141 	else
1142 		ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
1143 
1144 	ath12k_pci_ce_irqs_enable(ab);
1145 	ath12k_ce_rx_post_buf(ab);
1146 
1147 	return 0;
1148 }
1149 
/* Read a 32-bit register at @offset. Offsets past the always-accessible
 * first 4K-32 bytes of BAR0 may need an MHI wakeup first; offsets at or
 * above WINDOW_START go through either a static window mapping or the
 * lock-protected dynamic window at WINDOW_START.
 */
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* low offsets are directly accessible, no windowing */
		val = ioread32(ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: selection + access must be atomic
			 * w.r.t. other readers/writers
			 */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				/* MHI registers are rebased relative to
				 * PCI_MHIREGLEN_REG before masking
				 */
				offset = offset - PCI_MHIREGLEN_REG;
				val = ioread32(ab->mem +
					       (offset & WINDOW_RANGE_MASK));
			} else {
				val = ioread32(ab->mem + window_start +
					       (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* statically mapped window: lock-free access */
			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	/* release the wakeup reference only if the wakeup above succeeded.
	 * NOTE(review): in the MHI-region path @offset was rebased above,
	 * so this check sees the rebased value — confirm this is intended.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
	return val;
}
EXPORT_SYMBOL(ath12k_pci_read32);
1197 
/* Write @value to the 32-bit register at @offset. Mirror of
 * ath12k_pci_read32(): MHI wakeup for offsets past the always-on 4K-32
 * region, static or dynamic windowing for offsets >= WINDOW_START.
 */
void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* low offsets are directly accessible, no windowing */
		iowrite32(value, ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: selection + access must be atomic
			 * w.r.t. other readers/writers
			 */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				/* MHI registers are rebased relative to
				 * PCI_MHIREGLEN_REG before masking
				 */
				offset = offset - PCI_MHIREGLEN_REG;
				iowrite32(value, ab->mem +
					  (offset & WINDOW_RANGE_MASK));
			} else {
				iowrite32(value, ab->mem + window_start +
					  (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* statically mapped window: lock-free access */
			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	/* release the wakeup reference only if the wakeup above succeeded.
	 * NOTE(review): in the MHI-region path @offset was rebased above,
	 * so this check sees the rebased value — confirm this is intended.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}
1243 
1244 #ifdef CONFIG_ATH12K_COREDUMP
/* Compute the total devcoredump size and fill @dump_seg_sz (indexed by
 * enum ath12k_fw_crash_dump_type) with per-segment sizes: the MHI paging
 * (FBC) image, the RDDM image, and all supported QMI target memory
 * regions. The total includes one TLV header per non-empty segment plus
 * the file header. Returns 0 when no RDDM dump is available.
 */
static int ath12k_pci_coredump_calculate_size(struct ath12k_base *ab, u32 *dump_seg_sz)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct ath12k_tlv_dump_data *dump_tlv;
	enum ath12k_fw_crash_dump_type mem_type;
	u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
	struct ath12k_dump_file_data *file_data;
	int i;

	rddm_img = mhi_ctrl->rddm_image;
	if (!rddm_img) {
		ath12k_err(ab, "No RDDM dump found\n");
		return 0;
	}

	fw_img = mhi_ctrl->fbc_image;

	/* total size of the firmware paging image buffers */
	for (i = 0; i < fw_img->entries ; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		paging_tlv_sz += fw_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;

	/* total size of the RDDM image buffers */
	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		rddm_tlv_sz += rddm_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;

	/* accumulate QMI target memory region sizes by dump type,
	 * skipping unsupported types and regions without a physical
	 * address
	 */
	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
		mem_type = ath12k_coredump_get_dump_type(ab->qmi.target_mem[i].type);

		if (mem_type == FW_CRASH_DUMP_NONE)
			continue;

		if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "target mem region type %d not supported",
				   ab->qmi.target_mem[i].type);
			continue;
		}

		if (!ab->qmi.target_mem[i].paddr)
			continue;

		dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
	}

	/* one TLV header per non-empty segment */
	for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
		if (!dump_seg_sz[i])
			continue;

		len += sizeof(*dump_tlv) + dump_seg_sz[i];
	}

	if (len)
		len += sizeof(*file_data);

	return len;
}
1311 
/* Build a devcoredump blob from the MHI paging/RDDM images and the QMI
 * target memory regions, then queue ab->dump_work to hand it off.
 * Blob layout: file header, then per non-empty segment type a TLV
 * header followed by that segment's data.
 */
static void ath12k_pci_coredump_download(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct timespec64 timestamp;
	int i, len, mem_idx;
	enum ath12k_fw_crash_dump_type mem_type;
	struct ath12k_dump_file_data *file_data;
	struct ath12k_tlv_dump_data *dump_tlv;
	size_t hdr_len = sizeof(*file_data);
	void *buf;
	u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};

	/* trigger RDDM collection before sizing the dump */
	ath12k_mhi_coredump(mhi_ctrl, false);

	len = ath12k_pci_coredump_calculate_size(ab, dump_seg_sz);
	if (!len) {
		ath12k_warn(ab, "No crash dump data found for devcoredump");
		return;
	}

	rddm_img = mhi_ctrl->rddm_image;
	fw_img = mhi_ctrl->fbc_image;

	/* dev_coredumpv() requires vmalloc data */
	buf = vzalloc(len);
	if (!buf)
		return;

	ab->dump_data = buf;
	ab->ath12k_coredump_len = len;
	file_data = ab->dump_data;
	/* populate the file header */
	strscpy(file_data->df_magic, "ATH12K-FW-DUMP", sizeof(file_data->df_magic));
	file_data->len = cpu_to_le32(len);
	file_data->version = cpu_to_le32(ATH12K_FW_CRASH_DUMP_V2);
	file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
	file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
	file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
	guid_gen(&file_data->guid);
	ktime_get_real_ts64(&timestamp);
	file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
	file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
	buf += hdr_len;
	/* paging data TLV */
	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single contiguous
	 * block of memory
	 */
	for (i = 0; i < fw_img->entries ; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
			      fw_img->mhi_buf[i].len);
		buf += fw_img->mhi_buf[i].len;
	}

	/* RDDM data TLV */
	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single contiguous
	 * block of memory
	 */
	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
			      rddm_img->mhi_buf[i].len);
		buf += rddm_img->mhi_buf[i].len;
	}

	/* one TLV per remaining dump type, gathering every QMI target
	 * memory region that maps to that type
	 */
	mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
	for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
		if (!dump_seg_sz[mem_idx] || mem_idx == FW_CRASH_DUMP_NONE)
			continue;

		dump_tlv = buf;
		dump_tlv->type = cpu_to_le32(mem_idx);
		dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
		buf += COREDUMP_TLV_HDR_SIZE;

		for (i = 0; i < ab->qmi.mem_seg_count; i++) {
			mem_type = ath12k_coredump_get_dump_type
							(ab->qmi.target_mem[i].type);

			if (mem_type != mem_idx)
				continue;

			if (!ab->qmi.target_mem[i].paddr) {
				ath12k_dbg(ab, ATH12K_DBG_PCI,
					   "Skipping mem region type %d",
					   ab->qmi.target_mem[i].type);
				continue;
			}

			memcpy_fromio(buf, ab->qmi.target_mem[i].v.ioaddr,
				      ab->qmi.target_mem[i].size);
			buf += ab->qmi.target_mem[i].size;
		}
	}

	queue_work(ab->workqueue, &ab->dump_work);
}
1422 #endif
1423 
/* HIF power_up op: reset the register window state and the device,
 * disable ASPM for the duration of firmware download, enable MSI and
 * start MHI. Selects the static register window afterwards when the
 * hardware uses a static window map.
 */
int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	/* multi-qrtr-id capable firmware needs the node id set up
	 * before MHI starts
	 */
	if (ath12k_fw_feature_supported(ab, ATH12K_FW_FEATURE_MULTI_QRTR_ID))
		ath12k_pci_update_qrtr_node_id(ab);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}
1454 
/* HIF power_down op: inverse of ath12k_pci_power_up(). Skipped unless
 * INIT_DONE was set; @is_suspend is forwarded to the MHI stop path.
 */
void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	if (!test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags))
		return;

	/* restore aspm in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci, is_suspend);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}
1471 
/* Panic notifier hook: reset the device so it is quiescent while the
 * kernel panics; always lets the notifier chain continue.
 */
static int ath12k_pci_panic_handler(struct ath12k_base *ab)
{
	ath12k_pci_sw_reset(ab, false);

	return NOTIFY_OK;
}
1478 
/* HIF bus ops exposed to the ath12k core for PCI-based devices. */
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
	.panic_handler = ath12k_pci_panic_handler,
#ifdef CONFIG_ATH12K_COREDUMP
	.coredump_download = ath12k_pci_coredump_download,
#endif
};
1501 
1502 static enum ath12k_device_family
1503 ath12k_get_device_family(const struct pci_device_id *pci_dev)
1504 {
1505 	enum ath12k_device_family device_family_id;
1506 	const struct pci_device_id *id;
1507 
1508 	for (device_family_id = ATH12K_DEVICE_FAMILY_START;
1509 	     device_family_id < ATH12K_DEVICE_FAMILY_MAX; device_family_id++) {
1510 		if (!ath12k_pci_family_drivers[device_family_id])
1511 			continue;
1512 
1513 		id = ath12k_pci_family_drivers[device_family_id]->id_table;
1514 		while (id->device) {
1515 			if (id->device == pci_dev->device)
1516 				return device_family_id;
1517 			id += 1;
1518 		}
1519 	}
1520 
1521 	return ATH12K_DEVICE_FAMILY_MAX;
1522 }
1523 
1524 static int ath12k_pci_probe(struct pci_dev *pdev,
1525 			    const struct pci_device_id *pci_dev)
1526 {
1527 	enum ath12k_device_family device_id;
1528 	struct ath12k_pci *ab_pci;
1529 	struct ath12k_base *ab;
1530 	int ret;
1531 
1532 	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
1533 	if (!ab) {
1534 		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
1535 		return -ENOMEM;
1536 	}
1537 
1538 	ab->dev = &pdev->dev;
1539 	ab_pci = ath12k_pci_priv(ab);
1540 	ab_pci->dev_id = pci_dev->device;
1541 	ab_pci->ab = ab;
1542 	ab_pci->pdev = pdev;
1543 	ab->hif.ops = &ath12k_pci_hif_ops;
1544 	ab->fw_mode = ATH12K_FIRMWARE_MODE_NORMAL;
1545 	pci_set_drvdata(pdev, ab);
1546 	spin_lock_init(&ab_pci->window_lock);
1547 
1548 	ret = ath12k_pci_claim(ab_pci, pdev);
1549 	if (ret) {
1550 		ath12k_err(ab, "failed to claim device: %d\n", ret);
1551 		goto err_free_core;
1552 	}
1553 
1554 	ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
1555 		   pdev->vendor, pdev->device,
1556 		   pdev->subsystem_vendor, pdev->subsystem_device);
1557 
1558 	ab->id.vendor = pdev->vendor;
1559 	ab->id.device = pdev->device;
1560 	ab->id.subsystem_vendor = pdev->subsystem_vendor;
1561 	ab->id.subsystem_device = pdev->subsystem_device;
1562 
1563 	device_id = ath12k_get_device_family(pci_dev);
1564 	if (device_id >= ATH12K_DEVICE_FAMILY_MAX) {
1565 		ath12k_err(ab, "failed to get device family id\n");
1566 		ret = -EINVAL;
1567 		goto err_pci_free_region;
1568 	}
1569 
1570 	ath12k_dbg(ab, ATH12K_DBG_PCI, "PCI device family id: %d\n", device_id);
1571 
1572 	ab_pci->device_family_ops = &ath12k_pci_family_drivers[device_id]->ops;
1573 	ab_pci->reg_base = ath12k_pci_family_drivers[device_id]->reg_base;
1574 
1575 	/* Call device specific probe. This is the callback that can
1576 	 * be used to override any ops in future
1577 	 * probe is validated for NULL during registration.
1578 	 */
1579 	ret = ab_pci->device_family_ops->probe(pdev, pci_dev);
1580 	if (ret) {
1581 		ath12k_err(ab, "failed to probe device: %d\n", ret);
1582 		goto err_pci_free_region;
1583 	}
1584 
1585 	ret = ath12k_pci_msi_alloc(ab_pci);
1586 	if (ret) {
1587 		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
1588 		goto err_pci_free_region;
1589 	}
1590 
1591 	ret = ath12k_core_pre_init(ab);
1592 	if (ret)
1593 		goto err_pci_msi_free;
1594 
1595 	ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
1596 	if (ret) {
1597 		ath12k_err(ab, "failed to set irq affinity %d\n", ret);
1598 		goto err_pci_msi_free;
1599 	}
1600 
1601 	ret = ath12k_mhi_register(ab_pci);
1602 	if (ret) {
1603 		ath12k_err(ab, "failed to register mhi: %d\n", ret);
1604 		goto err_irq_affinity_cleanup;
1605 	}
1606 
1607 	ret = ath12k_hal_srng_init(ab);
1608 	if (ret)
1609 		goto err_mhi_unregister;
1610 
1611 	ret = ath12k_ce_alloc_pipes(ab);
1612 	if (ret) {
1613 		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1614 		goto err_hal_srng_deinit;
1615 	}
1616 
1617 	ath12k_pci_init_qmi_ce_config(ab);
1618 
1619 	ret = ath12k_pci_config_irq(ab);
1620 	if (ret) {
1621 		ath12k_err(ab, "failed to config irq: %d\n", ret);
1622 		goto err_ce_free;
1623 	}
1624 
1625 	/* kernel may allocate a dummy vector before request_irq and
1626 	 * then allocate a real vector when request_irq is called.
1627 	 * So get msi_data here again to avoid spurious interrupt
1628 	 * as msi_data will configured to srngs.
1629 	 */
1630 	ret = ath12k_pci_config_msi_data(ab_pci);
1631 	if (ret) {
1632 		ath12k_err(ab, "failed to config msi_data: %d\n", ret);
1633 		goto err_free_irq;
1634 	}
1635 
1636 	/* Invoke arch_init here so that arch-specific init operations
1637 	 * can utilize already initialized ab fields, such as HAL SRNGs.
1638 	 */
1639 	ret = ab_pci->device_family_ops->arch_init(ab);
1640 	if (ret) {
1641 		ath12k_err(ab, "PCI arch_init failed %d\n", ret);
1642 		goto err_pci_msi_free;
1643 	}
1644 
1645 	ret = ath12k_core_init(ab);
1646 	if (ret) {
1647 		ath12k_err(ab, "failed to init core: %d\n", ret);
1648 		goto err_deinit_arch;
1649 	}
1650 	return 0;
1651 
1652 err_deinit_arch:
1653 	ab_pci->device_family_ops->arch_deinit(ab);
1654 
1655 err_free_irq:
1656 	/* __free_irq() expects the caller to have cleared the affinity hint */
1657 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
1658 	ath12k_pci_free_irq(ab);
1659 
1660 err_ce_free:
1661 	ath12k_ce_free_pipes(ab);
1662 
1663 err_hal_srng_deinit:
1664 	ath12k_hal_srng_deinit(ab);
1665 
1666 err_mhi_unregister:
1667 	ath12k_mhi_unregister(ab_pci);
1668 
1669 err_irq_affinity_cleanup:
1670 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
1671 
1672 err_pci_msi_free:
1673 	ath12k_pci_msi_free(ab_pci);
1674 
1675 err_pci_free_region:
1676 	ath12k_pci_free_region(ab_pci);
1677 
1678 err_free_core:
1679 	ath12k_core_free(ab);
1680 
1681 	return ret;
1682 }
1683 
/* PCI remove callback: tear down everything probe set up, in reverse
 * order. When QMI already failed, the device is powered down first and
 * the unregister/worker-cancel steps are skipped (qmi_fail label).
 */
static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* clear the affinity hint before the IRQs are freed below */
	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab, false);
		goto qmi_fail;
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	/* make sure no reset/dump work races with the teardown */
	cancel_work_sync(&ab->reset_work);
	cancel_work_sync(&ab->dump_work);
	ath12k_core_hw_group_cleanup(ab->ag);

qmi_fail:
	ath12k_core_deinit(ab);
	ath12k_fw_unmap(ab);
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);

	/* family-specific teardown, mirrors arch_init from probe */
	ab_pci->device_family_ops->arch_deinit(ab);

	ath12k_core_free(ab);
}
1718 
1719 static void ath12k_pci_hw_group_power_down(struct ath12k_hw_group *ag)
1720 {
1721 	struct ath12k_base *ab;
1722 	int i;
1723 
1724 	if (!ag)
1725 		return;
1726 
1727 	mutex_lock(&ag->mutex);
1728 
1729 	for (i = 0; i < ag->num_devices; i++) {
1730 		ab = ag->ab[i];
1731 		if (!ab)
1732 			continue;
1733 
1734 		ath12k_pci_power_down(ab, false);
1735 	}
1736 
1737 	mutex_unlock(&ag->mutex);
1738 }
1739 
/* PCI shutdown callback: clear the IRQ affinity hint and power down the
 * whole hardware group this device belongs to.
 */
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath12k_pci_hw_group_power_down(ab->ag);
}
1748 
1749 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
1750 {
1751 	struct ath12k_base *ab = dev_get_drvdata(dev);
1752 	int ret;
1753 
1754 	ret = ath12k_core_suspend(ab);
1755 	if (ret)
1756 		ath12k_warn(ab, "failed to suspend core: %d\n", ret);
1757 
1758 	return ret;
1759 }
1760 
1761 static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
1762 {
1763 	struct ath12k_base *ab = dev_get_drvdata(dev);
1764 	int ret;
1765 
1766 	ret = ath12k_core_resume(ab);
1767 	if (ret)
1768 		ath12k_warn(ab, "failed to resume core: %d\n", ret);
1769 
1770 	return ret;
1771 }
1772 
1773 static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
1774 {
1775 	struct ath12k_base *ab = dev_get_drvdata(dev);
1776 	int ret;
1777 
1778 	ret = ath12k_core_suspend_late(ab);
1779 	if (ret)
1780 		ath12k_warn(ab, "failed to late suspend core: %d\n", ret);
1781 
1782 	return ret;
1783 }
1784 
1785 static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
1786 {
1787 	struct ath12k_base *ab = dev_get_drvdata(dev);
1788 	int ret;
1789 
1790 	ret = ath12k_core_resume_early(ab);
1791 	if (ret)
1792 		ath12k_warn(ab, "failed to early resume core: %d\n", ret);
1793 
1794 	return ret;
1795 }
1796 
/* System sleep PM hooks: normal suspend/resume plus the late-suspend /
 * early-resume pair.
 */
static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
				ath12k_pci_pm_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
				     ath12k_pci_pm_resume_early)
};
1803 
1804 int ath12k_pci_register_driver(const enum ath12k_device_family device_id,
1805 			       struct ath12k_pci_driver *driver)
1806 {
1807 	struct pci_driver *pci_driver;
1808 
1809 	if (device_id >= ATH12K_DEVICE_FAMILY_MAX)
1810 		return -EINVAL;
1811 
1812 	if (!driver || !driver->ops.probe ||
1813 	    !driver->ops.arch_init || !driver->ops.arch_deinit)
1814 		return -EINVAL;
1815 
1816 	if (ath12k_pci_family_drivers[device_id]) {
1817 		pr_err("Driver already registered for %d\n", device_id);
1818 		return -EALREADY;
1819 	}
1820 
1821 	ath12k_pci_family_drivers[device_id] = driver;
1822 
1823 	pci_driver = &ath12k_pci_family_drivers[device_id]->driver;
1824 	pci_driver->name = driver->name;
1825 	pci_driver->id_table = driver->id_table;
1826 	pci_driver->probe = ath12k_pci_probe;
1827 	pci_driver->remove = ath12k_pci_remove;
1828 	pci_driver->shutdown = ath12k_pci_shutdown;
1829 	pci_driver->driver.pm = &ath12k_pci_pm_ops;
1830 
1831 	return pci_register_driver(pci_driver);
1832 }
1833 EXPORT_SYMBOL(ath12k_pci_register_driver);
1834 
1835 void ath12k_pci_unregister_driver(const enum ath12k_device_family device_id)
1836 {
1837 	if (device_id >= ATH12K_DEVICE_FAMILY_MAX ||
1838 	    !ath12k_pci_family_drivers[device_id])
1839 		return;
1840 
1841 	pci_unregister_driver(&ath12k_pci_family_drivers[device_id]->driver);
1842 	ath12k_pci_family_drivers[device_id] = NULL;
1843 }
1844 EXPORT_SYMBOL(ath12k_pci_unregister_driver);
1845 
1846 /* firmware files */
1847 MODULE_FIRMWARE(ATH12K_FW_DIR "/QCN9274/hw2.0/*");
1848 MODULE_FIRMWARE(ATH12K_FW_DIR "/WCN7850/hw2.0/*");
1849