xref: /linux/drivers/net/wireless/ath/ath12k/pci.c (revision e7d759f31ca295d589f7420719c311870bb3166f)
1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3  * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
4  * Copyright (c) 2021-2023 Qualcomm Innovation Center, Inc. All rights reserved.
5  */
6 
7 #include <linux/module.h>
8 #include <linux/msi.h>
9 #include <linux/pci.h>
10 
11 #include "pci.h"
12 #include "core.h"
13 #include "hif.h"
14 #include "mhi.h"
15 #include "debug.h"
16 
17 #define ATH12K_PCI_BAR_NUM		0
18 #define ATH12K_PCI_DMA_MASK		32
19 
20 #define ATH12K_PCI_IRQ_CE0_OFFSET		3
21 
22 #define WINDOW_ENABLE_BIT		0x40000000
23 #define WINDOW_REG_ADDRESS		0x310c
24 #define WINDOW_VALUE_MASK		GENMASK(24, 19)
25 #define WINDOW_START			0x80000
26 #define WINDOW_RANGE_MASK		GENMASK(18, 0)
27 #define WINDOW_STATIC_MASK		GENMASK(31, 6)
28 
29 #define TCSR_SOC_HW_VERSION		0x1B00000
30 #define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
31 #define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)
32 
33 /* BAR0 + 4k is always accessible, and no
34  * need to force wakeup.
35  * 4K - 32 = 0xFE0
36  */
37 #define ACCESS_ALWAYS_OFF 0xFE0
38 
39 #define QCN9274_DEVICE_ID		0x1109
40 #define WCN7850_DEVICE_ID		0x1107
41 
/* PCI IDs this driver binds to; exposed via MODULE_DEVICE_TABLE for
 * automatic module loading on device discovery.
 */
static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);
49 
/* TODO: revisit IRQ mapping for new SRNG's */
/* Multi-MSI layout: 16 vectors partitioned among MHI (0-2), CE (3-7)
 * and DP (8-15). Used when the full vector count can be allocated.
 */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};
62 
/* Fallback layout when only a single MSI vector is available: every
 * user shares vector 0 (total_vectors = 1).
 * NOTE(review): MHI .num_vectors is 3 here while only one vector
 * exists — presumably the MHI layer multiplexes its event rings onto
 * the shared vector; confirm against the MHI setup code.
 */
static const struct ath12k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath12k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
73 
/* Human-readable names registered with request_irq(), indexed by the
 * driver's logical IRQ number (BHI/MHI first, then CEs, then DP rings).
 * These are runtime strings visible in /proc/interrupts — do not edit.
 * NOTE(review): "reo2ost-exception" looks like a typo for
 * "reo2host-exception" but is kept as-is since it is user-visible.
 */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};
132 
133 static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
134 {
135 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
136 
137 	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
138 }
139 
140 static void ath12k_pci_bus_release(struct ath12k_base *ab)
141 {
142 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
143 
144 	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
145 }
146 
/* QCN9274 never needs an explicit wakeup vote for register access. */
static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
	.wakeup = NULL,
	.release = NULL,
};

/* WCN7850 must wake the MHI link before touching registers beyond the
 * always-on region (see ACCESS_ALWAYS_OFF usage in the read/write paths).
 */
static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
	.wakeup = ath12k_pci_bus_wake_up,
	.release = ath12k_pci_bus_release,
};
156 
/* Program the dynamic BAR window so that @offset becomes accessible at
 * WINDOW_START. Caller must hold ab_pci->window_lock. Any static window
 * bits previously latched in register_window are preserved.
 */
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		/* read back to flush the posted write before dependent access */
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
		ab_pci->register_window = window;
	}
}
177 
/* Latch the UMAC and CE register blocks into the static (fixed) windows
 * so the hot register paths avoid dynamic window switching. The cached
 * register_window is updated under window_lock so a concurrent
 * ath12k_pci_select_window() preserves these bits via WINDOW_STATIC_MASK.
 */
static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
	u32 window;

	/* static window fields live above bit 6: UMAC at [*,12], CE at [*,6] */
	window = (umac_window << 12) | (ce_window << 6);

	spin_lock_bh(&ab_pci->window_lock);
	ab_pci->register_window = window;
	spin_unlock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
}
192 
193 static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
194 				       u32 offset)
195 {
196 	u32 window_start;
197 
198 	/* If offset lies within DP register range, use 3rd window */
199 	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
200 		window_start = 3 * WINDOW_START;
201 	/* If offset lies within CE register range, use 2nd window */
202 	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
203 		window_start = 2 * WINDOW_START;
204 	/* If offset lies within PCI_BAR_WINDOW0_BASE and within PCI_SOC_PCI_REG_BASE
205 	 * use 0th window
206 	 */
207 	else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
208 		 !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
209 		window_start = 0;
210 	else
211 		window_start = WINDOW_START;
212 
213 	return window_start;
214 }
215 
/* Pulse the SoC global reset bit: set V, wait, clear V, wait, then read
 * back to detect a dead PCIe link (all-ones read). Delays are empirical.
 */
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	/* 0xffffffff from a config/mem read means the link dropped */
	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");
}
241 
/* Clear firmware-visible scratch/debug registers so the Q6 does not
 * follow a stale warm-boot path after reset. Reads are logged for
 * debugging; WLAON_SOC_RESET_CAUSE_REG is read-to-clear.
 */
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read clear register. clear the register to prevent
	 * Q6 from entering wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}
271 
/* Re-enable PCIe LTSSM (link training state machine) after a hot reset
 * and assert the GCC hot-reset bit. The write is retried up to 5 times
 * because the link is unstable right after reset; an all-ones read
 * indicates the link is momentarily down, so back off before retrying.
 */
static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset*/
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	/* read back to confirm/flush the hot-reset write */
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}
299 
/* Clear all latched PCIe interrupts left over from a hot reset. */
static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a WAR for PCIE Hotreset.
	 * When target receive Hotreset, but will set the interrupt.
	 * So when download SBL again, SBL will open Interrupt and
	 * receive it, and crash immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
309 
/* Clear the VDD4BLOW bit in the WLAON power-control register
 * (read-modify-write; all other bits are preserved).
 */
static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
318 
/* Force the target awake via the local SOC wake register, then give the
 * hardware a moment to come up (delay value is empirical).
 */
static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}
324 
/* Software-reset the target. On the power-on path extra steps are
 * needed first (LTSSM re-enable, stale interrupt clearing, WLAON power
 * fix-up) before the common MHI/debug-register/global-reset sequence.
 * Step order is hardware-mandated — do not reorder.
 */
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}
338 
339 static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
340 {
341 	int i, j;
342 
343 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
344 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
345 
346 		for (j = 0; j < irq_grp->num_irq; j++)
347 			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);
348 
349 		netif_napi_del(&irq_grp->napi);
350 	}
351 }
352 
353 static void ath12k_pci_free_irq(struct ath12k_base *ab)
354 {
355 	int i, irq_idx;
356 
357 	for (i = 0; i < ab->hw_params->ce_count; i++) {
358 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
359 			continue;
360 		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
361 		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
362 	}
363 
364 	ath12k_pci_free_ext_irq(ab);
365 }
366 
/* Enable the per-CE IRQ line for @ce_id. No-op in single-MSI mode,
 * where the shared vector is managed uniformly elsewhere.
 */
static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}
381 
/* Disable the per-CE IRQ line for @ce_id without waiting for a running
 * handler (nosync). No-op in single-MSI mode.
 */
static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}
396 
397 static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
398 {
399 	int i;
400 
401 	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
402 
403 	for (i = 0; i < ab->hw_params->ce_count; i++) {
404 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
405 			continue;
406 		ath12k_pci_ce_irq_disable(ab, i);
407 	}
408 }
409 
410 static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
411 {
412 	int i;
413 	int irq_idx;
414 
415 	for (i = 0; i < ab->hw_params->ce_count; i++) {
416 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
417 			continue;
418 
419 		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
420 		synchronize_irq(ab->irq_num[irq_idx]);
421 	}
422 }
423 
/* Bottom half for a CE interrupt: service the engine, then re-enable
 * the IRQ that the top half disabled in
 * ath12k_pci_ce_interrupt_handler().
 */
static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}
433 
/* Top half for a CE interrupt: record the timestamp, mask the line and
 * defer the actual CE servicing to ath12k_pci_ce_tasklet(). Interrupts
 * arriving while CE IRQs are globally disabled are acknowledged and
 * dropped.
 */
static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;
	struct ath12k_base *ab = ce_pipe->ab;
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	/* masked here, re-enabled by the tasklet after servicing */
	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}
452 
/* Mask every IRQ line belonging to a DP (ext) group. No-op in
 * single-MSI mode where lines are shared.
 */
static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
467 
/* Disable all DP group interrupts and quiesce their NAPI contexts.
 * The ext-IRQ-enabled flag is cleared first so late interrupts bail
 * out in the handler before the NAPI instances are disabled.
 */
static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_pci_ext_grp_disable(irq_grp);

		napi_synchronize(&irq_grp->napi);
		napi_disable(&irq_grp->napi);
	}
}
483 
/* Unmask every IRQ line belonging to a DP (ext) group. No-op in
 * single-MSI mode.
 */
static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}
498 
499 static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
500 {
501 	int i, j, irq_idx;
502 
503 	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
504 		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
505 
506 		for (j = 0; j < irq_grp->num_irq; j++) {
507 			irq_idx = irq_grp->irqs[j];
508 			synchronize_irq(ab->irq_num[irq_idx]);
509 		}
510 	}
511 }
512 
/* NAPI poll callback for a DP group: service SRNG rings up to @budget.
 * When the group is fully drained (work_done < budget), complete NAPI
 * and re-enable the group's IRQ lines that the top half disabled.
 * Returning the full budget keeps the group in polling mode.
 */
static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath12k_ext_irq_grp,
						napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	/* NAPI contract: never report more work than the given budget */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}
534 
/* Top half for a DP group interrupt: mask the group's lines and hand
 * off to NAPI. Interrupts arriving while ext IRQs are globally
 * disabled are acknowledged and dropped.
 */
static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;
	struct ath12k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	/* masked here, re-enabled by the NAPI poll once drained */
	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}
556 
/* Set up the DP (ext) interrupt groups: one NAPI context per group, an
 * IRQ per group that has any ring mask bit set, round-robin mapped onto
 * the "DP" MSI vectors. Groups start with their IRQs disabled; they are
 * enabled later via ath12k_pci_ext_irq_enable().
 *
 * NOTE(review): on request_irq() failure this returns without freeing
 * IRQs requested for earlier groups or deleting the NAPI contexts
 * already added — the caller's unwind path should be checked.
 */
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;

	/* DP logical IRQ numbers start after BHI/MHI and the CE block */
	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		/* a group needs an IRQ iff any ring mask assigns it work */
		if (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* groups share the DP vectors round-robin */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  ab_pci->irq_flags,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}
		}
		ath12k_pci_ext_grp_disable(irq_grp);
	}

	return 0;
}
619 
/* Apply a CPU affinity hint to the single shared MSI line. Skipped
 * (returns 0) in multi-MSI mode, where per-line affinity is left to
 * the IRQ core.
 */
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return 0;

	return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}
628 
/* Request all CE interrupts (tasklet bottom halves) against the "CE"
 * MSI vectors, leaving each line disabled until explicitly enabled,
 * then configure the DP (ext) interrupt groups.
 * Returns 0 on success or a negative errno.
 */
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* interrupt-capable CEs share the CE vectors round-robin */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  ab_pci->irq_flags, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
680 
681 static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
682 {
683 	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
684 
685 	cfg->tgt_ce = ab->hw_params->target_ce_config;
686 	cfg->tgt_ce_len = ab->hw_params->target_ce_count;
687 
688 	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
689 	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
690 	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
691 }
692 
693 static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
694 {
695 	int i;
696 
697 	set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);
698 
699 	for (i = 0; i < ab->hw_params->ce_count; i++) {
700 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
701 			continue;
702 		ath12k_pci_ce_irq_enable(ab, i);
703 	}
704 }
705 
/* Toggle the MSI Enable bit in the device's MSI capability via config
 * space (read-modify-write of PCI_MSI_FLAGS).
 */
static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
	struct pci_dev *dev = ab_pci->pdev;
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}
720 
/* Convenience wrapper: set the MSI Enable bit. */
static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, true);
}
725 
/* Convenience wrapper: clear the MSI Enable bit. */
static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, false);
}
730 
/* Allocate MSI vectors. Try the full multi-vector config first; if the
 * exact count cannot be granted, fall back to a single shared vector
 * and switch to msi_config_one_msi. On success the MSI base data is
 * cached from the irq descriptor for later register programming.
 * Returns 0 on success or a negative errno.
 */
static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);

	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->irq_flags = IRQF_SHARED;
	} else {
		/* exact allocation failed; retry with a single vector */
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			/* NOTE(review): the errno from pci_alloc_irq_vectors
			 * (num_vectors) is discarded in favor of -EINVAL —
			 * consider propagating it instead.
			 */
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->msi_config = &msi_config_one_msi;
		ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
		ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n");
	}

	ath12k_info(ab, "MSI vectors: %d\n", num_vectors);

	/* keep MSI masked until setup is complete; re-enabled later */
	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}
787 
/* Release the MSI vectors allocated by ath12k_pci_msi_alloc(). */
static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}
792 
/* Refresh the cached MSI base data from the irq descriptor (the MSI
 * message can change, e.g. after request_irq). Frees the vectors and
 * returns -EINVAL if no descriptor is found; 0 on success.
 */
static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci)
{
	struct msi_desc *msi_desc;

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
		pci_free_irq_vectors(ab_pci->pdev);
		return -EINVAL;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;

	ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
		   ab_pci->msi_ep_base_data);

	return 0;
}
811 
/* Claim the PCI device: verify the device id, assign and enable BAR0,
 * set the 32-bit DMA mask, enable bus mastering and iomap BAR0 into
 * ab->mem. Uses goto-based unwind on failure.
 * Returns 0 on success or a negative errno.
 */
static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	/* sanity: the probed device must match the id we were bound with */
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id)  {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
	if (ret) {
		ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH12K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
872 
/* Undo ath12k_pci_claim(): unmap BAR0, release the region and disable
 * the device if it is still enabled.
 */
static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}
884 
/* Disable ASPM L0s/L1 on the link, saving the current Link Control
 * value so ath12k_pci_aspm_restore() can put it back later.
 */
static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
903 
/* Restore the ASPM control bits saved by ath12k_pci_aspm_disable().
 * No-op unless the restore flag is set; the flag is consumed here.
 */
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}
912 
913 static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
914 {
915 	int i;
916 
917 	for (i = 0; i < ab->hw_params->ce_count; i++) {
918 		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];
919 
920 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
921 			continue;
922 
923 		tasklet_kill(&ce_pipe->intr_tq);
924 	}
925 }
926 
/* Fully quiesce CE interrupts: mask the lines, wait out in-flight
 * handlers, then kill the bottom-half tasklets. Order matters.
 */
static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_kill_tasklets(ab);
}
933 
934 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
935 				   u8 *ul_pipe, u8 *dl_pipe)
936 {
937 	const struct service_to_pipe *entry;
938 	bool ul_set = false, dl_set = false;
939 	int i;
940 
941 	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
942 		entry = &ab->hw_params->svc_to_ce_map[i];
943 
944 		if (__le32_to_cpu(entry->service_id) != service_id)
945 			continue;
946 
947 		switch (__le32_to_cpu(entry->pipedir)) {
948 		case PIPEDIR_NONE:
949 			break;
950 		case PIPEDIR_IN:
951 			WARN_ON(dl_set);
952 			*dl_pipe = __le32_to_cpu(entry->pipenum);
953 			dl_set = true;
954 			break;
955 		case PIPEDIR_OUT:
956 			WARN_ON(ul_set);
957 			*ul_pipe = __le32_to_cpu(entry->pipenum);
958 			ul_set = true;
959 			break;
960 		case PIPEDIR_INOUT:
961 			WARN_ON(dl_set);
962 			WARN_ON(ul_set);
963 			*dl_pipe = __le32_to_cpu(entry->pipenum);
964 			*ul_pipe = __le32_to_cpu(entry->pipenum);
965 			dl_set = true;
966 			ul_set = true;
967 			break;
968 		}
969 	}
970 
971 	if (WARN_ON(!ul_set || !dl_set))
972 		return -ENOENT;
973 
974 	return 0;
975 }
976 
/* Map a logical MSI vector index to its Linux irq number. */
int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	return pci_irq_vector(to_pci_dev(dev), vector);
}
983 
984 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
985 				       int *num_vectors, u32 *user_base_data,
986 				       u32 *base_vector)
987 {
988 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
989 	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
990 	int idx;
991 
992 	for (idx = 0; idx < msi_config->total_users; idx++) {
993 		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
994 			*num_vectors = msi_config->users[idx].num_vectors;
995 			*base_vector =  msi_config->users[idx].base_vector;
996 			*user_base_data = *base_vector + ab_pci->msi_ep_base_data;
997 
998 			ath12k_dbg(ab, ATH12K_DBG_PCI,
999 				   "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
1000 				   user_name, *num_vectors, *user_base_data,
1001 				   *base_vector);
1002 
1003 			return 0;
1004 		}
1005 	}
1006 
1007 	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);
1008 
1009 	return -EINVAL;
1010 }
1011 
/* Read the MSI message address (low and, for 64-bit capable devices,
 * high dword) from the device's MSI capability in config space. The
 * high dword is reported as 0 for 32-bit MSI.
 */
void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
				u32 *msi_addr_hi)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_hi);
	} else {
		*msi_addr_hi = 0;
	}
}
1028 
1029 void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
1030 			       u32 *msi_idx)
1031 {
1032 	u32 i, msi_data_idx;
1033 
1034 	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
1035 		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
1036 			continue;
1037 
1038 		if (ce_id == i)
1039 			break;
1040 
1041 		msi_data_idx++;
1042 	}
1043 	*msi_idx = msi_data_idx;
1044 }
1045 
/* HIF op: enable all CE interrupts. */
void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}
1050 
/* HIF op: disable all CE interrupts and wait for handlers to drain. */
void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}
1055 
/* Enable DP (ext) interrupts: set the enabled flag, then bring up each
 * group's NAPI context before unmasking its IRQ lines so interrupts
 * can immediately schedule NAPI.
 */
void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
	int i;

	set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		napi_enable(&irq_grp->napi);
		ath12k_pci_ext_grp_enable(irq_grp);
	}
}
1069 
/* Disable all DP (ext) interrupts and wait for in-flight handlers. */
void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}
1075 
/* HIF op: suspend the MHI channel. Always returns 0. */
int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_suspend(ar_pci);

	return 0;
}
1084 
/* HIF op: resume the MHI channel; always succeeds */
int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	ath12k_mhi_resume(ath12k_pci_priv(ab));

	return 0;
}
1093 
/* HIF op: quiesce CE interrupts, then tear down the CE pipes */
void ath12k_pci_stop(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}
1099 
1100 int ath12k_pci_start(struct ath12k_base *ab)
1101 {
1102 	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
1103 
1104 	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
1105 
1106 	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
1107 		ath12k_pci_aspm_restore(ab_pci);
1108 	else
1109 		ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");
1110 
1111 	ath12k_pci_ce_irqs_enable(ab);
1112 	ath12k_ce_rx_post_buf(ab);
1113 
1114 	return 0;
1115 }
1116 
/* Read a 32-bit device register at @offset.
 *
 * Offsets below BAR + 4K - 32 are always accessible; beyond that the
 * MHI link may need a wakeup first (pci_ops->wakeup/release pair).
 * Offsets at or above WINDOW_START go through a register window:
 * either a per-offset static window (when ab->static_window_map) or
 * the dynamic window, which must be selected under window_lock.
 */
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* directly mapped, no windowing needed */
		val = ioread32(ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: select it under the lock so a
			 * concurrent access cannot retarget the window
			 * between select and read
			 */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* window 0 maps the MHI register region; rebase
			 * the offset to the start of that region
			 */
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	/* balance the wakeup above, but only if it succeeded */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
	return val;
}
1161 
/* Write @value to the 32-bit device register at @offset.
 *
 * Mirrors ath12k_pci_read32(): low offsets are always accessible,
 * higher ones may need an MHI wakeup, and offsets at or above
 * WINDOW_START go through a static or dynamic register window (the
 * dynamic one selected under window_lock).
 */
void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* for offset beyond BAR + 4K - 32, may
	 * need to wakeup MHI to access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		/* directly mapped, no windowing needed */
		iowrite32(value, ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			/* dynamic window: select and write under the lock */
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			/* window 0 maps the MHI register region; rebase
			 * the offset to the start of that region
			 */
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	/* balance the wakeup above, but only if it succeeded */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}
1205 
/* HIF op: bring the device up — reset the SoC, enable MSI and boot
 * firmware over MHI.
 *
 * Returns 0 on success or a negative error from ath12k_mhi_start().
 */
int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	/* forget any previously selected dynamic window */
	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	/* devices with a fixed window layout program it once after boot */
	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}
1233 
/* HIF op: power the device down — undo ath12k_pci_power_up() in
 * reverse order (MSI off, MHI stopped, SoC reset).
 */
void ath12k_pci_power_down(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* restore aspm in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}
1247 
/* HIF bus operations backing the PCI transport; installed into
 * ab->hif.ops at probe time.
 */
static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
};
1266 
1267 static
1268 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
1269 {
1270 	u32 soc_hw_version;
1271 
1272 	soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
1273 	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
1274 			   soc_hw_version);
1275 	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
1276 			   soc_hw_version);
1277 
1278 	ath12k_dbg(ab, ATH12K_DBG_PCI,
1279 		   "pci tcsr_soc_hw_version major %d minor %d\n",
1280 		    *major, *minor);
1281 }
1282 
1283 static int ath12k_pci_probe(struct pci_dev *pdev,
1284 			    const struct pci_device_id *pci_dev)
1285 {
1286 	struct ath12k_base *ab;
1287 	struct ath12k_pci *ab_pci;
1288 	u32 soc_hw_version_major, soc_hw_version_minor;
1289 	int ret;
1290 
1291 	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
1292 	if (!ab) {
1293 		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
1294 		return -ENOMEM;
1295 	}
1296 
1297 	ab->dev = &pdev->dev;
1298 	pci_set_drvdata(pdev, ab);
1299 	ab_pci = ath12k_pci_priv(ab);
1300 	ab_pci->dev_id = pci_dev->device;
1301 	ab_pci->ab = ab;
1302 	ab_pci->pdev = pdev;
1303 	ab->hif.ops = &ath12k_pci_hif_ops;
1304 	pci_set_drvdata(pdev, ab);
1305 	spin_lock_init(&ab_pci->window_lock);
1306 
1307 	ret = ath12k_pci_claim(ab_pci, pdev);
1308 	if (ret) {
1309 		ath12k_err(ab, "failed to claim device: %d\n", ret);
1310 		goto err_free_core;
1311 	}
1312 
1313 	switch (pci_dev->device) {
1314 	case QCN9274_DEVICE_ID:
1315 		ab_pci->msi_config = &ath12k_msi_config[0];
1316 		ab->static_window_map = true;
1317 		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
1318 		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
1319 					   &soc_hw_version_minor);
1320 		switch (soc_hw_version_major) {
1321 		case ATH12K_PCI_SOC_HW_VERSION_2:
1322 			ab->hw_rev = ATH12K_HW_QCN9274_HW20;
1323 			break;
1324 		case ATH12K_PCI_SOC_HW_VERSION_1:
1325 			ab->hw_rev = ATH12K_HW_QCN9274_HW10;
1326 			break;
1327 		default:
1328 			dev_err(&pdev->dev,
1329 				"Unknown hardware version found for QCN9274: 0x%x\n",
1330 				soc_hw_version_major);
1331 			ret = -EOPNOTSUPP;
1332 			goto err_pci_free_region;
1333 		}
1334 		break;
1335 	case WCN7850_DEVICE_ID:
1336 		ab_pci->msi_config = &ath12k_msi_config[0];
1337 		ab->static_window_map = false;
1338 		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
1339 		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
1340 					   &soc_hw_version_minor);
1341 		switch (soc_hw_version_major) {
1342 		case ATH12K_PCI_SOC_HW_VERSION_2:
1343 			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
1344 			break;
1345 		default:
1346 			dev_err(&pdev->dev,
1347 				"Unknown hardware version found for WCN7850: 0x%x\n",
1348 				soc_hw_version_major);
1349 			ret = -EOPNOTSUPP;
1350 			goto err_pci_free_region;
1351 		}
1352 		break;
1353 
1354 	default:
1355 		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
1356 			pci_dev->device);
1357 		ret = -EOPNOTSUPP;
1358 		goto err_pci_free_region;
1359 	}
1360 
1361 	ret = ath12k_pci_msi_alloc(ab_pci);
1362 	if (ret) {
1363 		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
1364 		goto err_pci_free_region;
1365 	}
1366 
1367 	ret = ath12k_core_pre_init(ab);
1368 	if (ret)
1369 		goto err_pci_msi_free;
1370 
1371 	ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
1372 	if (ret) {
1373 		ath12k_err(ab, "failed to set irq affinity %d\n", ret);
1374 		goto err_pci_msi_free;
1375 	}
1376 
1377 	ret = ath12k_mhi_register(ab_pci);
1378 	if (ret) {
1379 		ath12k_err(ab, "failed to register mhi: %d\n", ret);
1380 		goto err_irq_affinity_cleanup;
1381 	}
1382 
1383 	ret = ath12k_hal_srng_init(ab);
1384 	if (ret)
1385 		goto err_mhi_unregister;
1386 
1387 	ret = ath12k_ce_alloc_pipes(ab);
1388 	if (ret) {
1389 		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1390 		goto err_hal_srng_deinit;
1391 	}
1392 
1393 	ath12k_pci_init_qmi_ce_config(ab);
1394 
1395 	ret = ath12k_pci_config_irq(ab);
1396 	if (ret) {
1397 		ath12k_err(ab, "failed to config irq: %d\n", ret);
1398 		goto err_ce_free;
1399 	}
1400 
1401 	/* kernel may allocate a dummy vector before request_irq and
1402 	 * then allocate a real vector when request_irq is called.
1403 	 * So get msi_data here again to avoid spurious interrupt
1404 	 * as msi_data will configured to srngs.
1405 	 */
1406 	ret = ath12k_pci_config_msi_data(ab_pci);
1407 	if (ret) {
1408 		ath12k_err(ab, "failed to config msi_data: %d\n", ret);
1409 		goto err_free_irq;
1410 	}
1411 
1412 	ret = ath12k_core_init(ab);
1413 	if (ret) {
1414 		ath12k_err(ab, "failed to init core: %d\n", ret);
1415 		goto err_free_irq;
1416 	}
1417 	return 0;
1418 
1419 err_free_irq:
1420 	ath12k_pci_free_irq(ab);
1421 
1422 err_ce_free:
1423 	ath12k_ce_free_pipes(ab);
1424 
1425 err_hal_srng_deinit:
1426 	ath12k_hal_srng_deinit(ab);
1427 
1428 err_mhi_unregister:
1429 	ath12k_mhi_unregister(ab_pci);
1430 
1431 err_pci_msi_free:
1432 	ath12k_pci_msi_free(ab_pci);
1433 
1434 err_irq_affinity_cleanup:
1435 	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
1436 
1437 err_pci_free_region:
1438 	ath12k_pci_free_region(ab_pci);
1439 
1440 err_free_core:
1441 	ath12k_core_free(ab);
1442 
1443 	return ret;
1444 }
1445 
/* PCI remove: tear down everything probe set up, in reverse order.
 *
 * If QMI setup had failed, the core was never fully initialized, so
 * only power down and QMI deinit are needed before the common
 * resource cleanup.
 */
static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab);
		ath12k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	/* signal in-flight work that the device is going away */
	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	cancel_work_sync(&ab->reset_work);
	ath12k_core_deinit(ab);

qmi_fail:
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_core_free(ab);
}
1475 
/* PCI shutdown: quiesce the device for reboot/poweroff without the
 * full remove path
 */
static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath12k_pci_power_down(ab);
}
1484 
1485 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
1486 {
1487 	struct ath12k_base *ab = dev_get_drvdata(dev);
1488 	int ret;
1489 
1490 	ret = ath12k_core_suspend(ab);
1491 	if (ret)
1492 		ath12k_warn(ab, "failed to suspend core: %d\n", ret);
1493 
1494 	return ret;
1495 }
1496 
1497 static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
1498 {
1499 	struct ath12k_base *ab = dev_get_drvdata(dev);
1500 	int ret;
1501 
1502 	ret = ath12k_core_resume(ab);
1503 	if (ret)
1504 		ath12k_warn(ab, "failed to resume core: %d\n", ret);
1505 
1506 	return ret;
1507 }
1508 
/* PM callbacks for system-wide suspend/resume */
static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
			 ath12k_pci_pm_suspend,
			 ath12k_pci_pm_resume);
1512 
/* PCI driver descriptor registered in ath12k_pci_init() */
static struct pci_driver ath12k_pci_driver = {
	.name = "ath12k_pci",
	.id_table = ath12k_pci_id_table,
	.probe = ath12k_pci_probe,
	.remove = ath12k_pci_remove,
	.shutdown = ath12k_pci_shutdown,
	.driver.pm = &ath12k_pci_pm_ops,
};
1521 
1522 static int ath12k_pci_init(void)
1523 {
1524 	int ret;
1525 
1526 	ret = pci_register_driver(&ath12k_pci_driver);
1527 	if (ret) {
1528 		pr_err("failed to register ath12k pci driver: %d\n",
1529 		       ret);
1530 		return ret;
1531 	}
1532 
1533 	return 0;
1534 }
1535 module_init(ath12k_pci_init);
1536 
/* Module exit point: unregister the PCI driver */
static void ath12k_pci_exit(void)
{
	pci_unregister_driver(&ath12k_pci_driver);
}

module_exit(ath12k_pci_exit);
1543 
1544 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
1545 MODULE_LICENSE("Dual BSD/GPL");
1546