// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#if defined(CONFIG_OF)
#include <linux/of.h>
#endif
#if defined(__FreeBSD__)
#include <linux/delay.h>
#include <linux/cpu.h>
#endif
#include <linux/time.h>
#include <linux/vmalloc.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"
#include "pcic.h"
#include "qmi.h"

#define ATH11K_PCI_BAR_NUM		0
#define ATH11K_PCI_DMA_MASK		36
#define ATH11K_PCI_COHERENT_DMA_MASK	32

#define TCSR_SOC_HW_VERSION		0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 0)

#define QCA6390_DEVICE_ID		0x1101
#define QCN9074_DEVICE_ID		0x1104
#define WCN6855_DEVICE_ID		0x1103

#define TCSR_SOC_HW_SUB_VER	0x1910010

static const struct pci_device_id ath11k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
	{}
};

MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);

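/* Bus wake/release hooks used via ath11k_pci_ops: QCA6390/WCN6855 vote the
 * MHI device awake around register accesses, QCN9074 does not need this.
 */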
static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}

static void ath11k_pci_bus_release(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
{
	if (!ab->hw_params.static_window_map)
		return ATH11K_PCI_WINDOW_START;

	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
		/* if offset lies within DP register range, use 3rd window */
		return 3 * ATH11K_PCI_WINDOW_START;
	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
		 ATH11K_PCI_WINDOW_RANGE_MASK)
		/* if offset lies within CE register range, use 2nd window */
		return 2 * ATH11K_PCI_WINDOW_START;
	else
		return ATH11K_PCI_WINDOW_START;
}

static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
	struct ath11k_base *ab = ab_pci->ab;

	u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);

	lockdep_assert_held(&ab_pci->window_lock);

	if (window != ab_pci->register_window) {
#if defined(__linux__)
		iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
			  ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
		ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
#elif defined(__FreeBSD__)
		iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
			  (char *)ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
		ioread32((char *)ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
#endif
		ab_pci->register_window = window;
	}
}

static void
ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 window_start;

	window_start = ath11k_pci_get_window_start(ab, offset);

	if (window_start == ATH11K_PCI_WINDOW_START) {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
#if defined(__linux__)
		iowrite32(value, ab->mem + window_start +
#elif defined(__FreeBSD__)
		iowrite32(value, (char *)ab->mem + window_start +
#endif
			  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	} else {
#if defined(__linux__)
		iowrite32(value, ab->mem + window_start +
#elif defined(__FreeBSD__)
		iowrite32(value, (char *)ab->mem + window_start +
#endif
			  (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	}
}

static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 window_start, val;

	window_start = ath11k_pci_get_window_start(ab, offset);

	if (window_start == ATH11K_PCI_WINDOW_START) {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
#if defined(__linux__)
		val = ioread32(ab->mem + window_start +
#elif defined(__FreeBSD__)
		val = ioread32((char *)ab->mem + window_start +
#endif
			       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	} else {
#if defined(__linux__)
		val = ioread32(ab->mem + window_start +
#elif defined(__FreeBSD__)
		val = ioread32((char *)ab->mem + window_start +
#endif
			       (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
	}

	return val;
}

int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	return pci_irq_vector(pci_dev, vector);
}

static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
	.wakeup = ath11k_pci_bus_wake_up,
	.release = ath11k_pci_bus_release,
	.get_msi_irq = ath11k_pci_get_msi_irq,
	.window_write32 = ath11k_pci_window_write32,
	.window_read32 = ath11k_pci_window_read32,
};

static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_pci_get_msi_irq,
	.window_write32 = ath11k_pci_window_write32,
	.window_read32 = ath11k_pci_window_read32,
};

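/* Fallback MSI layout used when only one MSI vector can be allocated: all
 * users (MHI, CE, WAKE, DP) share vector 0.
 */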
static const struct ath11k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath11k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};

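/* Program the static window register so that the UMAC and CE register ranges
 * stay mapped; used on chips with hw_params.static_window_map set.
 */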
static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
{
	u32 umac_window;
	u32 ce_window;
	u32 window;

	umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
	ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
	window = (umac_window << 12) | (ce_window << 6);

	iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
#if defined(__linux__)
		  ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
#elif defined(__FreeBSD__)
		  (char *)ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
#endif
}

static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
	u32 val, delay;

	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle the V bit back, otherwise the SOC stays stuck in reset */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath11k_warn(ab, "link down error during global reset\n");
}

static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val);

	val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing the
	 * warm boot path and entering a dead loop.
	 */
	ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);

	/* This is a read-to-clear register; clear it to prevent Q6 from
	 * entering the wrong code path.
	 */
	val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
}

static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
				   u32 offset, u32 value, u32 mask)
{
	u32 v;
	int i;

	v = ath11k_pcic_read32(ab, offset);
	if ((v & mask) == value)
		return 0;

	for (i = 0; i < 10; i++) {
		ath11k_pcic_write32(ab, offset, (v & ~mask) | value);

		v = ath11k_pcic_read32(ab, offset);
		if ((v & mask) == value)
			return 0;

		mdelay(2);
	}

	ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
		    offset, v & mask, value);

	return -ETIMEDOUT;
}

static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config1: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
{
	u32 val;
	int i;

	val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIe link seems very unstable after the hot reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val);

	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
{
	/* This is a WAR for PCIe hot reset. The target still latches an
	 * interrupt when it receives the hot reset, so when the SBL is
	 * downloaded again it enables interrupts, receives the stale one
	 * and crashes immediately.
	 */
	ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
	u32 val;

	val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
	mdelay(100);

	if (power_on) {
		ath11k_pci_enable_ltssm(ab);
		ath11k_pci_clear_all_intrs(ab);
		ath11k_pci_set_wlaon_pwr_ctrl(ab);
		if (ab->hw_params.fix_l1ss)
			ath11k_pci_fix_l1ss(ab);
	}

	ath11k_mhi_clear_vector(ab);
	ath11k_pci_clear_dbg_registers(ab);
	ath11k_pci_soc_global_reset(ab);
	ath11k_mhi_set_mhictrl_reset(ab);
}

static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->tgt_ce_len = ab->hw_params.target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;

	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
				    &cfg->shadow_reg_v2_len);
}

static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{
	struct pci_dev *dev = ab_pci->pdev;
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
{
	ath11k_pci_msi_config(ab_pci, true);
}

static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
{
	ath11k_pci_msi_config(ab_pci, false);
}

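/* Allocate MSI vectors: request the full per-chip configuration first and
 * fall back to a single shared vector if that fails.
 */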
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	struct pci_dev *pci_dev = ab_pci->pdev;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(pci_dev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
	} else {
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
		ab->pci.msi.config = &msi_config_one_msi;
		ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n");
	}
	ath11k_info(ab, "MSI vectors: %d\n", num_vectors);

	ath11k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath11k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab->pci.msi.ep_base_data = msi_desc->msg.data;

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      &ab->pci.msi.addr_lo);

	if (msi_desc->pci.msi_attrib.is_64) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      &ab->pci.msi.addr_hi);
	} else {
		ab->pci.msi.addr_hi = 0;
	}

	ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}

static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
{
	struct msi_desc *msi_desc;

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
		pci_free_irq_vectors(ab_pci->pdev);
		return -EINVAL;
	}

	ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;

	ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n",
		   ab_pci->ab->pci.msi.ep_base_data);

	return 0;
}

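/* Claim the PCI device: check the device ID, enable the device, map BAR 0
 * and set the 36-bit streaming / 32-bit coherent DMA masks.
 */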
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath11k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
	if (ret) {
		ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath11k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
	if (ret) {
		ath11k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask(&pdev->dev,
			   DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);

	ret = dma_set_coherent_mask(&pdev->dev,
				    DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
			   ATH11K_PCI_COHERENT_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ab->mem_ce = ab->mem;

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
}

static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
{
	if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}

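/* Device coredump support: size and collect FW paging, RDDM and QMI target
 * memory segments into one vmalloc'ed buffer that is handed to the dump
 * worker.
 */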
#ifdef CONFIG_DEV_COREDUMP
static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct ath11k_tlv_dump_data *dump_tlv;
	enum ath11k_fw_crash_dump_type mem_type;
	u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
	struct ath11k_dump_file_data *file_data;
	int i;

	rddm_img = mhi_ctrl->rddm_image;
	if (!rddm_img) {
		ath11k_err(ab, "No RDDM dump found\n");
		return 0;
	}

	fw_img = mhi_ctrl->fbc_image;

	for (i = 0; i < fw_img->entries; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		paging_tlv_sz += fw_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;

	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		rddm_tlv_sz += rddm_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;

	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
		mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);

		if (mem_type == FW_CRASH_DUMP_NONE)
			continue;

		if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "target mem region type %d not supported",
				   ab->qmi.target_mem[i].type);
			continue;
		}

		if (!ab->qmi.target_mem[i].anyaddr)
			continue;

		dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
	}

	for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
		if (!dump_seg_sz[i])
			continue;

		len += sizeof(*dump_tlv) + dump_seg_sz[i];
	}

	if (len)
		len += sizeof(*file_data);

	return len;
}

static void ath11k_pci_coredump_download(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct timespec64 timestamp;
	int i, len, mem_idx;
	enum ath11k_fw_crash_dump_type mem_type;
	struct ath11k_dump_file_data *file_data;
	struct ath11k_tlv_dump_data *dump_tlv;
	size_t hdr_len = sizeof(*file_data);
	void *buf;
	u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};

	ath11k_mhi_coredump(mhi_ctrl, false);

	len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
	if (!len) {
		ath11k_warn(ab, "No crash dump data found for devcoredump");
		return;
	}

	rddm_img = mhi_ctrl->rddm_image;
	fw_img = mhi_ctrl->fbc_image;

	/* dev_coredumpv() requires vmalloc data */
	buf = vzalloc(len);
	if (!buf)
		return;

	ab->dump_data = buf;
	ab->ath11k_coredump_len = len;
	file_data = ab->dump_data;
	strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
	file_data->len = cpu_to_le32(len);
	file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
	file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
	file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
	file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
	guid_gen(&file_data->guid);
	ktime_get_real_ts64(&timestamp);
	file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
	file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
	buf += hdr_len;
	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single
	 * contiguous block of memory
	 */
	for (i = 0; i < fw_img->entries; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
			      fw_img->mhi_buf[i].len);
		buf += fw_img->mhi_buf[i].len;
	}

	dump_tlv = buf;
	dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
	dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
	buf += COREDUMP_TLV_HDR_SIZE;

	/* append all segments together as they are all part of a single
	 * contiguous block of memory
	 */
	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
			      rddm_img->mhi_buf[i].len);
		buf += rddm_img->mhi_buf[i].len;
	}

	mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
	for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
		if (mem_idx == FW_CRASH_DUMP_NONE)
			continue;

		for (i = 0; i < ab->qmi.mem_seg_count; i++) {
			mem_type = ath11k_coredump_get_dump_type
						(ab->qmi.target_mem[i].type);

			if (mem_type != mem_idx)
				continue;

			if (!ab->qmi.target_mem[i].anyaddr) {
				ath11k_dbg(ab, ATH11K_DBG_PCI,
					   "Skipping mem region type %d",
					   ab->qmi.target_mem[i].type);
				continue;
			}

			dump_tlv = buf;
			dump_tlv->type = cpu_to_le32(mem_idx);
			dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
			buf += COREDUMP_TLV_HDR_SIZE;

			memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
				      ab->qmi.target_mem[i].size);

			buf += ab->qmi.target_mem[i].size;
		}
	}

	queue_work(ab->workqueue, &ab->dump_work);
}
#endif

static int ath11k_pci_power_up(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
	ath11k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath11k_pci_aspm_disable(ab_pci);

	ath11k_pci_msi_enable(ab_pci);

	ret = ath11k_mhi_start(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->hw_params.static_window_map)
		ath11k_pci_select_static_window(ab_pci);

	return 0;
}

static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* restore aspm in case firmware bootup fails */
	ath11k_pci_aspm_restore(ab_pci);

	ath11k_pci_force_wake(ab_pci->ab);

	ath11k_pci_msi_disable(ab_pci);

	ath11k_mhi_stop(ab_pci, is_suspend);
	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
	ath11k_pci_sw_reset(ab_pci->ab, false);
}

static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
	struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);

	return ath11k_mhi_suspend(ar_pci);
}

static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
	struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);

	return ath11k_mhi_resume(ar_pci);
}

static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_enable(ab);
}

static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
}

static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* TODO: for now don't restore ASPM in case of single MSI
	 * vector as MHI register reading in M2 causes system hang.
	 */
	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		ath11k_pci_aspm_restore(ab_pci);
	else
		ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");

	ath11k_pcic_start(ab);

	return 0;
}

static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
	.start = ath11k_pci_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = ath11k_pcic_read,
	.power_down = ath11k_pci_power_down,
	.power_up = ath11k_pci_power_up,
	.suspend = ath11k_pci_hif_suspend,
	.resume = ath11k_pci_hif_resume,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address = ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
#ifdef CONFIG_DEV_COREDUMP
	.coredump_download = ath11k_pci_coredump_download,
#endif
};

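/* Read the major/minor SOC hardware version from TCSR; used to pick hw_rev
 * at probe time.
 */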
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{
	u32 soc_hw_version;

	soc_hw_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
			   soc_hw_version);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
			   soc_hw_version);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n",
		   *major, *minor);
}

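/* Set the IRQ affinity hint for the single shared MSI vector; this is a
 * no-op when multiple MSI vectors are in use.
 */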
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
#if defined(__linux__)
					    const struct cpumask *m)
#elif defined(__FreeBSD__)
					    const cpumask_t *m)
#endif
{
	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
		return 0;

	return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}

static int ath11k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath11k_base *ab;
	struct ath11k_pci *ab_pci;
	u32 soc_hw_version_major, soc_hw_version_minor;
	const struct ath11k_pci_ops *pci_ops;
	int ret;
	u32 sub_version;

	ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath11k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath11k_pci_hif_ops;
	ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
	pci_set_drvdata(pdev, ab);
	spin_lock_init(&ab_pci->window_lock);

	/* Set fixed_mem_region for platforms that reserve firmware memory
	 * via DT. If memory is reserved from DT for the FW, the ath11k
	 * driver need not allocate it.
	 */
#if defined(__linux__)
	if (of_property_present(ab->dev->of_node, "memory-region"))
		set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
#endif

	ret = ath11k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath11k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ab->id.vendor = pdev->vendor;
	ab->id.device = pdev->device;
	ab->id.subsystem_vendor = pdev->subsystem_vendor;
	ab->id.subsystem_device = pdev->subsystem_device;

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
		ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
		if (ret) {
			ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
			goto err_pci_free_region;
		}

		ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case 2:
			ab->hw_rev = ATH11K_HW_QCA6390_HW20;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}

		break;
	case QCN9074_DEVICE_ID:
		ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
		if (ret) {
			ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
			goto err_pci_free_region;
		}
		ab->hw_rev = ATH11K_HW_QCN9074_HW10;
		break;
	case WCN6855_DEVICE_ID:
		ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
		if (ret) {
			ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
			goto err_pci_free_region;
		}
		ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
		ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case 2:
			switch (soc_hw_version_minor) {
			case 0x00:
			case 0x01:
				ab->hw_rev = ATH11K_HW_WCN6855_HW20;
				break;
			case 0x10:
			case 0x11:
				sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
				ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
					   sub_version);
				switch (sub_version) {
				case 0x1019A0E1:
				case 0x1019B0E1:
				case 0x1019C0E1:
				case 0x1019D0E1:
					ab->hw_rev = ATH11K_HW_QCA2066_HW21;
					break;
				case 0x001e60e1:
					ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
					break;
				default:
					ab->hw_rev = ATH11K_HW_WCN6855_HW21;
				}
				break;
			default:
				goto unsupported_wcn6855_soc;
			}
			break;
		default:
unsupported_wcn6855_soc:
			dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}

		break;
	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath11k_pcic_init_msi_config(ab);
	if (ret) {
		ath11k_err(ab, "failed to init msi config: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath11k_pci_alloc_msi(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to enable msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_pci_disable_msi;

	ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
	if (ret) {
		ath11k_err(ab, "failed to set irq affinity %d\n", ret);
		goto err_pci_disable_msi;
	}

	ret = ath11k_mhi_register(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_irq_affinity_cleanup;
	}

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_pci_init_qmi_ce_config(ab);

	ret = ath11k_pcic_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	/* The kernel may allocate a dummy vector before request_irq() and
	 * then allocate the real vector when request_irq() is called.
	 * So read msi_data again here to avoid a spurious interrupt, as
	 * msi_data is programmed into the srngs.
	 */
	ret = ath11k_pci_config_msi_data(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to config msi_data: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}
	ath11k_qmi_fwreset_from_cold_boot(ab);
	return 0;

err_free_irq:
	/* __free_irq() expects the caller to have cleared the affinity hint */
	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath11k_pcic_free_irq(ab);

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath11k_mhi_unregister(ab_pci);

err_irq_affinity_cleanup:
	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);

err_pci_disable_msi:
	ath11k_pci_free_msi(ab_pci);

err_pci_free_region:
	ath11k_pci_free_region(ab_pci);

err_free_core:
	ath11k_core_free(ab);

	return ret;
}

static void ath11k_pci_remove(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_pci_power_down(ab, false);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		ath11k_core_pm_notifier_unregister(ab);
		goto qmi_fail;
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	cancel_work_sync(&ab->reset_work);
	cancel_work_sync(&ab->dump_work);
	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_fw_destroy(ab);
	ath11k_mhi_unregister(ab_pci);

	ath11k_pcic_free_irq(ab);
	ath11k_pci_free_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}

static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath11k_pci_power_down(ab, false);
}

#ifdef CONFIG_PM
static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int ret;

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
		return 0;
	}

	ret = ath11k_core_suspend(ab);
	if (ret)
		ath11k_warn(ab, "failed to suspend core: %d\n", ret);

	return 0;
}

static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int ret;

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
		return 0;
	}

	ret = ath11k_core_resume(ab);
	if (ret)
		ath11k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
			 ath11k_pci_pm_suspend,
			 ath11k_pci_pm_resume);
#endif

static struct pci_driver ath11k_pci_driver = {
	.name = "ath11k_pci",
	.id_table = ath11k_pci_id_table,
	.probe = ath11k_pci_probe,
	.remove = ath11k_pci_remove,
	.shutdown = ath11k_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &ath11k_pci_pm_ops,
#endif
};

static int ath11k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath11k_pci_driver);
	if (ret)
		pr_err("failed to register ath11k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath11k_pci_init);

static void ath11k_pci_exit(void)
{
	pci_unregister_driver(&ath11k_pci_driver);
}

module_exit(ath11k_pci_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");
#if defined(__FreeBSD__)
MODULE_VERSION(ath11k_pci, 1);
MODULE_DEPEND(ath11k_pci, linuxkpi, 1, 1, 1);
MODULE_DEPEND(ath11k_pci, linuxkpi_wlan, 1, 1, 1);
MODULE_DEPEND(ath11k_pci, athk_common, 1, 1, 1);
#ifdef CONFIG_ATH11K_DEBUGFS
MODULE_DEPEND(ath11k_pci, debugfs, 1, 1, 1);
#endif
#endif

/* firmware files */
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*");
MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*");