1 // SPDX-License-Identifier: BSD-3-Clause-Clear
2 /*
3 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
4 * Copyright (c) 2021-2025 Qualcomm Innovation Center, Inc. All rights reserved.
5 */
6
7 #include <linux/module.h>
8 #include <linux/msi.h>
9 #include <linux/pci.h>
10 #include <linux/of.h>
11 #include <linux/time.h>
12 #include <linux/vmalloc.h>
13
14 #include "pci.h"
15 #include "core.h"
16 #include "hif.h"
17 #include "mhi.h"
18 #include "debug.h"
19 #include "pcic.h"
20 #include "qmi.h"
21
22 #define ATH11K_PCI_BAR_NUM 0
23 #define ATH11K_PCI_DMA_MASK 36
24 #define ATH11K_PCI_COHERENT_DMA_MASK 32
25
26 #define TCSR_SOC_HW_VERSION 0x0224
27 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8)
28 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 0)
29
30 #define QCA6390_DEVICE_ID 0x1101
31 #define QCN9074_DEVICE_ID 0x1104
32 #define WCN6855_DEVICE_ID 0x1103
33
34 #define TCSR_SOC_HW_SUB_VER 0x1910010
35
/* PCI device IDs handled by this driver; used by the PCI core to match
 * ath11k-capable devices to ath11k_pci_probe().
 */
static const struct pci_device_id ath11k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN6855_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, QCN9074_DEVICE_ID) },
	{}
};
42
43 MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);
44
ath11k_pci_bus_wake_up(struct ath11k_base * ab)45 static int ath11k_pci_bus_wake_up(struct ath11k_base *ab)
46 {
47 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
48
49 return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
50 }
51
ath11k_pci_bus_release(struct ath11k_base * ab)52 static void ath11k_pci_bus_release(struct ath11k_base *ab)
53 {
54 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
55
56 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
57 }
58
/* Map a register offset to the base of the PCI window it must be
 * accessed through. Targets without a static window map always use the
 * (dynamic) first window; otherwise DP registers go through the third
 * window and CE registers through the second.
 */
static u32 ath11k_pci_get_window_start(struct ath11k_base *ab, u32 offset)
{
	u32 window_start = ATH11K_PCI_WINDOW_START;

	if (!ab->hw_params.static_window_map)
		return window_start;

	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK)
		/* offset lies within the DP register range: 3rd window */
		window_start = 3 * ATH11K_PCI_WINDOW_START;
	else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) <
		 ATH11K_PCI_WINDOW_RANGE_MASK)
		/* offset lies within the CE register range: 2nd window */
		window_start = 2 * ATH11K_PCI_WINDOW_START;

	return window_start;
}
74
/* Program the dynamic register window for @offset. Caller must hold
 * window_lock; the write is skipped when the window is already mapped.
 */
static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
	struct ath11k_base *ab = ab_pci->ab;
	u32 window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, offset);

	lockdep_assert_held(&ab_pci->window_lock);

	if (window == ab_pci->register_window)
		return;

	iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
		  ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
	/* read back to ensure the window switch has taken effect */
	ioread32(ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
	ab_pci->register_window = window;
}
90
91 static void
ath11k_pci_window_write32(struct ath11k_base * ab,u32 offset,u32 value)92 ath11k_pci_window_write32(struct ath11k_base *ab, u32 offset, u32 value)
93 {
94 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
95 u32 window_start;
96
97 window_start = ath11k_pci_get_window_start(ab, offset);
98
99 if (window_start == ATH11K_PCI_WINDOW_START) {
100 spin_lock_bh(&ab_pci->window_lock);
101 ath11k_pci_select_window(ab_pci, offset);
102 iowrite32(value, ab->mem + window_start +
103 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
104 spin_unlock_bh(&ab_pci->window_lock);
105 } else {
106 iowrite32(value, ab->mem + window_start +
107 (offset & ATH11K_PCI_WINDOW_RANGE_MASK));
108 }
109 }
110
/* Read a register through the appropriate PCI window; locking mirrors
 * ath11k_pci_window_write32().
 */
static u32 ath11k_pci_window_read32(struct ath11k_base *ab, u32 offset)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 window_start = ath11k_pci_get_window_start(ab, offset);
	void __iomem *addr;
	u32 val;

	addr = ab->mem + window_start + (offset & ATH11K_PCI_WINDOW_RANGE_MASK);

	if (window_start == ATH11K_PCI_WINDOW_START) {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
		val = ioread32(addr);
		spin_unlock_bh(&ab_pci->window_lock);
	} else {
		val = ioread32(addr);
	}

	return val;
}
131
ath11k_pci_get_msi_irq(struct ath11k_base * ab,unsigned int vector)132 int ath11k_pci_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
133 {
134 struct pci_dev *pci_dev = to_pci_dev(ab->dev);
135
136 return pci_irq_vector(pci_dev, vector);
137 }
138
/* Register-access ops used for QCA6390/WCN6855: these targets sleep, so
 * register access is bracketed by an MHI wakeup/release.
 */
static const struct ath11k_pci_ops ath11k_pci_ops_qca6390 = {
	.wakeup = ath11k_pci_bus_wake_up,
	.release = ath11k_pci_bus_release,
	.get_msi_irq = ath11k_pci_get_msi_irq,
	.window_write32 = ath11k_pci_window_write32,
	.window_read32 = ath11k_pci_window_read32,
};
146
/* Register-access ops used for QCN9074: no wakeup/release hooks are
 * needed, window accessors are shared with the QCA6390 ops.
 */
static const struct ath11k_pci_ops ath11k_pci_ops_qcn9074 = {
	.wakeup = NULL,
	.release = NULL,
	.get_msi_irq = ath11k_pci_get_msi_irq,
	.window_write32 = ath11k_pci_window_write32,
	.window_read32 = ath11k_pci_window_read32,
};
154
/* Fallback MSI layout when only a single vector can be allocated:
 * all users (MHI, CE, WAKE, DP) share base vector 0.
 */
static const struct ath11k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath11k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};
165
/* Program the window register once with a static window layout for
 * targets with hw_params.static_window_map: the UMAC window value goes
 * into bits 12-17 (3rd window) and the CE window value into bits 6-11
 * (2nd window), matching ath11k_pci_get_window_start().
 */
static inline void ath11k_pci_select_static_window(struct ath11k_pci *ab_pci)
{
	u32 umac_window;
	u32 ce_window;
	u32 window;

	umac_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_SEQ_WCSS_UMAC_OFFSET);
	ce_window = FIELD_GET(ATH11K_PCI_WINDOW_VALUE_MASK, HAL_CE_WFSS_CE_REG_BASE);
	window = (umac_window << 12) | (ce_window << 6);

	iowrite32(ATH11K_PCI_WINDOW_ENABLE_BIT | window,
		  ab_pci->ab->mem + ATH11K_PCI_WINDOW_REG_ADDRESS);
}
179
/* Pulse the SoC global reset bit: assert the V bit, wait, deassert it
 * and wait again, then verify the PCIe link survived the reset.
 */
static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
	u32 val, delay;

	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pcic_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	/* an all-ones readback indicates the PCIe link went down */
	val = ath11k_pcic_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath11k_warn(ab, "link down error during global reset\n");
}
205
/* Clear Q6 warm-boot/debug state so firmware does not take the warm
 * boot path after the upcoming reset.
 */
static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath11k_pcic_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_q6_cookie_addr 0x%x\n", val);

	val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	ath11k_pcic_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath11k_pcic_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "wlaon_warm_sw_entry 0x%x\n", val);

	/* A read-clear register; read it so Q6 does not enter a
	 * wrong code path.
	 */
	val = ath11k_pcic_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause %d\n", val);
}
235
/* Set the masked bits of a PCIe link register to @value, retrying the
 * write up to 10 times with a short delay. Returns 0 once the readback
 * matches, -ETIMEDOUT otherwise.
 */
static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
				   u32 offset, u32 value, u32 mask)
{
	int attempt;
	u32 reg;

	reg = ath11k_pcic_read32(ab, offset);

	for (attempt = 0; attempt < 10; attempt++) {
		if ((reg & mask) == value)
			return 0;

		ath11k_pcic_write32(ab, offset, (reg & ~mask) | value);

		reg = ath11k_pcic_read32(ab, offset);
		if ((reg & mask) == value)
			return 0;

		mdelay(2);
	}

	ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
		    offset, reg & mask, value);

	return -ETIMEDOUT;
}
261
/* Program the QSERDES/PCS link registers (sysclk select and oscillator
 * detect configs) to known values; only called for targets with
 * hw_params.fix_l1ss set. Returns 0 on success or the first failing
 * ath11k_pci_set_link_reg() error.
 */
static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG(ab),
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG1_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG1_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config1 error: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG2_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG2_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_PCS_OSC_DTCT_CONFIG4_REG(ab),
				      PCIE_PCS_OSC_DTCT_CONFIG4_VAL,
				      PCIE_PCS_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
		return ret;
	}

	return 0;
}
304
/* Force the PCIe LTSSM register to the expected value (retrying, since
 * the link is unstable right after a hot reset) and then set the GCC
 * hot reset bit.
 */
static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
{
	u32 val;
	int i;

	val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset*/
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		/* all-ones read means link down: give it time to recover */
		if (val == 0xffffffff)
			mdelay(5);

		ath11k_pcic_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath11k_pcic_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath11k_dbg(ab, ATH11K_DBG_PCI, "ltssm 0x%x\n", val);

	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath11k_pcic_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath11k_pcic_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}
332
static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
{
	/* This is a WAR for PCIe hot reset: the target latches interrupts
	 * across the reset, so when SBL is downloaded again it unmasks
	 * them, takes the stale interrupt and crashes immediately.
	 * Clearing all pending interrupts here avoids that.
	 */
	ath11k_pcic_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}
342
/* Clear the VDD4BLOW bit in the WLAON QFPROM power control register. */
static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
	u32 val;

	val = ath11k_pcic_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath11k_pcic_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}
351
/* Force the target awake via the PCIe local wake register and give it
 * a moment to come up.
 */
static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
	ath11k_pcic_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}
357
/* Perform a software reset of the SoC.
 *
 * @power_on: true on the power-up path, where the PCIe link first needs
 * preparation (LTSSM, stale interrupt clearing, WLAON power control and
 * the optional L1ss register fixup) before the reset sequence.
 */
static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
	mdelay(100);

	if (power_on) {
		ath11k_pci_enable_ltssm(ab);
		ath11k_pci_clear_all_intrs(ab);
		ath11k_pci_set_wlaon_pwr_ctrl(ab);
		if (ab->hw_params.fix_l1ss)
			ath11k_pci_fix_l1ss(ab);
	}

	/* clear MHI vector and warm-boot state before the global reset */
	ath11k_mhi_clear_vector(ab);
	ath11k_pci_clear_dbg_registers(ab);
	ath11k_pci_soc_global_reset(ab);
	ath11k_mhi_set_mhictrl_reset(ab);
}
375
ath11k_pci_init_qmi_ce_config(struct ath11k_base * ab)376 static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
377 {
378 struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
379
380 cfg->tgt_ce = ab->hw_params.target_ce_config;
381 cfg->tgt_ce_len = ab->hw_params.target_ce_count;
382
383 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
384 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
385 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id;
386
387 ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
388 &cfg->shadow_reg_v2_len);
389 }
390
/* Read-modify-write the MSI enable bit in the device's MSI capability. */
static void ath11k_pci_msi_config(struct ath11k_pci *ab_pci, bool enable)
{
	struct pci_dev *pdev = ab_pci->pdev;
	u16 flags;

	pci_read_config_word(pdev, pdev->msi_cap + PCI_MSI_FLAGS, &flags);

	flags &= ~PCI_MSI_FLAGS_ENABLE;
	if (enable)
		flags |= PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(pdev, pdev->msi_cap + PCI_MSI_FLAGS, flags);
}
405
/* Set the MSI enable bit in PCI config space. */
static void ath11k_pci_msi_enable(struct ath11k_pci *ab_pci)
{
	ath11k_pci_msi_config(ab_pci, true);
}
410
/* Clear the MSI enable bit in PCI config space. */
static void ath11k_pci_msi_disable(struct ath11k_pci *ab_pci)
{
	ath11k_pci_msi_config(ab_pci, false);
}
415
/* Allocate MSI vectors for the device.
 *
 * Try the full vector count from the hw-specific MSI config first; on
 * failure fall back to a single shared vector and switch the config to
 * msi_config_one_msi. Also caches the MSI address/data the endpoint
 * uses to signal interrupts. MSI stays disabled here and is enabled
 * later by ath11k_pci_msi_enable() on power up.
 *
 * Returns 0 on success or a negative error code.
 */
static int ath11k_pci_alloc_msi(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	const struct ath11k_msi_config *msi_config = ab->pci.msi.config;
	struct pci_dev *pci_dev = ab_pci->pdev;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(pci_dev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
	} else {
		/* full allocation failed: fall back to one shared vector */
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags);
		ab->pci.msi.config = &msi_config_one_msi;
		ath11k_dbg(ab, ATH11K_DBG_PCI, "request one msi vector\n");
	}
	ath11k_info(ab, "MSI vectors: %d\n", num_vectors);

	ath11k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath11k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab->pci.msi.ep_base_data = msi_desc->msg.data;

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      &ab->pci.msi.addr_lo);

	/* the high address word only exists for 64-bit capable MSI */
	if (msi_desc->pci.msi_attrib.is_64) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      &ab->pci.msi.addr_hi);
	} else {
		ab->pci.msi.addr_hi = 0;
	}

	ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab->pci.msi.ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}
477
/* Release the MSI vectors allocated by ath11k_pci_alloc_msi(). */
static void ath11k_pci_free_msi(struct ath11k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}
482
ath11k_pci_config_msi_data(struct ath11k_pci * ab_pci)483 static int ath11k_pci_config_msi_data(struct ath11k_pci *ab_pci)
484 {
485 struct msi_desc *msi_desc;
486
487 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
488 if (!msi_desc) {
489 ath11k_err(ab_pci->ab, "msi_desc is NULL!\n");
490 pci_free_irq_vectors(ab_pci->pdev);
491 return -EINVAL;
492 }
493
494 ab_pci->ab->pci.msi.ep_base_data = msi_desc->msg.data;
495
496 ath11k_dbg(ab_pci->ab, ATH11K_DBG_PCI, "after request_irq msi_ep_base_data %d\n",
497 ab_pci->ab->pci.msi.ep_base_data);
498
499 return 0;
500 }
501
/* Claim the PCI device: verify the device ID, assign and request BAR0,
 * configure DMA masks (36-bit streaming, 32-bit coherent) and map the
 * BAR. Returns 0 on success or a negative error code; cleanup on full
 * success is done by ath11k_pci_free_region().
 */
static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath11k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
	if (ret) {
		ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath11k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
	if (ret) {
		ath11k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask(&pdev->dev,
			   DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	ab_pci->dma_mask = DMA_BIT_MASK(ATH11K_PCI_DMA_MASK);

	ret = dma_set_coherent_mask(&pdev->dev,
				    DMA_BIT_MASK(ATH11K_PCI_COHERENT_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci coherent dma mask to %d: %d\n",
			   ATH11K_PCI_COHERENT_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	/* CE registers are reached through the same BAR mapping */
	ab->mem_ce = ab->mem;

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci_mem 0x%p\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}
574
ath11k_pci_free_region(struct ath11k_pci * ab_pci)575 static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
576 {
577 struct ath11k_base *ab = ab_pci->ab;
578 struct pci_dev *pci_dev = ab_pci->pdev;
579
580 pci_iounmap(pci_dev, ab->mem);
581 ab->mem = NULL;
582 pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
583 if (pci_is_enabled(pci_dev))
584 pci_disable_device(pci_dev);
585 }
586
/* Save the current link control value and disable ASPM L0s/L1; the
 * saved state is put back by ath11k_pci_aspm_restore().
 */
static void ath11k_pci_aspm_disable(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags);
}
605
/* Restore the ASPM bits saved by ath11k_pci_aspm_disable(); a no-op if
 * no restore is pending.
 */
static void ath11k_pci_aspm_restore(struct ath11k_pci *ab_pci)
{
	if (test_and_clear_bit(ATH11K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}
614
615 #ifdef CONFIG_DEV_COREDUMP
/* Compute the total devcoredump buffer size and fill @dump_seg_sz with
 * the per-type segment sizes (firmware paging data, RDDM data and QMI
 * target memory regions). Returns the total size including file and
 * TLV headers, or 0 when no RDDM dump is available.
 */
static int ath11k_pci_coredump_calculate_size(struct ath11k_base *ab, u32 *dump_seg_sz)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
	struct image_info *rddm_img, *fw_img;
	struct ath11k_tlv_dump_data *dump_tlv;
	enum ath11k_fw_crash_dump_type mem_type;
	u32 len = 0, rddm_tlv_sz = 0, paging_tlv_sz = 0;
	struct ath11k_dump_file_data *file_data;
	int i;

	rddm_img = mhi_ctrl->rddm_image;
	if (!rddm_img) {
		ath11k_err(ab, "No RDDM dump found\n");
		return 0;
	}

	fw_img = mhi_ctrl->fbc_image;

	/* sum the firmware paging buffers */
	for (i = 0; i < fw_img->entries ; i++) {
		if (!fw_img->mhi_buf[i].buf)
			continue;

		paging_tlv_sz += fw_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA] = paging_tlv_sz;

	/* sum the RDDM buffers */
	for (i = 0; i < rddm_img->entries; i++) {
		if (!rddm_img->mhi_buf[i].buf)
			continue;

		rddm_tlv_sz += rddm_img->mhi_buf[i].len;
	}
	dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA] = rddm_tlv_sz;

	/* add QMI target memory regions that map to a dump type */
	for (i = 0; i < ab->qmi.mem_seg_count; i++) {
		mem_type = ath11k_coredump_get_dump_type(ab->qmi.target_mem[i].type);

		if (mem_type == FW_CRASH_DUMP_NONE)
			continue;

		if (mem_type == FW_CRASH_DUMP_TYPE_MAX) {
			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "target mem region type %d not supported",
				   ab->qmi.target_mem[i].type);
			continue;
		}

		if (!ab->qmi.target_mem[i].anyaddr)
			continue;

		dump_seg_sz[mem_type] += ab->qmi.target_mem[i].size;
	}

	/* one TLV header per non-empty segment */
	for (i = 0; i < FW_CRASH_DUMP_TYPE_MAX; i++) {
		if (!dump_seg_sz[i])
			continue;

		len += sizeof(*dump_tlv) + dump_seg_sz[i];
	}

	/* the file header is only needed when there is some payload */
	if (len)
		len += sizeof(*file_data);

	return len;
}
682
ath11k_pci_coredump_download(struct ath11k_base * ab)683 static void ath11k_pci_coredump_download(struct ath11k_base *ab)
684 {
685 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
686 struct mhi_controller *mhi_ctrl = ab_pci->mhi_ctrl;
687 struct image_info *rddm_img, *fw_img;
688 struct timespec64 timestamp;
689 int i, len, mem_idx;
690 enum ath11k_fw_crash_dump_type mem_type;
691 struct ath11k_dump_file_data *file_data;
692 struct ath11k_tlv_dump_data *dump_tlv;
693 size_t hdr_len = sizeof(*file_data);
694 void *buf;
695 u32 dump_seg_sz[FW_CRASH_DUMP_TYPE_MAX] = {};
696
697 ath11k_mhi_coredump(mhi_ctrl, false);
698
699 len = ath11k_pci_coredump_calculate_size(ab, dump_seg_sz);
700 if (!len) {
701 ath11k_warn(ab, "No crash dump data found for devcoredump");
702 return;
703 }
704
705 rddm_img = mhi_ctrl->rddm_image;
706 fw_img = mhi_ctrl->fbc_image;
707
708 /* dev_coredumpv() requires vmalloc data */
709 buf = vzalloc(len);
710 if (!buf)
711 return;
712
713 ab->dump_data = buf;
714 ab->ath11k_coredump_len = len;
715 file_data = ab->dump_data;
716 strscpy(file_data->df_magic, "ATH11K-FW-DUMP", sizeof(file_data->df_magic));
717 file_data->len = cpu_to_le32(len);
718 file_data->version = cpu_to_le32(ATH11K_FW_CRASH_DUMP_V2);
719 file_data->chip_id = cpu_to_le32(ab_pci->dev_id);
720 file_data->qrtr_id = cpu_to_le32(ab_pci->ab->qmi.service_ins_id);
721 file_data->bus_id = cpu_to_le32(pci_domain_nr(ab_pci->pdev->bus));
722 guid_gen(&file_data->guid);
723 ktime_get_real_ts64(×tamp);
724 file_data->tv_sec = cpu_to_le64(timestamp.tv_sec);
725 file_data->tv_nsec = cpu_to_le64(timestamp.tv_nsec);
726 buf += hdr_len;
727 dump_tlv = buf;
728 dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_PAGING_DATA);
729 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_PAGING_DATA]);
730 buf += COREDUMP_TLV_HDR_SIZE;
731
732 /* append all segments together as they are all part of a single contiguous
733 * block of memory
734 */
735 for (i = 0; i < fw_img->entries ; i++) {
736 if (!fw_img->mhi_buf[i].buf)
737 continue;
738
739 memcpy_fromio(buf, (void const __iomem *)fw_img->mhi_buf[i].buf,
740 fw_img->mhi_buf[i].len);
741 buf += fw_img->mhi_buf[i].len;
742 }
743
744 dump_tlv = buf;
745 dump_tlv->type = cpu_to_le32(FW_CRASH_DUMP_RDDM_DATA);
746 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[FW_CRASH_DUMP_RDDM_DATA]);
747 buf += COREDUMP_TLV_HDR_SIZE;
748
749 /* append all segments together as they are all part of a single contiguous
750 * block of memory
751 */
752 for (i = 0; i < rddm_img->entries; i++) {
753 if (!rddm_img->mhi_buf[i].buf)
754 continue;
755
756 memcpy_fromio(buf, (void const __iomem *)rddm_img->mhi_buf[i].buf,
757 rddm_img->mhi_buf[i].len);
758 buf += rddm_img->mhi_buf[i].len;
759 }
760
761 mem_idx = FW_CRASH_DUMP_REMOTE_MEM_DATA;
762 for (; mem_idx < FW_CRASH_DUMP_TYPE_MAX; mem_idx++) {
763 if (mem_idx == FW_CRASH_DUMP_NONE)
764 continue;
765
766 for (i = 0; i < ab->qmi.mem_seg_count; i++) {
767 mem_type = ath11k_coredump_get_dump_type
768 (ab->qmi.target_mem[i].type);
769
770 if (mem_type != mem_idx)
771 continue;
772
773 if (!ab->qmi.target_mem[i].anyaddr) {
774 ath11k_dbg(ab, ATH11K_DBG_PCI,
775 "Skipping mem region type %d",
776 ab->qmi.target_mem[i].type);
777 continue;
778 }
779
780 dump_tlv = buf;
781 dump_tlv->type = cpu_to_le32(mem_idx);
782 dump_tlv->tlv_len = cpu_to_le32(dump_seg_sz[mem_idx]);
783 buf += COREDUMP_TLV_HDR_SIZE;
784
785 memcpy_fromio(buf, ab->qmi.target_mem[i].iaddr,
786 ab->qmi.target_mem[i].size);
787
788 buf += ab->qmi.target_mem[i].size;
789 }
790 }
791
792 queue_work(ab->workqueue, &ab->dump_work);
793 }
794 #endif
795
/* Power up the target: software-reset the SoC, disable ASPM for the
 * firmware download, enable MSI and start MHI. Returns 0 on success or
 * a negative error code.
 */
static int ath11k_pci_power_up(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	int ret;

	/* the cached register window is stale after a reset */
	ab_pci->register_window = 0;
	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
	ath11k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath11k_pci_aspm_disable(ab_pci);

	ath11k_pci_msi_enable(ab_pci);

	ret = ath11k_mhi_start(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->hw_params.static_window_map)
		ath11k_pci_select_static_window(ab_pci);

	return 0;
}
823
/* Power down the target: restore ASPM, force the device awake for the
 * shutdown register accesses, disable MSI, stop MHI and reset the SoC.
 */
static void ath11k_pci_power_down(struct ath11k_base *ab, bool is_suspend)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* restore aspm in case firmware bootup fails */
	ath11k_pci_aspm_restore(ab_pci);

	ath11k_pci_force_wake(ab_pci->ab);

	ath11k_pci_msi_disable(ab_pci);

	ath11k_mhi_stop(ab_pci, is_suspend);
	clear_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags);
	ath11k_pci_sw_reset(ab_pci->ab, false);
}
839
/* HIF suspend hook: delegate bus suspend to the MHI layer. */
static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
	return ath11k_mhi_suspend(ath11k_pci_priv(ab));
}
846
/* HIF resume hook: delegate bus resume to the MHI layer. */
static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
	return ath11k_mhi_resume(ath11k_pci_priv(ab));
}
853
/* HIF hook: enable all copy-engine interrupts. */
static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irqs_enable(ab);
}
858
/* HIF hook: disable copy-engine interrupts and wait for them to drain. */
static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
	ath11k_pcic_ce_irq_disable_sync(ab);
}
863
/* HIF start hook: re-enable ASPM (unless running on a single MSI
 * vector) and start the common PCI core. Always returns 0.
 */
static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* TODO: for now don't restore ASPM in case of single MSI
	 * vector as MHI register reading in M2 causes system hang.
	 */
	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		ath11k_pci_aspm_restore(ab_pci);
	else
		ath11k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");

	ath11k_pcic_start(ab);

	return 0;
}
880
/* HIF layer callbacks for the PCI bus; mostly thin wrappers around the
 * shared pcic helpers plus PCI-specific power and suspend handling.
 */
static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
	.start = ath11k_pci_start,
	.stop = ath11k_pcic_stop,
	.read32 = ath11k_pcic_read32,
	.write32 = ath11k_pcic_write32,
	.read = ath11k_pcic_read,
	.power_down = ath11k_pci_power_down,
	.power_up = ath11k_pci_power_up,
	.suspend = ath11k_pci_hif_suspend,
	.resume = ath11k_pci_hif_resume,
	.irq_enable = ath11k_pcic_ext_irq_enable,
	.irq_disable = ath11k_pcic_ext_irq_disable,
	.get_msi_address = ath11k_pcic_get_msi_address,
	.get_user_msi_vector = ath11k_pcic_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pcic_map_service_to_pipe,
	.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath11k_pcic_get_ce_msi_idx,
#ifdef CONFIG_DEV_COREDUMP
	.coredump_download = ath11k_pci_coredump_download,
#endif
};
903
/* Extract the major/minor hardware version fields packed into the TCSR
 * hardware version register.
 */
static void ath11k_pci_read_hw_version(struct ath11k_base *ab, u32 *major, u32 *minor)
{
	u32 ver;

	ver = ath11k_pcic_read32(ab, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, ver);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, ver);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "tcsr_soc_hw_version major %d minor %d\n",
		   *major, *minor);
}
917
/* Pin the single shared MSI IRQ to the given CPU mask; a no-op when
 * multiple MSI vectors are in use.
 */
static int ath11k_pci_set_irq_affinity_hint(struct ath11k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab_pci->ab->dev_flags))
		return 0;

	return irq_set_affinity_and_hint(ab_pci->pdev->irq, m);
}
926
ath11k_pci_probe(struct pci_dev * pdev,const struct pci_device_id * pci_dev)927 static int ath11k_pci_probe(struct pci_dev *pdev,
928 const struct pci_device_id *pci_dev)
929 {
930 struct ath11k_base *ab;
931 struct ath11k_pci *ab_pci;
932 u32 soc_hw_version_major, soc_hw_version_minor;
933 int ret;
934 u32 sub_version;
935
936 ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI);
937
938 if (!ab) {
939 dev_err(&pdev->dev, "failed to allocate ath11k base\n");
940 return -ENOMEM;
941 }
942
943 ab->dev = &pdev->dev;
944 pci_set_drvdata(pdev, ab);
945 ab_pci = ath11k_pci_priv(ab);
946 ab_pci->dev_id = pci_dev->device;
947 ab_pci->ab = ab;
948 ab_pci->pdev = pdev;
949 ab->hif.ops = &ath11k_pci_hif_ops;
950 ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL;
951 pci_set_drvdata(pdev, ab);
952 spin_lock_init(&ab_pci->window_lock);
953
954 /* Set fixed_mem_region to true for platforms support reserved memory
955 * from DT. If memory is reserved from DT for FW, ath11k driver need not
956 * allocate memory.
957 */
958 if (of_property_present(ab->dev->of_node, "memory-region"))
959 set_bit(ATH11K_FLAG_FIXED_MEM_RGN, &ab->dev_flags);
960
961 ret = ath11k_pci_claim(ab_pci, pdev);
962 if (ret) {
963 ath11k_err(ab, "failed to claim device: %d\n", ret);
964 goto err_free_core;
965 }
966
967 ath11k_dbg(ab, ATH11K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
968 pdev->vendor, pdev->device,
969 pdev->subsystem_vendor, pdev->subsystem_device);
970
971 ab->id.vendor = pdev->vendor;
972 ab->id.device = pdev->device;
973 ab->id.subsystem_vendor = pdev->subsystem_vendor;
974 ab->id.subsystem_device = pdev->subsystem_device;
975
976 switch (pci_dev->device) {
977 case QCA6390_DEVICE_ID:
978 ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
979 if (ret) {
980 ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
981 goto err_pci_free_region;
982 }
983
984 ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
985 &soc_hw_version_minor);
986 switch (soc_hw_version_major) {
987 case 2:
988 ab->hw_rev = ATH11K_HW_QCA6390_HW20;
989 break;
990 default:
991 dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
992 soc_hw_version_major, soc_hw_version_minor);
993 ret = -EOPNOTSUPP;
994 goto err_pci_free_region;
995 }
996
997 break;
998 case QCN9074_DEVICE_ID:
999 ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qcn9074);
1000 if (ret) {
1001 ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1002 goto err_pci_free_region;
1003 }
1004 ab->hw_rev = ATH11K_HW_QCN9074_HW10;
1005 break;
1006 case WCN6855_DEVICE_ID:
1007 ret = ath11k_pcic_register_pci_ops(ab, &ath11k_pci_ops_qca6390);
1008 if (ret) {
1009 ath11k_err(ab, "failed to register PCI ops: %d\n", ret);
1010 goto err_pci_free_region;
1011 }
1012 ab->id.bdf_search = ATH11K_BDF_SEARCH_BUS_AND_BOARD;
1013 ath11k_pci_read_hw_version(ab, &soc_hw_version_major,
1014 &soc_hw_version_minor);
1015 switch (soc_hw_version_major) {
1016 case 2:
1017 switch (soc_hw_version_minor) {
1018 case 0x00:
1019 case 0x01:
1020 ab->hw_rev = ATH11K_HW_WCN6855_HW20;
1021 break;
1022 case 0x10:
1023 case 0x11:
1024 sub_version = ath11k_pcic_read32(ab, TCSR_SOC_HW_SUB_VER);
1025 ath11k_dbg(ab, ATH11K_DBG_PCI, "sub_version 0x%x\n",
1026 sub_version);
1027 switch (sub_version) {
1028 case 0x1019A0E1:
1029 case 0x1019B0E1:
1030 case 0x1019C0E1:
1031 case 0x1019D0E1:
1032 ab->hw_rev = ATH11K_HW_QCA2066_HW21;
1033 break;
1034 case 0x001e60e1:
1035 ab->hw_rev = ATH11K_HW_QCA6698AQ_HW21;
1036 break;
1037 default:
1038 ab->hw_rev = ATH11K_HW_WCN6855_HW21;
1039 }
1040 break;
1041 default:
1042 goto unsupported_wcn6855_soc;
1043 }
1044 break;
1045 default:
1046 unsupported_wcn6855_soc:
1047 dev_err(&pdev->dev, "Unsupported WCN6855 SOC hardware version: %d %d\n",
1048 soc_hw_version_major, soc_hw_version_minor);
1049 ret = -EOPNOTSUPP;
1050 goto err_pci_free_region;
1051 }
1052
1053 break;
1054 default:
1055 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
1056 pci_dev->device);
1057 ret = -EOPNOTSUPP;
1058 goto err_pci_free_region;
1059 }
1060
1061 ret = ath11k_pcic_init_msi_config(ab);
1062 if (ret) {
1063 ath11k_err(ab, "failed to init msi config: %d\n", ret);
1064 goto err_pci_free_region;
1065 }
1066
1067 ret = ath11k_pci_alloc_msi(ab_pci);
1068 if (ret) {
1069 ath11k_err(ab, "failed to enable msi: %d\n", ret);
1070 goto err_pci_free_region;
1071 }
1072
1073 ret = ath11k_core_pre_init(ab);
1074 if (ret)
1075 goto err_pci_disable_msi;
1076
1077 ret = ath11k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
1078 if (ret) {
1079 ath11k_err(ab, "failed to set irq affinity %d\n", ret);
1080 goto err_pci_disable_msi;
1081 }
1082
1083 ret = ath11k_mhi_register(ab_pci);
1084 if (ret) {
1085 ath11k_err(ab, "failed to register mhi: %d\n", ret);
1086 goto err_irq_affinity_cleanup;
1087 }
1088
1089 ret = ath11k_hal_srng_init(ab);
1090 if (ret)
1091 goto err_mhi_unregister;
1092
1093 ret = ath11k_ce_alloc_pipes(ab);
1094 if (ret) {
1095 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
1096 goto err_hal_srng_deinit;
1097 }
1098
1099 ath11k_pci_init_qmi_ce_config(ab);
1100
1101 ret = ath11k_pcic_config_irq(ab);
1102 if (ret) {
1103 ath11k_err(ab, "failed to config irq: %d\n", ret);
1104 goto err_ce_free;
1105 }
1106
1107 /* kernel may allocate a dummy vector before request_irq and
1108 * then allocate a real vector when request_irq is called.
1109 * So get msi_data here again to avoid spurious interrupt
1110 * as msi_data will configured to srngs.
1111 */
1112 ret = ath11k_pci_config_msi_data(ab_pci);
1113 if (ret) {
1114 ath11k_err(ab, "failed to config msi_data: %d\n", ret);
1115 goto err_free_irq;
1116 }
1117
1118 ret = ath11k_core_init(ab);
1119 if (ret) {
1120 ath11k_err(ab, "failed to init core: %d\n", ret);
1121 goto err_free_irq;
1122 }
1123 ath11k_qmi_fwreset_from_cold_boot(ab);
1124 return 0;
1125
1126 err_free_irq:
1127 /* __free_irq() expects the caller to have cleared the affinity hint */
1128 ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
1129 ath11k_pcic_free_irq(ab);
1130
1131 err_ce_free:
1132 ath11k_ce_free_pipes(ab);
1133
1134 err_hal_srng_deinit:
1135 ath11k_hal_srng_deinit(ab);
1136
1137 err_mhi_unregister:
1138 ath11k_mhi_unregister(ab_pci);
1139
1140 err_irq_affinity_cleanup:
1141 ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
1142
1143 err_pci_disable_msi:
1144 ath11k_pci_free_msi(ab_pci);
1145
1146 err_pci_free_region:
1147 ath11k_pci_free_region(ab_pci);
1148
1149 err_free_core:
1150 ath11k_core_free(ab);
1151
1152 return ret;
1153 }
1154
/* PCI .remove callback: undo everything ath11k_pci_probe() set up.
 *
 * Two teardown paths exist: if QMI initialization failed earlier
 * (ATH11K_FLAG_QMI_FAIL set), the core was never fully brought up, so
 * only the partial probe-time state is destroyed; otherwise the full
 * core deinit runs first. Both paths join at qmi_fail for the common
 * resource teardown.
 */
static void ath11k_pci_remove(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* clear the affinity hint before the IRQs are freed further down;
	 * __free_irq() expects the caller to have done this
	 */
	ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		/* QMI never came up: skip core deinit, tear down only what
		 * probe had established by the time QMI failed
		 */
		ath11k_pci_power_down(ab, false);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		ath11k_core_pm_notifier_unregister(ab);
		goto qmi_fail;
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	/* ensure no recovery/coredump work is still running during teardown */
	cancel_work_sync(&ab->reset_work);
	cancel_work_sync(&ab->dump_work);
	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_fw_destroy(ab);
	ath11k_mhi_unregister(ab_pci);

	ath11k_pcic_free_irq(ab);
	ath11k_pci_free_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}
1188
ath11k_pci_shutdown(struct pci_dev * pdev)1189 static void ath11k_pci_shutdown(struct pci_dev *pdev)
1190 {
1191 struct ath11k_base *ab = pci_get_drvdata(pdev);
1192 struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
1193
1194 ath11k_pci_set_irq_affinity_hint(ab_pci, NULL);
1195 ath11k_pci_power_down(ab, false);
1196 }
1197
ath11k_pci_pm_suspend(struct device * dev)1198 static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
1199 {
1200 struct ath11k_base *ab = dev_get_drvdata(dev);
1201 int ret;
1202
1203 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1204 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci suspend as qmi is not initialised\n");
1205 return 0;
1206 }
1207
1208 ret = ath11k_core_suspend(ab);
1209 if (ret)
1210 ath11k_warn(ab, "failed to suspend core: %d\n", ret);
1211
1212 return 0;
1213 }
1214
ath11k_pci_pm_resume(struct device * dev)1215 static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
1216 {
1217 struct ath11k_base *ab = dev_get_drvdata(dev);
1218 int ret;
1219
1220 if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
1221 ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot skipping pci resume as qmi is not initialised\n");
1222 return 0;
1223 }
1224
1225 ret = ath11k_core_resume(ab);
1226 if (ret)
1227 ath11k_warn(ab, "failed to resume core: %d\n", ret);
1228
1229 return ret;
1230 }
1231
ath11k_pci_pm_suspend_late(struct device * dev)1232 static __maybe_unused int ath11k_pci_pm_suspend_late(struct device *dev)
1233 {
1234 struct ath11k_base *ab = dev_get_drvdata(dev);
1235 int ret;
1236
1237 ret = ath11k_core_suspend_late(ab);
1238 if (ret)
1239 ath11k_warn(ab, "failed to late suspend core: %d\n", ret);
1240
1241 /* Similar to ath11k_pci_pm_suspend(), we return success here
1242 * even error happens, to allow system suspend/hibernation survive.
1243 */
1244 return 0;
1245 }
1246
ath11k_pci_pm_resume_early(struct device * dev)1247 static __maybe_unused int ath11k_pci_pm_resume_early(struct device *dev)
1248 {
1249 struct ath11k_base *ab = dev_get_drvdata(dev);
1250 int ret;
1251
1252 ret = ath11k_core_resume_early(ab);
1253 if (ret)
1254 ath11k_warn(ab, "failed to early resume core: %d\n", ret);
1255
1256 return ret;
1257 }
1258
/* Power-management callbacks: normal suspend/resume plus the late/early
 * variants. __maybe_unused because the table is only referenced when
 * CONFIG_PM is enabled.
 */
static const struct dev_pm_ops __maybe_unused ath11k_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend,
				ath11k_pci_pm_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(ath11k_pci_pm_suspend_late,
				     ath11k_pci_pm_resume_early)
};
1265
/* PCI driver glue: matches the IDs in ath11k_pci_id_table and wires up
 * probe/remove/shutdown plus (under CONFIG_PM) the PM callback table.
 */
static struct pci_driver ath11k_pci_driver = {
	.name = "ath11k_pci",
	.id_table = ath11k_pci_id_table,
	.probe = ath11k_pci_probe,
	.remove = ath11k_pci_remove,
	.shutdown = ath11k_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &ath11k_pci_pm_ops,
#endif
};
1276
ath11k_pci_init(void)1277 static int ath11k_pci_init(void)
1278 {
1279 int ret;
1280
1281 ret = pci_register_driver(&ath11k_pci_driver);
1282 if (ret)
1283 pr_err("failed to register ath11k pci driver: %d\n",
1284 ret);
1285
1286 return ret;
1287 }
1288 module_init(ath11k_pci_init);
1289
/* Module exit: unregister the PCI driver (detaching any bound devices). */
static void ath11k_pci_exit(void)
{
	pci_unregister_driver(&ath11k_pci_driver);
}
1294
1295 module_exit(ath11k_pci_exit);
1296
1297 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11ax WLAN devices");
1298 MODULE_LICENSE("Dual BSD/GPL");
1299
1300 /* firmware files */
1301 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/*");
1302 MODULE_FIRMWARE(ATH11K_FW_DIR "/QCN9074/hw1.0/*");
1303 MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.0/*");
1304 MODULE_FIRMWARE(ATH11K_FW_DIR "/WCN6855/hw2.1/*");
1305