// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

#define ATH11K_PCI_BAR_NUM		0
#define ATH11K_PCI_DMA_MASK		32

#define ATH11K_PCI_IRQ_CE0_OFFSET	3

#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)

#define TCSR_SOC_HW_VERSION		0x0224
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(16, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 0)

/* BAR0 + 4K is always accessible, so no wakeup needs to be forced.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

#define QCA6390_DEVICE_ID		0x1101

static const struct pci_device_id ath11k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCA6390_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath11k_pci_id_table);

static const struct ath11k_bus_params ath11k_pci_bus_params = {
	.mhi_support = true,
	.m3_fw_support = true,
	.fixed_bdf_addr = false,
	.fixed_mem_region = false,
};

static const struct ath11k_msi_config msi_config = {
	.total_vectors = 32,
	.total_users = 4,
	.users = (struct ath11k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 10, .base_vector = 3 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 13 },
		{ .name = "DP", .num_vectors = 18, .base_vector = 14 },
	},
};

static const char *irq_name[ATH11K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

static inline void ath11k_pci_select_window(struct ath11k_pci *ab_pci, u32 offset)
{
	struct ath11k_base *ab = ab_pci->ab;

	u32 window = FIELD_GET(WINDOW_VALUE_MASK, offset);

	lockdep_assert_held(&ab_pci->window_lock);

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
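		/* the read back above makes sure the window update has
		 * reached the hardware before the remapped region is used
		 */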
		ab_pci->register_window = window;
	}
}

void ath11k_pci_write32(struct ath11k_base *ab, u32 offset, u32 value)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	/* for offsets beyond BAR + 4K - 32, MHI may need to be woken up
	 * before the access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
		iowrite32(value, ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

u32 ath11k_pci_read32(struct ath11k_base *ab, u32 offset)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	u32 val;

	/* for offsets beyond BAR + 4K - 32, MHI may need to be woken up
	 * before the access.
	 */
	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);

	if (offset < WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
		spin_lock_bh(&ab_pci->window_lock);
		ath11k_pci_select_window(ab_pci, offset);
		val = ioread32(ab->mem + WINDOW_START + (offset & WINDOW_RANGE_MASK));
		spin_unlock_bh(&ab_pci->window_lock);
	}

	if (test_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF)
		mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);

	return val;
}

static void ath11k_pci_soc_global_reset(struct ath11k_base *ab)
{
	u32 val, delay;

	val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle the V bit back, otherwise the SoC stays stuck in reset */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath11k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath11k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath11k_warn(ab, "link down error during global reset\n");
}

static void ath11k_pci_clear_dbg_registers(struct ath11k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath11k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
	 * on the warm path and entering a dead loop.
	 */
	ath11k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath11k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* WLAON_SOC_RESET_CAUSE_REG is a read-to-clear register; clear it
	 * to prevent Q6 from entering the wrong code path.
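	 * The read below both logs and clears the pending reset cause.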
	 */
	val = ath11k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath11k_dbg(ab, ATH11K_DBG_PCI, "soc reset cause:%d\n", val);
}

static int ath11k_pci_set_link_reg(struct ath11k_base *ab,
				   u32 offset, u32 value, u32 mask)
{
	u32 v;
	int i;

	v = ath11k_pci_read32(ab, offset);
	if ((v & mask) == value)
		return 0;

	for (i = 0; i < 10; i++) {
		ath11k_pci_write32(ab, offset, (v & ~mask) | value);

		v = ath11k_pci_read32(ab, offset);
		if ((v & mask) == value)
			return 0;

		mdelay(2);
	}

	ath11k_warn(ab, "failed to set pcie link register 0x%08x: 0x%08x != 0x%08x\n",
		    offset, v & mask, value);

	return -ETIMEDOUT;
}

static int ath11k_pci_fix_l1ss(struct ath11k_base *ab)
{
	int ret;

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_REG,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_VAL,
				      PCIE_QSERDES_COM_SYSCLK_EN_SEL_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set sysclk: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_REG,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG1_VAL,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config1: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_REG,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG2_VAL,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config2: %d\n", ret);
		return ret;
	}

	ret = ath11k_pci_set_link_reg(ab,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_REG,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG4_VAL,
				      PCIE_USB3_PCS_MISC_OSC_DTCT_CONFIG_MSK);
	if (ret) {
		ath11k_warn(ab, "failed to set dtct config4: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath11k_pci_enable_ltssm(struct ath11k_base *ab)
{
	u32 val;
	int i;

	val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* The PCIe link seems very unstable after the Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath11k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath11k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath11k_dbg(ab, ATH11K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL | 0x10;
	ath11k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath11k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

static void ath11k_pci_clear_all_intrs(struct ath11k_base *ab)
{
	/* This is a workaround for the PCIe Hot Reset: the target latches
	 * an interrupt when the Hot Reset arrives, so when SBL is
	 * downloaded again it unmasks interrupts, services the stale one
	 * and crashes immediately.
	 */
	ath11k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

static void ath11k_pci_set_wlaon_pwr_ctrl(struct ath11k_base *ab)
{
	u32 val;

	val = ath11k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath11k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

static void ath11k_pci_force_wake(struct ath11k_base *ab)
{
	ath11k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

static void ath11k_pci_sw_reset(struct ath11k_base *ab, bool power_on)
{
	if (power_on) {
		ath11k_pci_enable_ltssm(ab);
		ath11k_pci_clear_all_intrs(ab);
		ath11k_pci_set_wlaon_pwr_ctrl(ab);
		ath11k_pci_fix_l1ss(ab);
	}

	ath11k_mhi_clear_vector(ab);
	ath11k_pci_soc_global_reset(ab);
	ath11k_mhi_set_mhictrl_reset(ab);
	ath11k_pci_clear_dbg_registers(ab);
}

int ath11k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return pci_irq_vector(pci_dev, vector);
}

static void ath11k_pci_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo,
				       u32 *msi_addr_hi)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (test_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_hi);
	} else {
		*msi_addr_hi = 0;
	}
}

int ath11k_pci_get_user_msi_assignment(struct ath11k_pci *ab_pci, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath11k_base *ab = ab_pci->ab;
	int idx;

	for (idx = 0; idx < msi_config.total_users; idx++) {
		if (strcmp(user_name, msi_config.users[idx].name) == 0) {
			*num_vectors = msi_config.users[idx].num_vectors;
			*user_base_data = msi_config.users[idx].base_vector
				+ ab_pci->msi_ep_base_data;
			*base_vector = msi_config.users[idx].base_vector;

			ath11k_dbg(ab, ATH11K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}

static int ath11k_get_user_msi_assignment(struct ath11k_base *ab, char *user_name,
					  int *num_vectors, u32 *user_base_data,
					  u32 *base_vector)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	return ath11k_pci_get_user_msi_assignment(ab_pci, user_name,
						  num_vectors, user_base_data,
						  base_vector);
}

static void ath11k_pci_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}

static void ath11k_pci_free_irq(struct ath11k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_pci_free_ext_irq(ab);
}

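/* Each CE pipe that keeps interrupts enabled owns one entry in ab->irq_num[];
 * its index is simply the pipe id offset by ATH11K_PCI_IRQ_CE0_OFFSET, which
 * is what the enable/disable helpers below rely on.
 */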
static void ath11k_pci_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath11k_pci_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath11k_pci_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pci_ce_irq_disable(ab, i);
	}
}

static void ath11k_pci_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath11k_pci_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath11k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath11k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;

	ath11k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static void ath11k_pci_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath11k_pci_ext_irq_disable(struct ath11k_base *sc)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];

		ath11k_pci_ext_grp_disable(irq_grp);

		napi_synchronize(&irq_grp->napi);
		napi_disable(&irq_grp->napi);
	}
}

static void ath11k_pci_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath11k_pci_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		napi_enable(&irq_grp->napi);
		ath11k_pci_ext_grp_enable(irq_grp);
	}
}

static void ath11k_pci_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static void ath11k_pci_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pci_ext_irq_disable(ab);
	ath11k_pci_sync_ext_irqs(ab);
}

static int ath11k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath11k_ext_irq_grp,
							  napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
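		/* re-enable the group's interrupts only after NAPI has
		 * finished processing
		 */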
		ath11k_pci_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath11k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq:%d\n", irq);

	ath11k_pci_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath11k_pci_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;

	ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab), "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pci_ext_grp_napi_poll, NAPI_POLL_WEIGHT);

		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_vector + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);
			ret = request_irq(irq, ath11k_pci_ext_interrupt_handler,
					  IRQF_SHARED,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				return ret;
			}

			disable_irq_nosync(ab->irq_num[irq_idx]);
		}
	}

	return 0;
}

static int ath11k_pci_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath11k_pci_get_user_msi_assignment(ath11k_pci_priv(ab),
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */
	for (i = 0; i < ab->hw_params.ce_count; i++) {
		msi_data = (i % msi_data_count) + msi_irq_start;
		irq = ath11k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pci_ce_tasklet);

		ret = request_irq(irq, ath11k_pci_ce_interrupt_handler,
				  IRQF_SHARED, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		ath11k_pci_ce_irq_disable(ab, i);
	}

	ret = ath11k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

static void ath11k_pci_init_qmi_ce_config(struct ath11k_base *ab)
{
	struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce = ab->hw_params.target_ce_config;
	cfg->tgt_ce_len = ab->hw_params.target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len;
	ab->qmi.service_ins_id = ATH11K_QMI_WLFW_SERVICE_INS_ID_V01_QCA6390;

	ath11k_ce_get_shadow_config(ab, &cfg->shadow_reg_v2,
				    &cfg->shadow_reg_v2_len);
}

static void ath11k_pci_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pci_ce_irq_enable(ab, i);
	}
}

static int ath11k_pci_enable_msi(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config.total_vectors,
					    msi_config.total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors != msi_config.total_vectors) {
		ath11k_err(ab, "failed to get %d MSI vectors, only %d available",
			   msi_config.total_vectors, num_vectors);

		if (num_vectors >= 0)
			return -EINVAL;
		else
			return num_vectors;
	}

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath11k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->msi_attrib.is_64)
		set_bit(ATH11K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath11k_dbg(ab, ATH11K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

	return ret;
}

static void ath11k_pci_disable_msi(struct ath11k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

static int ath11k_pci_claim(struct ath11k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath11k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath11k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH11K_PCI_BAR_NUM);
	if (ret) {
		ath11k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath11k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH11K_PCI_BAR_NUM, "ath11k_pci");
	if (ret) {
		ath11k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = pci_set_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	ret = pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(ATH11K_PCI_DMA_MASK));
	if (ret) {
		ath11k_err(ab, "failed to set pci consistent dma mask to %d: %d\n",
			   ATH11K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH11K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH11K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath11k_err(ab, "failed to map pci bar %d\n", ATH11K_PCI_BAR_NUM);
		ret = -EIO;
		goto clear_master;
	}

	ath11k_dbg(ab, ATH11K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
	return 0;

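/* error unwind: undo the PCI setup steps in reverse order */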
clear_master:
	pci_clear_master(pdev);
release_region:
	pci_release_region(pdev, ATH11K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

static void ath11k_pci_free_region(struct ath11k_pci *ab_pci)
{
	struct ath11k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_clear_master(pci_dev);
	pci_release_region(pci_dev, ATH11K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static int ath11k_pci_power_up(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_sw_reset(ab_pci->ab, true);

	ret = ath11k_mhi_start(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	return 0;
}

static void ath11k_pci_power_down(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	ath11k_pci_force_wake(ab_pci->ab);
	ath11k_mhi_stop(ab_pci);
	clear_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath11k_pci_sw_reset(ab_pci->ab, false);
}

static int ath11k_pci_hif_suspend(struct ath11k_base *ab)
{
	struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);

	ath11k_mhi_suspend(ar_pci);

	return 0;
}

static int ath11k_pci_hif_resume(struct ath11k_base *ab)
{
	struct ath11k_pci *ar_pci = ath11k_pci_priv(ab);

	ath11k_mhi_resume(ar_pci);

	return 0;
}

static void ath11k_pci_kill_tasklets(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath11k_pci_ce_irq_disable_sync(struct ath11k_base *ab)
{
	ath11k_pci_ce_irqs_disable(ab);
	ath11k_pci_sync_ce_irqs(ab);
	ath11k_pci_kill_tasklets(ab);
}

static void ath11k_pci_stop(struct ath11k_base *ab)
{
	ath11k_pci_ce_irq_disable_sync(ab);
	ath11k_ce_cleanup_pipes(ab);
}

static int ath11k_pci_start(struct ath11k_base *ab)
{
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	set_bit(ATH11K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	ath11k_pci_ce_irqs_enable(ab);
	ath11k_ce_rx_post_buf(ab);

	return 0;
}

static void ath11k_pci_hif_ce_irq_enable(struct ath11k_base *ab)
{
	ath11k_pci_ce_irqs_enable(ab);
}

static void ath11k_pci_hif_ce_irq_disable(struct ath11k_base *ab)
{
	ath11k_pci_ce_irq_disable_sync(ab);
}

static int ath11k_pci_map_service_to_pipe(struct ath11k_base *ab, u16 service_id,
					  u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) {
		entry = &ab->hw_params.svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

static const struct ath11k_hif_ops ath11k_pci_hif_ops = {
	.start = ath11k_pci_start,
	.stop = ath11k_pci_stop,
	.read32 = ath11k_pci_read32,
	.write32 = ath11k_pci_write32,
	.power_down = ath11k_pci_power_down,
	.power_up = ath11k_pci_power_up,
	.suspend = ath11k_pci_hif_suspend,
	.resume = ath11k_pci_hif_resume,
	.irq_enable = ath11k_pci_ext_irq_enable,
	.irq_disable = ath11k_pci_ext_irq_disable,
	.get_msi_address = ath11k_pci_get_msi_address,
	.get_user_msi_vector = ath11k_get_user_msi_assignment,
	.map_service_to_pipe = ath11k_pci_map_service_to_pipe,
	.ce_irq_enable = ath11k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath11k_pci_hif_ce_irq_disable,
};

static int ath11k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath11k_base *ab;
	struct ath11k_pci *ab_pci;
	u32 soc_hw_version, soc_hw_version_major, soc_hw_version_minor;
	int ret;

	dev_warn(&pdev->dev, "WARNING: ath11k PCI support is experimental!\n");

	ab = ath11k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH11K_BUS_PCI,
			       &ath11k_pci_bus_params);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath11k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath11k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath11k_pci_hif_ops;
	spin_lock_init(&ab_pci->window_lock);

	ret = ath11k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath11k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	switch (pci_dev->device) {
	case QCA6390_DEVICE_ID:
		soc_hw_version = ath11k_pci_read32(ab, TCSR_SOC_HW_VERSION);
		soc_hw_version_major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
						 soc_hw_version);
		soc_hw_version_minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
						 soc_hw_version);

		ath11k_dbg(ab, ATH11K_DBG_PCI, "pci tcsr_soc_hw_version major %d minor %d\n",
			   soc_hw_version_major, soc_hw_version_minor);

		switch (soc_hw_version_major) {
		case 2:
			ab->hw_rev = ATH11K_HW_QCA6390_HW20;
			break;
		default:
			dev_err(&pdev->dev, "Unsupported QCA6390 SOC hardware version: %d %d\n",
				soc_hw_version_major, soc_hw_version_minor);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath11k_pci_enable_msi(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to enable msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath11k_core_pre_init(ab);
	if (ret)
		goto err_pci_disable_msi;

	ret = ath11k_mhi_register(ab_pci);
	if (ret) {
		ath11k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_pci_disable_msi;
	}

	ret = ath11k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath11k_ce_alloc_pipes(ab);
	if (ret) {
		ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath11k_pci_init_qmi_ce_config(ab);

	ret = ath11k_pci_config_irq(ab);
	if (ret) {
		ath11k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath11k_core_init(ab);
	if (ret) {
		ath11k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath11k_pci_free_irq(ab);

err_ce_free:
	ath11k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath11k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath11k_mhi_unregister(ab_pci);

err_pci_disable_msi:
	ath11k_pci_disable_msi(ab_pci);

err_pci_free_region:
	ath11k_pci_free_region(ab_pci);

err_free_core:
	ath11k_core_free(ab);

	return ret;
}

static void ath11k_pci_remove(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);
	struct ath11k_pci *ab_pci = ath11k_pci_priv(ab);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath11k_pci_power_down(ab);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags);

	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_mhi_unregister(ab_pci);

	ath11k_pci_free_irq(ab);
	ath11k_pci_disable_msi(ab_pci);
	ath11k_pci_free_region(ab_pci);

	ath11k_hal_srng_deinit(ab);
	ath11k_ce_free_pipes(ab);
	ath11k_core_free(ab);
}

static void ath11k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath11k_base *ab = pci_get_drvdata(pdev);

	ath11k_pci_power_down(ab);
}

static __maybe_unused int ath11k_pci_pm_suspend(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath11k_core_suspend(ab);
	if (ret)
		ath11k_warn(ab, "failed to suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath11k_pci_pm_resume(struct device *dev)
{
	struct ath11k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath11k_core_resume(ab);
	if (ret)
		ath11k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath11k_pci_pm_ops,
			 ath11k_pci_pm_suspend,
			 ath11k_pci_pm_resume);

static struct pci_driver ath11k_pci_driver = {
	.name = "ath11k_pci",
	.id_table = ath11k_pci_id_table,
	.probe = ath11k_pci_probe,
	.remove = ath11k_pci_remove,
	.shutdown = ath11k_pci_shutdown,
#ifdef CONFIG_PM
	.driver.pm = &ath11k_pci_pm_ops,
#endif
};

static int ath11k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath11k_pci_driver);
	if (ret)
		pr_err("failed to register ath11k pci driver: %d\n",
		       ret);

	return ret;
}
module_init(ath11k_pci_init);

static void ath11k_pci_exit(void)
{
	pci_unregister_driver(&ath11k_pci_driver);
}

module_exit(ath11k_pci_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");

/* QCA639x 2.0 firmware files */
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_BOARD_API2_FILE);
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_AMSS_FILE);
MODULE_FIRMWARE(ATH11K_FW_DIR "/QCA6390/hw2.0/" ATH11K_M3_FILE);