// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>
#if defined(__FreeBSD__)
#include <linux/delay.h>
#endif

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		32

#define ATH12K_PCI_IRQ_CE0_OFFSET	3

#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

#define TCSR_SOC_HW_VERSION		0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)

/* BAR0 + 4K is always accessible, so no
 * need to force a wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF		0xFE0

#define QCN9274_DEVICE_ID		0x1109
#define WCN7850_DEVICE_ID		0x1107

static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);

/* TODO: revisit IRQ mapping for new SRNG's */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};

static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2host-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}

static void ath12k_pci_bus_release(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
	.wakeup = NULL,
	.release = NULL,
};

static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
	.wakeup = ath12k_pci_bus_wake_up,
	.release = ath12k_pci_bus_release,
};

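/* Register access beyond the first WINDOW_START bytes of BAR0 goes
 * through a sliding window: bits 19..24 of the target offset
 * (WINDOW_VALUE_MASK) select which 512 KiB (WINDOW_RANGE_MASK + 1)
 * chunk of the SoC address space is mapped at BAR0 + WINDOW_START.
 * The helpers below program the window register and cache the last
 * value written so redundant window switches are avoided.
 */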
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
#if defined(__linux__)
			  ab->mem + WINDOW_REG_ADDRESS);
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
#elif defined(__FreeBSD__)
			  (char *)ab->mem + WINDOW_REG_ADDRESS);
		ioread32((char *)ab->mem + WINDOW_REG_ADDRESS);
#endif
		ab_pci->register_window = window;
	}
}

static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
	u32 window;

	window = (umac_window << 12) | (ce_window << 6);

	spin_lock_bh(&ab_pci->window_lock);
	ab_pci->register_window = window;
	spin_unlock_bh(&ab_pci->window_lock);

#if defined(__linux__)
	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
#elif defined(__FreeBSD__)
	iowrite32(WINDOW_ENABLE_BIT | window, (char *)ab_pci->ab->mem + WINDOW_REG_ADDRESS);
#endif
}

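/* ath12k_pci_select_static_window() above pre-programs the CE and UMAC
 * register blocks into windows 2 and 3 on chips with a static window
 * map (QCN9274), leaving window 1 for dynamic remapping.  The XOR test
 * below checks whether the offset and a region base agree in all bits
 * above bit 18, i.e. whether the offset falls inside that region's
 * 512 KiB window.
 */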
static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
				       u32 offset)
{
	u32 window_start;

	/* If offset lies within DP register range, use 3rd window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
		window_start = 3 * WINDOW_START;
	/* If offset lies within CE register range, use 2nd window */
	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
		window_start = 2 * WINDOW_START;
	/* If offset lies within the PCI_BAR_WINDOW0_BASE range but
	 * outside the PCI_SOC_PCI_REG_BASE range, use 0th window
	 */
	else if (((offset ^ PCI_BAR_WINDOW0_BASE) < WINDOW_RANGE_MASK) &&
		 !((offset ^ PCI_SOC_PCI_REG_BASE) < PCI_SOC_RANGE_MASK))
		window_start = 0;
	else
		window_start = WINDOW_START;

	return window_start;
}

static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle the V bit back, otherwise the chip is stuck in reset */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");
}

static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing the warm path and entering a dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read-clear register; clear it to prevent Q6 from
	 * entering the wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}

static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIe link seems very unstable after the Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a WAR for PCIe hot reset.
	 * A hot reset still leaves the interrupt latched in the target,
	 * so when the SBL is downloaded again it enables interrupts,
	 * receives the stale one and crashes immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

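/* Software reset sequence used by power-up and power-down.  At power-on
 * the LTSSM/interrupt/power-control workarounds above are applied first,
 * since the PCIe link is reported to be unstable right after a hot reset.
 */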
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}

static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
	}
}

static void ath12k_pci_free_irq(struct ath12k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath12k_pci_free_ext_irq(ab);
}

static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_disable(ab, i);
	}
}

static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	ath12k_pci_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num);
}

static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	ath12k_pci_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num);
	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

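/* DP ("ext") interrupts are organized in groups: each ath12k_ext_irq_grp
 * owns a set of MSI vectors and a NAPI context.  The IRQ handler masks
 * the group's vectors and defers ring processing to NAPI, which unmasks
 * them again once the poll completes (see ath12k_pci_ext_grp_napi_poll()).
 */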
static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_pci_ext_irq_disable(struct ath12k_base *sc)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &sc->ext_irq_grp[i];

		ath12k_pci_ext_grp_disable(irq_grp);

		napi_synchronize(&irq_grp->napi);
		napi_disable(&irq_grp->napi);
	}
}

static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	int i;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath12k_ext_irq_grp,
							  napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		ath12k_pci_ext_grp_enable(irq_grp);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;

	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	ath12k_pci_ext_grp_disable(irq_grp);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	int i, j, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;

	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		init_dummy_netdev(&irq_grp->napi_ndev);
		netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		if (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  IRQF_SHARED,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed to request irq %d: %d\n",
					   vector, ret);
				return ret;
			}

			disable_irq_nosync(ab->irq_num[irq_idx]);
		}
	}

	return 0;
}

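/* Map CE interrupts onto the "CE" MSI user range.  CEs flagged with
 * CE_ATTR_DIS_INTR are polled and consume no MSI vector, so the MSI
 * data index is only advanced for interrupt-driven CEs.
 */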
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  IRQF_SHARED, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	cfg->tgt_ce = ab->hw_params->target_ce_config;
	cfg->tgt_ce_len = ab->hw_params->target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;
}

static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_enable(ab, i);
	}
}

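/* Toggle the per-function MSI enable bit directly in config space.
 * MSI delivery is kept disabled between vector allocation and
 * ath12k_pci_power_up(), presumably so the device cannot raise
 * interrupts before the host is ready to service them.
 */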
static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
	struct pci_dev *dev = ab_pci->pdev;
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, true);
}

static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, false);
}

static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);
	if (num_vectors != msi_config->total_vectors) {
		ath12k_err(ab, "failed to get %d MSI vectors, only %d available\n",
			   msi_config->total_vectors, num_vectors);

		if (num_vectors >= 0)
			return -EINVAL;
		else
			return num_vectors;
	}

	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

	return ret;
}

static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
	if (ret) {
		ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH12K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%pK\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   ab_pci->link_ctl & ~PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}

static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_write_word(ab_pci->pdev, PCI_EXP_LNKCTL,
					   ab_pci->link_ctl);
}

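/* CE interrupt teardown order matters: mask the IRQs, wait for
 * in-flight handlers to finish, then kill the tasklets they may have
 * scheduled (see ath12k_pci_ce_irq_disable_sync() below).
 */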
static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_kill_tasklets(ab);
}

int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		entry = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return pci_irq_vector(pci_dev, vector);
}

int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*user_base_data = msi_config->users[idx].base_vector
				+ ab_pci->msi_ep_base_data;
			*base_vector = msi_config->users[idx].base_vector;

			ath12k_dbg(ab, ATH12K_DBG_PCI, "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}

void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
				u32 *msi_addr_hi)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_hi);
	} else {
		*msi_addr_hi = 0;
	}
}

void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
			       u32 *msi_idx)
{
	u32 i, msi_data_idx;

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		if (ce_id == i)
			break;

		msi_data_idx++;
	}
	*msi_idx = msi_data_idx;
}

void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}

void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}

void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		napi_enable(&irq_grp->napi);
		ath12k_pci_ext_grp_enable(irq_grp);
	}
}

void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}

int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_suspend(ar_pci);

	return 0;
}

int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_resume(ar_pci);

	return 0;
}

void ath12k_pci_stop(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}

int ath12k_pci_start(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}

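/* Register accessors.  Offsets below WINDOW_START are mapped directly;
 * everything else goes through a (static or dynamic) window.  On targets
 * whose pci_ops provide wakeup/release hooks (WCN7850), accesses at or
 * beyond ACCESS_ALWAYS_OFF may additionally require an MHI wakeup.
 */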
u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* for an offset beyond BAR + 4K - 32, we may
	 * need to wake up MHI for the access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
#if defined(__linux__)
		val = ioread32(ab->mem + offset);
#elif defined(__FreeBSD__)
		val = ioread32((char *)ab->mem + offset);
#endif
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
#if defined(__linux__)
			val = ioread32(ab->mem + window_start +
#elif defined(__FreeBSD__)
			val = ioread32((char *)ab->mem + window_start +
#endif
				       (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

#if defined(__linux__)
			val = ioread32(ab->mem + window_start +
#elif defined(__FreeBSD__)
			val = ioread32((char *)ab->mem + window_start +
#endif
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);

	return val;
}

void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* for an offset beyond BAR + 4K - 32, we may
	 * need to wake up MHI for the access.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
#if defined(__linux__)
		iowrite32(value, ab->mem + offset);
#elif defined(__FreeBSD__)
		iowrite32(value, (char *)ab->mem + offset);
#endif
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);
#if defined(__linux__)
			iowrite32(value, ab->mem + window_start +
#elif defined(__FreeBSD__)
			iowrite32(value, (char *)ab->mem + window_start +
#endif
				  (offset & WINDOW_RANGE_MASK));
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			if ((!window_start) &&
			    (offset >= PCI_MHIREGLEN_REG &&
			     offset <= PCI_MHI_REGION_END))
				offset = offset - PCI_MHIREGLEN_REG;

#if defined(__linux__)
			iowrite32(value, ab->mem + window_start +
#elif defined(__FreeBSD__)
			iowrite32(value, (char *)ab->mem + window_start +
#endif
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}

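/* Power-up: reset the cached window state, run the SW reset sequence,
 * keep ASPM off while firmware is downloaded, then hand control to MHI.
 */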
int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}

void ath12k_pci_power_down(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* restore aspm in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}

static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
};

static
void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
{
	u32 soc_hw_version;

	soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
			   soc_hw_version);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
			   soc_hw_version);

	ath12k_dbg(ab, ATH12K_DBG_PCI,
		   "pci tcsr_soc_hw_version major %d minor %d\n",
		   *major, *minor);
}

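/* Probe: claim BAR0, pick the per-chip MSI config and pci_ops, read the
 * TCSR HW version to select hw_rev, then bring up MSI, MHI, HAL SRNG,
 * CE pipes and IRQs before handing over to the core.
 */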
static int ath12k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath12k_base *ab;
	struct ath12k_pci *ab_pci;
	u32 soc_hw_version_major, soc_hw_version_minor;
	int ret;

	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath12k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath12k_pci_hif_ops;
	spin_lock_init(&ab_pci->window_lock);

	ret = ath12k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath12k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	switch (pci_dev->device) {
	case QCN9274_DEVICE_ID:
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = true;
		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_QCN9274_HW20;
			break;
		case ATH12K_PCI_SOC_HW_VERSION_1:
			ab->hw_rev = ATH12K_HW_QCN9274_HW10;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for QCN9274: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	case WCN7850_DEVICE_ID:
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = false;
		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for WCN7850: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;

	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath12k_pci_msi_alloc(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_pci_msi_free;

	ret = ath12k_mhi_register(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_pci_msi_free;
	}

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_pci_init_qmi_ce_config(ab);

	ret = ath12k_pci_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}

	return 0;

err_free_irq:
	ath12k_pci_free_irq(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath12k_mhi_unregister(ab_pci);

err_pci_msi_free:
	ath12k_pci_msi_free(ab_pci);

err_pci_free_region:
	ath12k_pci_free_region(ab_pci);

err_free_core:
	ath12k_core_free(ab);

	return ret;
}

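/* If QMI firmware bring-up failed, the core was never registered; only
 * power down and tear down the QMI service before freeing bus resources.
 */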
static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab);
		ath12k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	cancel_work_sync(&ab->reset_work);
	ath12k_core_deinit(ab);

qmi_fail:
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_core_free(ab);
}

static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);

	ath12k_pci_power_down(ab);
}

static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend(ab);
	if (ret)
		ath12k_warn(ab, "failed to suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume(ab);
	if (ret)
		ath12k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}

static SIMPLE_DEV_PM_OPS(ath12k_pci_pm_ops,
			 ath12k_pci_pm_suspend,
			 ath12k_pci_pm_resume);

static struct pci_driver ath12k_pci_driver = {
	.name = "ath12k_pci",
	.id_table = ath12k_pci_id_table,
	.probe = ath12k_pci_probe,
	.remove = ath12k_pci_remove,
	.shutdown = ath12k_pci_shutdown,
	.driver.pm = &ath12k_pci_pm_ops,
};

static int ath12k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath12k_pci_driver);
	if (ret) {
		pr_err("failed to register ath12k pci driver: %d\n",
		       ret);
		return ret;
	}

	return 0;
}
module_init(ath12k_pci_init);

static void ath12k_pci_exit(void)
{
	pci_unregister_driver(&ath12k_pci_driver);
}

module_exit(ath12k_pci_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11be WLAN PCIe devices");
MODULE_LICENSE("Dual BSD/GPL");