1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/module.h> 8 #include <linux/msi.h> 9 #include <linux/pci.h> 10 11 #include "pci.h" 12 #include "core.h" 13 #include "hif.h" 14 #include "mhi.h" 15 #include "debug.h" 16 17 #define ATH12K_PCI_BAR_NUM 0 18 #define ATH12K_PCI_DMA_MASK 32 19 20 #define ATH12K_PCI_IRQ_CE0_OFFSET 3 21 22 #define WINDOW_ENABLE_BIT 0x40000000 23 #define WINDOW_REG_ADDRESS 0x310c 24 #define WINDOW_VALUE_MASK GENMASK(24, 19) 25 #define WINDOW_START 0x80000 26 #define WINDOW_RANGE_MASK GENMASK(18, 0) 27 #define WINDOW_STATIC_MASK GENMASK(31, 6) 28 29 #define TCSR_SOC_HW_VERSION 0x1B00000 30 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) 31 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4) 32 33 /* BAR0 + 4k is always accessible, and no 34 * need to force wakeup. 35 * 4K - 32 = 0xFE0 36 */ 37 #define ACCESS_ALWAYS_OFF 0xFE0 38 39 #define QCN9274_DEVICE_ID 0x1109 40 #define WCN7850_DEVICE_ID 0x1107 41 42 #define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164 43 #define DOMAIN_NUMBER_MASK GENMASK(7, 4) 44 #define BUS_NUMBER_MASK GENMASK(3, 0) 45 46 static const struct pci_device_id ath12k_pci_id_table[] = { 47 { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) }, 48 { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) }, 49 {0} 50 }; 51 52 MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table); 53 54 /* TODO: revisit IRQ mapping for new SRNG's */ 55 static const struct ath12k_msi_config ath12k_msi_config[] = { 56 { 57 .total_vectors = 16, 58 .total_users = 3, 59 .users = (struct ath12k_msi_user[]) { 60 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 61 { .name = "CE", .num_vectors = 5, .base_vector = 3 }, 62 { .name = "DP", .num_vectors = 8, .base_vector = 8 }, 63 }, 64 }, 65 }; 66 67 static const struct ath12k_msi_config msi_config_one_msi = { 68 .total_vectors = 1, 69 .total_users = 4, 70 .users = (struct ath12k_msi_user[]) { 71 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 72 { .name = "CE", .num_vectors = 1, .base_vector = 0 }, 73 { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, 74 { .name = "DP", .num_vectors = 1, .base_vector = 0 }, 75 }, 76 }; 77 78 static const char *irq_name[ATH12K_IRQ_NUM_MAX] = { 79 "bhi", 80 "mhi-er0", 81 "mhi-er1", 82 "ce0", 83 "ce1", 84 "ce2", 85 "ce3", 86 "ce4", 87 "ce5", 88 "ce6", 89 "ce7", 90 "ce8", 91 "ce9", 92 "ce10", 93 "ce11", 94 "ce12", 95 "ce13", 96 "ce14", 97 "ce15", 98 "host2wbm-desc-feed", 99 "host2reo-re-injection", 100 "host2reo-command", 101 "host2rxdma-monitor-ring3", 102 "host2rxdma-monitor-ring2", 103 "host2rxdma-monitor-ring1", 104 "reo2ost-exception", 105 "wbm2host-rx-release", 106 "reo2host-status", 107 "reo2host-destination-ring4", 108 "reo2host-destination-ring3", 109 "reo2host-destination-ring2", 110 "reo2host-destination-ring1", 111 "rxdma2host-monitor-destination-mac3", 112 "rxdma2host-monitor-destination-mac2", 113 "rxdma2host-monitor-destination-mac1", 114 "ppdu-end-interrupts-mac3", 115 "ppdu-end-interrupts-mac2", 116 "ppdu-end-interrupts-mac1", 117 "rxdma2host-monitor-status-ring-mac3", 118 "rxdma2host-monitor-status-ring-mac2", 119 "rxdma2host-monitor-status-ring-mac1", 120 "host2rxdma-host-buf-ring-mac3", 121 "host2rxdma-host-buf-ring-mac2", 122 "host2rxdma-host-buf-ring-mac1", 123 "rxdma2host-destination-ring-mac3", 124 "rxdma2host-destination-ring-mac2", 125 "rxdma2host-destination-ring-mac1", 126 "host2tcl-input-ring4", 127 "host2tcl-input-ring3", 128 "host2tcl-input-ring2", 129 "host2tcl-input-ring1", 130 "wbm2host-tx-completions-ring4", 131 "wbm2host-tx-completions-ring3", 132 "wbm2host-tx-completions-ring2", 133 "wbm2host-tx-completions-ring1", 134 "tcl2host-status-ring", 135 }; 136 137 static int ath12k_pci_bus_wake_up(struct ath12k_base *ab) 138 { 139 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 140 141 return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); 142 } 143 144 static void ath12k_pci_bus_release(struct ath12k_base *ab) 145 { 146 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 147 148 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); 149 } 150 151 static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = { 152 .wakeup = NULL, 153 .release = NULL, 154 }; 155 156 static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = { 157 .wakeup = ath12k_pci_bus_wake_up, 158 .release = ath12k_pci_bus_release, 159 }; 160 161 static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset) 162 { 163 struct ath12k_base *ab = ab_pci->ab; 164 165 u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK); 166 u32 static_window; 167 168 lockdep_assert_held(&ab_pci->window_lock); 169 170 /* Preserve the static window configuration and reset only dynamic window */ 171 static_window = ab_pci->register_window & WINDOW_STATIC_MASK; 172 window |= static_window; 173 174 if (window != ab_pci->register_window) { 175 iowrite32(WINDOW_ENABLE_BIT | window, 176 ab->mem + WINDOW_REG_ADDRESS); 177 ioread32(ab->mem + WINDOW_REG_ADDRESS); 178 ab_pci->register_window = window; 179 } 180 } 181 182 static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci) 183 { 184 u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK); 185 u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK); 186 u32 window; 187 188 window = (umac_window << 12) | (ce_window << 6); 189 190 spin_lock_bh(&ab_pci->window_lock); 191 ab_pci->register_window = window; 192 spin_unlock_bh(&ab_pci->window_lock); 193 194 iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); 195 } 196 197 static u32 ath12k_pci_get_window_start(struct ath12k_base *ab, 198 u32 offset) 199 { 200 u32 window_start; 201 202 /* If offset lies within DP register range, use 3rd window */ 203 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) 204 window_start = 3 * WINDOW_START; 205 /* If offset lies within CE register range, use 2nd window */ 206 else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) 207 window_start = 2 * WINDOW_START; 208 else 209 window_start = WINDOW_START; 210 211 return window_start; 212 } 213 214 static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset) 215 { 216 return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END); 217 } 218 219 static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) 220 { 221 u32 val, delay; 222 223 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 224 225 val |= PCIE_SOC_GLOBAL_RESET_V; 226 227 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 228 229 /* TODO: exact time to sleep is uncertain */ 230 delay = 10; 231 mdelay(delay); 232 233 /* Need to toggle V bit back otherwise stuck in reset status */ 234 val &= ~PCIE_SOC_GLOBAL_RESET_V; 235 236 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 237 238 mdelay(delay); 239 240 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 241 if (val == 0xffffffff) 242 ath12k_warn(ab, "link down error during global reset\n"); 243 } 244 245 static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab) 246 { 247 u32 val; 248 249 /* read cookie */ 250 val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); 251 ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val); 252 253 val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 254 ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 255 256 /* TODO: exact time to sleep is uncertain */ 257 mdelay(10); 258 259 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from 260 * continuing warm path and entering dead loop. 261 */ 262 ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); 263 mdelay(10); 264 265 val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 266 ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 267 268 /* A read clear register. clear the register to prevent 269 * Q6 from entering wrong code path. 270 */ 271 val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); 272 ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val); 273 } 274 275 static void ath12k_pci_enable_ltssm(struct ath12k_base *ab) 276 { 277 u32 val; 278 int i; 279 280 val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 281 282 /* PCIE link seems very unstable after the Hot Reset*/ 283 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { 284 if (val == 0xffffffff) 285 mdelay(5); 286 287 ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); 288 val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 289 } 290 291 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val); 292 293 val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 294 val |= GCC_GCC_PCIE_HOT_RST_VAL; 295 ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); 296 val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 297 298 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); 299 300 mdelay(5); 301 } 302 303 static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab) 304 { 305 /* This is a WAR for PCIE Hotreset. 306 * When target receive Hotreset, but will set the interrupt. 307 * So when download SBL again, SBL will open Interrupt and 308 * receive it, and crash immediately. 309 */ 310 ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); 311 } 312 313 static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab) 314 { 315 u32 val; 316 317 val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); 318 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; 319 ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); 320 } 321 322 static void ath12k_pci_force_wake(struct ath12k_base *ab) 323 { 324 ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); 325 mdelay(5); 326 } 327 328 static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on) 329 { 330 if (power_on) { 331 ath12k_pci_enable_ltssm(ab); 332 ath12k_pci_clear_all_intrs(ab); 333 ath12k_pci_set_wlaon_pwr_ctrl(ab); 334 } 335 336 ath12k_mhi_clear_vector(ab); 337 ath12k_pci_clear_dbg_registers(ab); 338 ath12k_pci_soc_global_reset(ab); 339 ath12k_mhi_set_mhictrl_reset(ab); 340 } 341 342 static void ath12k_pci_free_ext_irq(struct ath12k_base *ab) 343 { 344 int i, j; 345 346 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 347 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 348 349 for (j = 0; j < irq_grp->num_irq; j++) 350 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); 351 352 netif_napi_del(&irq_grp->napi); 353 } 354 } 355 356 static void ath12k_pci_free_irq(struct ath12k_base *ab) 357 { 358 int i, irq_idx; 359 360 for (i = 0; i < ab->hw_params->ce_count; i++) { 361 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 362 continue; 363 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 364 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); 365 } 366 367 ath12k_pci_free_ext_irq(ab); 368 } 369 370 static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id) 371 { 372 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 373 u32 irq_idx; 374 375 /* In case of one MSI vector, we handle irq enable/disable in a 376 * uniform way since we only have one irq 377 */ 378 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 379 return; 380 381 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; 382 enable_irq(ab->irq_num[irq_idx]); 383 } 384 385 static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id) 386 { 387 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 388 u32 irq_idx; 389 390 /* In case of one MSI vector, we handle irq enable/disable in a 391 * uniform way since we only have one irq 392 */ 393 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 394 return; 395 396 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; 397 disable_irq_nosync(ab->irq_num[irq_idx]); 398 } 399 400 static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab) 401 { 402 int i; 403 404 clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); 405 406 for (i = 0; i < ab->hw_params->ce_count; i++) { 407 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 408 continue; 409 ath12k_pci_ce_irq_disable(ab, i); 410 } 411 } 412 413 static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab) 414 { 415 int i; 416 int irq_idx; 417 418 for (i = 0; i < ab->hw_params->ce_count; i++) { 419 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 420 continue; 421 422 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 423 synchronize_irq(ab->irq_num[irq_idx]); 424 } 425 } 426 427 static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) 428 { 429 struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); 430 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; 431 432 ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); 433 434 enable_irq(ce_pipe->ab->irq_num[irq_idx]); 435 } 436 437 static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) 438 { 439 struct ath12k_ce_pipe *ce_pipe = arg; 440 struct ath12k_base *ab = ce_pipe->ab; 441 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; 442 443 if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) 444 return IRQ_HANDLED; 445 446 /* last interrupt received for this CE */ 447 ce_pipe->timestamp = jiffies; 448 449 disable_irq_nosync(ab->irq_num[irq_idx]); 450 451 tasklet_schedule(&ce_pipe->intr_tq); 452 453 return IRQ_HANDLED; 454 } 455 456 static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) 457 { 458 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); 459 int i; 460 461 /* In case of one MSI vector, we handle irq enable/disable 462 * in a uniform way since we only have one irq 463 */ 464 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 465 return; 466 467 for (i = 0; i < irq_grp->num_irq; i++) 468 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 469 } 470 471 static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 472 { 473 int i; 474 475 clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 476 477 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 478 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 479 480 ath12k_pci_ext_grp_disable(irq_grp); 481 482 napi_synchronize(&irq_grp->napi); 483 napi_disable(&irq_grp->napi); 484 } 485 } 486 487 static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp) 488 { 489 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); 490 int i; 491 492 /* In case of one MSI vector, we handle irq enable/disable in a 493 * uniform way since we only have one irq 494 */ 495 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 496 return; 497 498 for (i = 0; i < irq_grp->num_irq; i++) 499 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 500 } 501 502 static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab) 503 { 504 int i, j, irq_idx; 505 506 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 507 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 508 509 for (j = 0; j < irq_grp->num_irq; j++) { 510 irq_idx = irq_grp->irqs[j]; 511 synchronize_irq(ab->irq_num[irq_idx]); 512 } 513 } 514 } 515 516 static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) 517 { 518 struct ath12k_ext_irq_grp *irq_grp = container_of(napi, 519 struct ath12k_ext_irq_grp, 520 napi); 521 struct ath12k_base *ab = irq_grp->ab; 522 int work_done; 523 int i; 524 525 work_done = ath12k_dp_service_srng(ab, irq_grp, budget); 526 if (work_done < budget) { 527 napi_complete_done(napi, work_done); 528 for (i = 0; i < irq_grp->num_irq; i++) 529 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 530 } 531 532 if (work_done > budget) 533 work_done = budget; 534 535 return work_done; 536 } 537 538 static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) 539 { 540 struct ath12k_ext_irq_grp *irq_grp = arg; 541 struct ath12k_base *ab = irq_grp->ab; 542 int i; 543 544 if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) 545 return IRQ_HANDLED; 546 547 ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq); 548 549 /* last interrupt received for this group */ 550 irq_grp->timestamp = jiffies; 551 552 for (i = 0; i < irq_grp->num_irq; i++) 553 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 554 555 napi_schedule(&irq_grp->napi); 556 557 return IRQ_HANDLED; 558 } 559 560 static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) 561 { 562 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 563 int i, j, ret, num_vectors = 0; 564 u32 user_base_data = 0, base_vector = 0, base_idx; 565 566 base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; 567 ret = ath12k_pci_get_user_msi_assignment(ab, "DP", 568 &num_vectors, 569 &user_base_data, 570 &base_vector); 571 if (ret < 0) 572 return ret; 573 574 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 575 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 576 u32 num_irq = 0; 577 578 irq_grp->ab = ab; 579 irq_grp->grp_id = i; 580 init_dummy_netdev(&irq_grp->napi_ndev); 581 netif_napi_add(&irq_grp->napi_ndev, &irq_grp->napi, 582 ath12k_pci_ext_grp_napi_poll); 583 584 if (ab->hw_params->ring_mask->tx[i] || 585 ab->hw_params->ring_mask->rx[i] || 586 ab->hw_params->ring_mask->rx_err[i] || 587 ab->hw_params->ring_mask->rx_wbm_rel[i] || 588 ab->hw_params->ring_mask->reo_status[i] || 589 ab->hw_params->ring_mask->host2rxdma[i] || 590 ab->hw_params->ring_mask->rx_mon_dest[i]) { 591 num_irq = 1; 592 } 593 594 irq_grp->num_irq = num_irq; 595 irq_grp->irqs[0] = base_idx + i; 596 597 for (j = 0; j < irq_grp->num_irq; j++) { 598 int irq_idx = irq_grp->irqs[j]; 599 int vector = (i % num_vectors) + base_vector; 600 int irq = ath12k_pci_get_msi_irq(ab->dev, vector); 601 602 ab->irq_num[irq_idx] = irq; 603 604 ath12k_dbg(ab, ATH12K_DBG_PCI, 605 "irq:%d group:%d\n", irq, i); 606 607 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); 608 ret = request_irq(irq, ath12k_pci_ext_interrupt_handler, 609 ab_pci->irq_flags, 610 "DP_EXT_IRQ", irq_grp); 611 if (ret) { 612 ath12k_err(ab, "failed request irq %d: %d\n", 613 vector, ret); 614 return ret; 615 } 616 } 617 ath12k_pci_ext_grp_disable(irq_grp); 618 } 619 620 return 0; 621 } 622 623 static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci, 624 const struct cpumask *m) 625 { 626 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 627 return 0; 628 629 return irq_set_affinity_hint(ab_pci->pdev->irq, m); 630 } 631 632 static int ath12k_pci_config_irq(struct ath12k_base *ab) 633 { 634 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 635 struct ath12k_ce_pipe *ce_pipe; 636 u32 msi_data_start; 637 u32 msi_data_count, msi_data_idx; 638 u32 msi_irq_start; 639 unsigned int msi_data; 640 int irq, i, ret, irq_idx; 641 642 ret = ath12k_pci_get_user_msi_assignment(ab, 643 "CE", &msi_data_count, 644 &msi_data_start, &msi_irq_start); 645 if (ret) 646 return ret; 647 648 /* Configure CE irqs */ 649 650 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { 651 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 652 continue; 653 654 msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; 655 irq = ath12k_pci_get_msi_irq(ab->dev, msi_data); 656 ce_pipe = &ab->ce.ce_pipe[i]; 657 658 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 659 660 tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet); 661 662 ret = request_irq(irq, ath12k_pci_ce_interrupt_handler, 663 ab_pci->irq_flags, irq_name[irq_idx], 664 ce_pipe); 665 if (ret) { 666 ath12k_err(ab, "failed to request irq %d: %d\n", 667 irq_idx, ret); 668 return ret; 669 } 670 671 ab->irq_num[irq_idx] = irq; 672 msi_data_idx++; 673 674 ath12k_pci_ce_irq_disable(ab, i); 675 } 676 677 ret = ath12k_pci_ext_irq_config(ab); 678 if (ret) 679 return ret; 680 681 return 0; 682 } 683 684 static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab) 685 { 686 struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; 687 688 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 689 struct pci_bus *bus = ab_pci->pdev->bus; 690 691 cfg->tgt_ce = ab->hw_params->target_ce_config; 692 cfg->tgt_ce_len = ab->hw_params->target_ce_count; 693 694 cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; 695 cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; 696 ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; 697 698 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) { 699 ab_pci->qmi_instance = 700 u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) | 701 u32_encode_bits(bus->number, BUS_NUMBER_MASK); 702 ab->qmi.service_ins_id += ab_pci->qmi_instance; 703 } 704 } 705 706 static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) 707 { 708 int i; 709 710 set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); 711 712 for (i = 0; i < ab->hw_params->ce_count; i++) { 713 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 714 continue; 715 ath12k_pci_ce_irq_enable(ab, i); 716 } 717 } 718 719 static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable) 720 { 721 struct pci_dev *dev = ab_pci->pdev; 722 u16 control; 723 724 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 725 726 if (enable) 727 control |= PCI_MSI_FLAGS_ENABLE; 728 else 729 control &= ~PCI_MSI_FLAGS_ENABLE; 730 731 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); 732 } 733 734 static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci) 735 { 736 ath12k_pci_msi_config(ab_pci, true); 737 } 738 739 static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci) 740 { 741 ath12k_pci_msi_config(ab_pci, false); 742 } 743 744 static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) 745 { 746 struct ath12k_base *ab = ab_pci->ab; 747 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 748 struct msi_desc *msi_desc; 749 int num_vectors; 750 int ret; 751 752 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 753 msi_config->total_vectors, 754 msi_config->total_vectors, 755 PCI_IRQ_MSI); 756 757 if (num_vectors == msi_config->total_vectors) { 758 set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 759 ab_pci->irq_flags = IRQF_SHARED; 760 } else { 761 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 762 1, 763 1, 764 PCI_IRQ_MSI); 765 if (num_vectors < 0) { 766 ret = -EINVAL; 767 goto reset_msi_config; 768 } 769 clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 770 ab_pci->msi_config = &msi_config_one_msi; 771 ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; 772 ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n"); 773 } 774 775 ath12k_info(ab, "MSI vectors: %d\n", num_vectors); 776 777 ath12k_pci_msi_disable(ab_pci); 778 779 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 780 if (!msi_desc) { 781 ath12k_err(ab, "msi_desc is NULL!\n"); 782 ret = -EINVAL; 783 goto free_msi_vector; 784 } 785 786 ab_pci->msi_ep_base_data = msi_desc->msg.data; 787 if (msi_desc->pci.msi_attrib.is_64) 788 set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); 789 790 ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); 791 792 return 0; 793 794 free_msi_vector: 795 pci_free_irq_vectors(ab_pci->pdev); 796 797 reset_msi_config: 798 return ret; 799 } 800 801 static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci) 802 { 803 pci_free_irq_vectors(ab_pci->pdev); 804 } 805 806 static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci) 807 { 808 struct msi_desc *msi_desc; 809 810 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 811 if (!msi_desc) { 812 ath12k_err(ab_pci->ab, "msi_desc is NULL!\n"); 813 pci_free_irq_vectors(ab_pci->pdev); 814 return -EINVAL; 815 } 816 817 ab_pci->msi_ep_base_data = msi_desc->msg.data; 818 819 ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", 820 ab_pci->msi_ep_base_data); 821 822 return 0; 823 } 824 825 static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) 826 { 827 struct ath12k_base *ab = ab_pci->ab; 828 u16 device_id; 829 int ret = 0; 830 831 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 832 if (device_id != ab_pci->dev_id) { 833 ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", 834 device_id, ab_pci->dev_id); 835 ret = -EIO; 836 goto out; 837 } 838 839 ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM); 840 if (ret) { 841 ath12k_err(ab, "failed to assign pci resource: %d\n", ret); 842 goto out; 843 } 844 845 ret = pci_enable_device(pdev); 846 if (ret) { 847 ath12k_err(ab, "failed to enable pci device: %d\n", ret); 848 goto out; 849 } 850 851 ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci"); 852 if (ret) { 853 ath12k_err(ab, "failed to request pci region: %d\n", ret); 854 goto disable_device; 855 } 856 857 ret = dma_set_mask_and_coherent(&pdev->dev, 858 DMA_BIT_MASK(ATH12K_PCI_DMA_MASK)); 859 if (ret) { 860 ath12k_err(ab, "failed to set pci dma mask to %d: %d\n", 861 ATH12K_PCI_DMA_MASK, ret); 862 goto release_region; 863 } 864 865 pci_set_master(pdev); 866 867 ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM); 868 ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0); 869 if (!ab->mem) { 870 ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM); 871 ret = -EIO; 872 goto release_region; 873 } 874 875 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem); 876 return 0; 877 878 release_region: 879 pci_release_region(pdev, ATH12K_PCI_BAR_NUM); 880 disable_device: 881 pci_disable_device(pdev); 882 out: 883 return ret; 884 } 885 886 static void ath12k_pci_free_region(struct ath12k_pci *ab_pci) 887 { 888 struct ath12k_base *ab = ab_pci->ab; 889 struct pci_dev *pci_dev = ab_pci->pdev; 890 891 pci_iounmap(pci_dev, ab->mem); 892 ab->mem = NULL; 893 pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM); 894 if (pci_is_enabled(pci_dev)) 895 pci_disable_device(pci_dev); 896 } 897 898 static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) 899 { 900 struct ath12k_base *ab = ab_pci->ab; 901 902 pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, 903 &ab_pci->link_ctl); 904 905 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n", 906 ab_pci->link_ctl, 907 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), 908 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); 909 910 /* disable L0s and L1 */ 911 pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, 912 PCI_EXP_LNKCTL_ASPMC); 913 914 set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); 915 } 916 917 static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab) 918 { 919 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 920 u32 reg; 921 922 /* On platforms with two or more identical mhi devices, qmi service run 923 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup 924 * cannot register more than one qmi service with identical node ID. 925 * 926 * This generates a unique instance ID from PCIe domain number and bus number, 927 * writes to the given register, it is available for firmware when the QMI service 928 * is spawned. 929 */ 930 reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK; 931 ath12k_pci_write32(ab, reg, ab_pci->qmi_instance); 932 933 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n", 934 reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg)); 935 } 936 937 static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) 938 { 939 if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) 940 pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, 941 PCI_EXP_LNKCTL_ASPMC, 942 ab_pci->link_ctl & 943 PCI_EXP_LNKCTL_ASPMC); 944 } 945 946 static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) 947 { 948 int i; 949 950 for (i = 0; i < ab->hw_params->ce_count; i++) { 951 struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 952 953 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 954 continue; 955 956 tasklet_kill(&ce_pipe->intr_tq); 957 } 958 } 959 960 static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab) 961 { 962 ath12k_pci_ce_irqs_disable(ab); 963 ath12k_pci_sync_ce_irqs(ab); 964 ath12k_pci_kill_tasklets(ab); 965 } 966 967 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, 968 u8 *ul_pipe, u8 *dl_pipe) 969 { 970 const struct service_to_pipe *entry; 971 bool ul_set = false, dl_set = false; 972 int i; 973 974 for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { 975 entry = &ab->hw_params->svc_to_ce_map[i]; 976 977 if (__le32_to_cpu(entry->service_id) != service_id) 978 continue; 979 980 switch (__le32_to_cpu(entry->pipedir)) { 981 case PIPEDIR_NONE: 982 break; 983 case PIPEDIR_IN: 984 WARN_ON(dl_set); 985 *dl_pipe = __le32_to_cpu(entry->pipenum); 986 dl_set = true; 987 break; 988 case PIPEDIR_OUT: 989 WARN_ON(ul_set); 990 *ul_pipe = __le32_to_cpu(entry->pipenum); 991 ul_set = true; 992 break; 993 case PIPEDIR_INOUT: 994 WARN_ON(dl_set); 995 WARN_ON(ul_set); 996 *dl_pipe = __le32_to_cpu(entry->pipenum); 997 *ul_pipe = __le32_to_cpu(entry->pipenum); 998 dl_set = true; 999 ul_set = true; 1000 break; 1001 } 1002 } 1003 1004 if (WARN_ON(!ul_set || !dl_set)) 1005 return -ENOENT; 1006 1007 return 0; 1008 } 1009 1010 int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector) 1011 { 1012 struct pci_dev *pci_dev = to_pci_dev(dev); 1013 1014 return pci_irq_vector(pci_dev, vector); 1015 } 1016 1017 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, 1018 int *num_vectors, u32 *user_base_data, 1019 u32 *base_vector) 1020 { 1021 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1022 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 1023 int idx; 1024 1025 for (idx = 0; idx < msi_config->total_users; idx++) { 1026 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 1027 *num_vectors = msi_config->users[idx].num_vectors; 1028 *base_vector = msi_config->users[idx].base_vector; 1029 *user_base_data = *base_vector + ab_pci->msi_ep_base_data; 1030 1031 ath12k_dbg(ab, ATH12K_DBG_PCI, 1032 "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", 1033 user_name, *num_vectors, *user_base_data, 1034 *base_vector); 1035 1036 return 0; 1037 } 1038 } 1039 1040 ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 1041 1042 return -EINVAL; 1043 } 1044 1045 void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo, 1046 u32 *msi_addr_hi) 1047 { 1048 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1049 struct pci_dev *pci_dev = to_pci_dev(ab->dev); 1050 1051 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 1052 msi_addr_lo); 1053 1054 if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { 1055 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, 1056 msi_addr_hi); 1057 } else { 1058 *msi_addr_hi = 0; 1059 } 1060 } 1061 1062 void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, 1063 u32 *msi_idx) 1064 { 1065 u32 i, msi_data_idx; 1066 1067 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { 1068 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 1069 continue; 1070 1071 if (ce_id == i) 1072 break; 1073 1074 msi_data_idx++; 1075 } 1076 *msi_idx = msi_data_idx; 1077 } 1078 1079 void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab) 1080 { 1081 ath12k_pci_ce_irqs_enable(ab); 1082 } 1083 1084 void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab) 1085 { 1086 ath12k_pci_ce_irq_disable_sync(ab); 1087 } 1088 1089 void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) 1090 { 1091 int i; 1092 1093 set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 1094 1095 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 1096 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 1097 1098 napi_enable(&irq_grp->napi); 1099 ath12k_pci_ext_grp_enable(irq_grp); 1100 } 1101 } 1102 1103 void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 1104 { 1105 __ath12k_pci_ext_irq_disable(ab); 1106 ath12k_pci_sync_ext_irqs(ab); 1107 } 1108 1109 int ath12k_pci_hif_suspend(struct ath12k_base *ab) 1110 { 1111 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1112 1113 ath12k_mhi_suspend(ar_pci); 1114 1115 return 0; 1116 } 1117 1118 int ath12k_pci_hif_resume(struct ath12k_base *ab) 1119 { 1120 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1121 1122 ath12k_mhi_resume(ar_pci); 1123 1124 return 0; 1125 } 1126 1127 void ath12k_pci_stop(struct ath12k_base *ab) 1128 { 1129 ath12k_pci_ce_irq_disable_sync(ab); 1130 ath12k_ce_cleanup_pipes(ab); 1131 } 1132 1133 int ath12k_pci_start(struct ath12k_base *ab) 1134 { 1135 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1136 1137 set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1138 1139 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 1140 ath12k_pci_aspm_restore(ab_pci); 1141 else 1142 ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); 1143 1144 ath12k_pci_ce_irqs_enable(ab); 1145 ath12k_ce_rx_post_buf(ab); 1146 1147 return 0; 1148 } 1149 1150 u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset) 1151 { 1152 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1153 u32 val, window_start; 1154 int ret = 0; 1155 1156 /* for offset beyond BAR + 4K - 32, may 1157 * need to wakeup MHI to access. 1158 */ 1159 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1160 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1161 ret = ab_pci->pci_ops->wakeup(ab); 1162 1163 if (offset < WINDOW_START) { 1164 val = ioread32(ab->mem + offset); 1165 } else { 1166 if (ab->static_window_map) 1167 window_start = ath12k_pci_get_window_start(ab, offset); 1168 else 1169 window_start = WINDOW_START; 1170 1171 if (window_start == WINDOW_START) { 1172 spin_lock_bh(&ab_pci->window_lock); 1173 ath12k_pci_select_window(ab_pci, offset); 1174 1175 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1176 offset = offset - PCI_MHIREGLEN_REG; 1177 val = ioread32(ab->mem + 1178 (offset & WINDOW_RANGE_MASK)); 1179 } else { 1180 val = ioread32(ab->mem + window_start + 1181 (offset & WINDOW_RANGE_MASK)); 1182 } 1183 spin_unlock_bh(&ab_pci->window_lock); 1184 } else { 1185 val = ioread32(ab->mem + window_start + 1186 (offset & WINDOW_RANGE_MASK)); 1187 } 1188 } 1189 1190 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1191 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1192 !ret) 1193 ab_pci->pci_ops->release(ab); 1194 return val; 1195 } 1196 1197 void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) 1198 { 1199 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1200 u32 window_start; 1201 int ret = 0; 1202 1203 /* for offset beyond BAR + 4K - 32, may 1204 * need to wakeup MHI to access. 1205 */ 1206 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1207 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1208 ret = ab_pci->pci_ops->wakeup(ab); 1209 1210 if (offset < WINDOW_START) { 1211 iowrite32(value, ab->mem + offset); 1212 } else { 1213 if (ab->static_window_map) 1214 window_start = ath12k_pci_get_window_start(ab, offset); 1215 else 1216 window_start = WINDOW_START; 1217 1218 if (window_start == WINDOW_START) { 1219 spin_lock_bh(&ab_pci->window_lock); 1220 ath12k_pci_select_window(ab_pci, offset); 1221 1222 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1223 offset = offset - PCI_MHIREGLEN_REG; 1224 iowrite32(value, ab->mem + 1225 (offset & WINDOW_RANGE_MASK)); 1226 } else { 1227 iowrite32(value, ab->mem + window_start + 1228 (offset & WINDOW_RANGE_MASK)); 1229 } 1230 spin_unlock_bh(&ab_pci->window_lock); 1231 } else { 1232 iowrite32(value, ab->mem + window_start + 1233 (offset & WINDOW_RANGE_MASK)); 1234 } 1235 } 1236 1237 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1238 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1239 !ret) 1240 ab_pci->pci_ops->release(ab); 1241 } 1242 1243 int ath12k_pci_power_up(struct ath12k_base *ab) 1244 { 1245 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1246 int ret; 1247 1248 ab_pci->register_window = 0; 1249 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1250 ath12k_pci_sw_reset(ab_pci->ab, true); 1251 1252 /* Disable ASPM during firmware download due to problems switching 1253 * to AMSS state. 1254 */ 1255 ath12k_pci_aspm_disable(ab_pci); 1256 1257 ath12k_pci_msi_enable(ab_pci); 1258 1259 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) 1260 ath12k_pci_update_qrtr_node_id(ab); 1261 1262 ret = ath12k_mhi_start(ab_pci); 1263 if (ret) { 1264 ath12k_err(ab, "failed to start mhi: %d\n", ret); 1265 return ret; 1266 } 1267 1268 if (ab->static_window_map) 1269 ath12k_pci_select_static_window(ab_pci); 1270 1271 return 0; 1272 } 1273 1274 void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) 1275 { 1276 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1277 1278 /* restore aspm in case firmware bootup fails */ 1279 ath12k_pci_aspm_restore(ab_pci); 1280 1281 ath12k_pci_force_wake(ab_pci->ab); 1282 ath12k_pci_msi_disable(ab_pci); 1283 ath12k_mhi_stop(ab_pci, is_suspend); 1284 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1285 ath12k_pci_sw_reset(ab_pci->ab, false); 1286 } 1287 1288 static const struct ath12k_hif_ops ath12k_pci_hif_ops = { 1289 .start = ath12k_pci_start, 1290 .stop = ath12k_pci_stop, 1291 .read32 = ath12k_pci_read32, 1292 .write32 = ath12k_pci_write32, 1293 .power_down = ath12k_pci_power_down, 1294 .power_up = ath12k_pci_power_up, 1295 .suspend = ath12k_pci_hif_suspend, 1296 .resume = ath12k_pci_hif_resume, 1297 .irq_enable = ath12k_pci_ext_irq_enable, 1298 .irq_disable = ath12k_pci_ext_irq_disable, 1299 .get_msi_address = ath12k_pci_get_msi_address, 1300 .get_user_msi_vector = ath12k_pci_get_user_msi_assignment, 1301 .map_service_to_pipe = ath12k_pci_map_service_to_pipe, 1302 .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, 1303 .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, 1304 .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, 1305 }; 1306 1307 static 1308 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor) 1309 { 1310 u32 soc_hw_version; 1311 1312 soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION); 1313 *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, 1314 soc_hw_version); 1315 *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, 1316 soc_hw_version); 1317 1318 ath12k_dbg(ab, ATH12K_DBG_PCI, 1319 "pci tcsr_soc_hw_version major %d minor %d\n", 1320 *major, *minor); 1321 } 1322 1323 static int ath12k_pci_probe(struct pci_dev *pdev, 1324 const struct pci_device_id *pci_dev) 1325 { 1326 struct ath12k_base *ab; 1327 struct ath12k_pci *ab_pci; 1328 u32 soc_hw_version_major, soc_hw_version_minor; 1329 int ret; 1330 1331 ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI); 1332 if (!ab) { 1333 dev_err(&pdev->dev, "failed to allocate ath12k base\n"); 1334 return -ENOMEM; 1335 } 1336 1337 ab->dev = &pdev->dev; 1338 pci_set_drvdata(pdev, ab); 1339 ab_pci = ath12k_pci_priv(ab); 1340 ab_pci->dev_id = pci_dev->device; 1341 ab_pci->ab = ab; 1342 ab_pci->pdev = pdev; 1343 ab->hif.ops = &ath12k_pci_hif_ops; 1344 pci_set_drvdata(pdev, ab); 1345 spin_lock_init(&ab_pci->window_lock); 1346 1347 ret = ath12k_pci_claim(ab_pci, pdev); 1348 if (ret) { 1349 ath12k_err(ab, "failed to claim device: %d\n", ret); 1350 goto err_free_core; 1351 } 1352 1353 ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 1354 pdev->vendor, pdev->device, 1355 pdev->subsystem_vendor, pdev->subsystem_device); 1356 1357 ab->id.vendor = pdev->vendor; 1358 ab->id.device = pdev->device; 1359 ab->id.subsystem_vendor = pdev->subsystem_vendor; 1360 ab->id.subsystem_device = pdev->subsystem_device; 1361 1362 switch (pci_dev->device) { 1363 case QCN9274_DEVICE_ID: 1364 ab_pci->msi_config = &ath12k_msi_config[0]; 1365 ab->static_window_map = true; 1366 ab_pci->pci_ops = &ath12k_pci_ops_qcn9274; 1367 ab->hal_rx_ops = &hal_rx_qcn9274_ops; 1368 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1369 &soc_hw_version_minor); 1370 switch (soc_hw_version_major) { 1371 case ATH12K_PCI_SOC_HW_VERSION_2: 1372 ab->hw_rev = ATH12K_HW_QCN9274_HW20; 1373 break; 1374 case ATH12K_PCI_SOC_HW_VERSION_1: 1375 ab->hw_rev = ATH12K_HW_QCN9274_HW10; 1376 break; 1377 default: 1378 dev_err(&pdev->dev, 1379 "Unknown hardware version found for QCN9274: 0x%x\n", 1380 soc_hw_version_major); 1381 ret = -EOPNOTSUPP; 1382 goto err_pci_free_region; 1383 } 1384 break; 1385 case WCN7850_DEVICE_ID: 1386 ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD; 1387 ab_pci->msi_config = &ath12k_msi_config[0]; 1388 ab->static_window_map = false; 1389 ab_pci->pci_ops = &ath12k_pci_ops_wcn7850; 1390 ab->hal_rx_ops = &hal_rx_wcn7850_ops; 1391 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1392 &soc_hw_version_minor); 1393 switch (soc_hw_version_major) { 1394 case ATH12K_PCI_SOC_HW_VERSION_2: 1395 ab->hw_rev = ATH12K_HW_WCN7850_HW20; 1396 break; 1397 default: 1398 dev_err(&pdev->dev, 1399 "Unknown hardware version found for WCN7850: 0x%x\n", 1400 soc_hw_version_major); 1401 ret = -EOPNOTSUPP; 1402 goto err_pci_free_region; 1403 } 1404 break; 1405 1406 default: 1407 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", 1408 pci_dev->device); 1409 ret = -EOPNOTSUPP; 1410 goto err_pci_free_region; 1411 } 1412 1413 ret = ath12k_pci_msi_alloc(ab_pci); 1414 if (ret) { 1415 ath12k_err(ab, "failed to alloc msi: %d\n", ret); 1416 goto err_pci_free_region; 1417 } 1418 1419 ret = ath12k_core_pre_init(ab); 1420 if (ret) 1421 goto err_pci_msi_free; 1422 1423 ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); 1424 if (ret) { 1425 ath12k_err(ab, "failed to set irq affinity %d\n", ret); 1426 goto err_pci_msi_free; 1427 } 1428 1429 ret = ath12k_mhi_register(ab_pci); 1430 if (ret) { 1431 ath12k_err(ab, "failed to register mhi: %d\n", ret); 1432 goto err_irq_affinity_cleanup; 1433 } 1434 1435 ret = ath12k_hal_srng_init(ab); 1436 if (ret) 1437 goto err_mhi_unregister; 1438 1439 ret = ath12k_ce_alloc_pipes(ab); 1440 if (ret) { 1441 ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret); 1442 goto err_hal_srng_deinit; 1443 } 1444 1445 ath12k_pci_init_qmi_ce_config(ab); 1446 1447 ret = ath12k_pci_config_irq(ab); 1448 if (ret) { 1449 ath12k_err(ab, "failed to config irq: %d\n", ret); 1450 goto err_ce_free; 1451 } 1452 1453 /* kernel may allocate a dummy vector before request_irq and 1454 * then allocate a real vector when request_irq is called. 1455 * So get msi_data here again to avoid spurious interrupt 1456 * as msi_data will configured to srngs. 1457 */ 1458 ret = ath12k_pci_config_msi_data(ab_pci); 1459 if (ret) { 1460 ath12k_err(ab, "failed to config msi_data: %d\n", ret); 1461 goto err_free_irq; 1462 } 1463 1464 ret = ath12k_core_init(ab); 1465 if (ret) { 1466 ath12k_err(ab, "failed to init core: %d\n", ret); 1467 goto err_free_irq; 1468 } 1469 return 0; 1470 1471 err_free_irq: 1472 ath12k_pci_free_irq(ab); 1473 1474 err_ce_free: 1475 ath12k_ce_free_pipes(ab); 1476 1477 err_hal_srng_deinit: 1478 ath12k_hal_srng_deinit(ab); 1479 1480 err_mhi_unregister: 1481 ath12k_mhi_unregister(ab_pci); 1482 1483 err_pci_msi_free: 1484 ath12k_pci_msi_free(ab_pci); 1485 1486 err_irq_affinity_cleanup: 1487 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1488 1489 err_pci_free_region: 1490 ath12k_pci_free_region(ab_pci); 1491 1492 err_free_core: 1493 ath12k_core_free(ab); 1494 1495 return ret; 1496 } 1497 1498 static void ath12k_pci_remove(struct pci_dev *pdev) 1499 { 1500 struct ath12k_base *ab = pci_get_drvdata(pdev); 1501 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1502 1503 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1504 1505 if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { 1506 ath12k_pci_power_down(ab, false); 1507 ath12k_qmi_deinit_service(ab); 1508 goto qmi_fail; 1509 } 1510 1511 set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); 1512 1513 cancel_work_sync(&ab->reset_work); 1514 ath12k_core_deinit(ab); 1515 1516 qmi_fail: 1517 ath12k_mhi_unregister(ab_pci); 1518 1519 ath12k_pci_free_irq(ab); 1520 ath12k_pci_msi_free(ab_pci); 1521 ath12k_pci_free_region(ab_pci); 1522 1523 ath12k_hal_srng_deinit(ab); 1524 ath12k_ce_free_pipes(ab); 1525 ath12k_core_free(ab); 1526 } 1527 1528 static void ath12k_pci_shutdown(struct pci_dev *pdev) 1529 { 1530 struct ath12k_base *ab = pci_get_drvdata(pdev); 1531 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1532 1533 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1534 ath12k_pci_power_down(ab, false); 1535 } 1536 1537 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) 1538 { 1539 struct ath12k_base *ab = dev_get_drvdata(dev); 1540 int ret; 1541 1542 ret = ath12k_core_suspend(ab); 1543 if (ret) 1544 ath12k_warn(ab, "failed to suspend core: %d\n", ret); 1545 1546 return ret; 1547 } 1548 1549 static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) 1550 { 1551 struct ath12k_base *ab = dev_get_drvdata(dev); 1552 int ret; 1553 1554 ret = ath12k_core_resume(ab); 1555 if (ret) 1556 ath12k_warn(ab, "failed to resume core: %d\n", ret); 1557 1558 return ret; 1559 } 1560 1561 static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev) 1562 { 1563 struct ath12k_base *ab = dev_get_drvdata(dev); 1564 int ret; 1565 1566 ret = ath12k_core_suspend_late(ab); 1567 if (ret) 1568 ath12k_warn(ab, "failed to late suspend core: %d\n", ret); 1569 1570 return ret; 1571 } 1572 1573 static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev) 1574 { 1575 struct ath12k_base *ab = dev_get_drvdata(dev); 1576 int ret; 1577 1578 ret = ath12k_core_resume_early(ab); 1579 if (ret) 1580 ath12k_warn(ab, "failed to early resume core: %d\n", ret); 1581 1582 return ret; 1583 } 1584 1585 static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = { 1586 SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend, 1587 ath12k_pci_pm_resume) 1588 SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late, 1589 ath12k_pci_pm_resume_early) 1590 }; 1591 1592 static struct pci_driver ath12k_pci_driver = { 1593 .name = "ath12k_pci", 1594 .id_table = ath12k_pci_id_table, 1595 .probe = ath12k_pci_probe, 1596 .remove = ath12k_pci_remove, 1597 .shutdown = ath12k_pci_shutdown, 1598 .driver.pm = &ath12k_pci_pm_ops, 1599 }; 1600 1601 static int ath12k_pci_init(void) 1602 { 1603 int ret; 1604 1605 ret = pci_register_driver(&ath12k_pci_driver); 1606 if (ret) { 1607 pr_err("failed to register ath12k pci driver: %d\n", 1608 ret); 1609 return ret; 1610 } 1611 1612 return 0; 1613 } 1614 module_init(ath12k_pci_init); 1615 1616 static void ath12k_pci_exit(void) 1617 { 1618 pci_unregister_driver(&ath12k_pci_driver); 1619 } 1620 1621 module_exit(ath12k_pci_exit); 1622 1623 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); 1624 MODULE_LICENSE("Dual BSD/GPL"); 1625