// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

/* Only BAR0 is mapped and used by this driver */
#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		32

/* First MSI vector used for CE interrupts; vectors 0-2 belong to MHI
 * (see ath12k_msi_config below).
 */
#define ATH12K_PCI_IRQ_CE0_OFFSET	3

#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

#define TCSR_SOC_HW_VERSION		0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)

/* BAR0 + 4k is always accessible, and no
 * need to force wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

#define QCN9274_DEVICE_ID		0x1109
#define WCN7850_DEVICE_ID		0x1107

/* Register written with a unique per-device instance id so multiple
 * identical devices can register distinct QMI services, see
 * ath12k_pci_update_qrtr_node_id().
 */
#define PCIE_LOCAL_REG_QRTR_NODE_ID	0x1E03164
#define DOMAIN_NUMBER_MASK		GENMASK(7, 4)
#define BUS_NUMBER_MASK			GENMASK(3, 0)

static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);

/* TODO: revisit IRQ mapping for new SRNG's */
/* MSI vector layout used when the full vector set could be allocated:
 * 3 for MHI, 5 for CE, 8 for DP.
 */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};

/* Fallback layout used when only a single MSI vector is available:
 * every user shares vector 0 (see ath12k_pci_msi_alloc()).
 */
static const struct ath12k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath12k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};

/* Human-readable IRQ names, indexed by ath12k IRQ number */
static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2ost-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
	"host2tcl-input-ring3",
	"host2tcl-input-ring2",
	"host2tcl-input-ring1",
	"wbm2host-tx-completions-ring4",
	"wbm2host-tx-completions-ring3",
	"wbm2host-tx-completions-ring2",
	"wbm2host-tx-completions-ring1",
	"tcl2host-status-ring",
};

/* Take a reference on the MHI device before accessing registers beyond
 * ACCESS_ALWAYS_OFF; installed as the ->wakeup op for WCN7850 (see
 * ath12k_pci_ops_wcn7850) and called from ath12k_pci_read32()/write32().
 */
static int ath12k_pci_bus_wake_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev);
}

/* Counterpart of ath12k_pci_bus_wake_up(): drop the MHI device reference
 * once register access past ACCESS_ALWAYS_OFF is done.
 */
static void ath12k_pci_bus_release(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	mhi_device_put(ab_pci->mhi_ctrl->mhi_dev);
}

/* QCN9274 needs no explicit wakeup/release around register access */
static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = {
	.wakeup = NULL,
	.release = NULL,
};

static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = {
	.wakeup = ath12k_pci_bus_wake_up,
	.release = ath12k_pci_bus_release,
};

/* Program the dynamic register window so @offset becomes reachable through
 * the windowed part of BAR0. Caller must hold ab_pci->window_lock; the
 * read-back of WINDOW_REG_ADDRESS flushes the posted write.
 */
static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset)
{
	struct ath12k_base *ab = ab_pci->ab;

	u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK);
	u32 static_window;

	lockdep_assert_held(&ab_pci->window_lock);

	/* Preserve the static window configuration and reset only dynamic window */
	static_window = ab_pci->register_window & WINDOW_STATIC_MASK;
	window |= static_window;

	if (window != ab_pci->register_window) {
		iowrite32(WINDOW_ENABLE_BIT | window,
			  ab->mem + WINDOW_REG_ADDRESS);
		ioread32(ab->mem + WINDOW_REG_ADDRESS);
		ab_pci->register_window = window;
	}
}

/* Set up the static windows (UMAC and CE register ranges) that stay mapped
 * permanently; used on chips with ab->static_window_map set.
 */
static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci)
{
	u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK);
	u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK);
	u32 window;

	window = (umac_window << 12) | (ce_window << 6);

	spin_lock_bh(&ab_pci->window_lock);
	ab_pci->register_window = window;
	spin_unlock_bh(&ab_pci->window_lock);

	iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS);
}

/* Map a register offset to the BAR0 window base it is reachable through
 * when static windows are in use.
 */
static u32 ath12k_pci_get_window_start(struct ath12k_base *ab,
				       u32 offset)
{
	u32 window_start;

	/* If offset lies within DP register range, use 3rd window */
	if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK)
		window_start = 3 * WINDOW_START;
	/* If offset lies within CE register range, use 2nd
	   window */
	else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK)
		window_start = 2 * WINDOW_START;
	else
		window_start = WINDOW_START;

	return window_start;
}

/* True when @offset falls inside the MHI register region, which is accessed
 * relative to PCI_MHIREGLEN_REG rather than through a window base.
 */
static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset)
{
	return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END);
}

/* Pulse the SoC global reset bit: set V, wait, clear V again (leaving it
 * set keeps the SoC stuck in reset). A read-back of all-ones afterwards
 * indicates the PCIe link went down.
 */
static void ath12k_pci_soc_global_reset(struct ath12k_base *ab)
{
	u32 val, delay;

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);

	val |= PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	/* TODO: exact time to sleep is uncertain */
	delay = 10;
	mdelay(delay);

	/* Need to toggle V bit back otherwise stuck in reset status */
	val &= ~PCIE_SOC_GLOBAL_RESET_V;

	ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val);

	mdelay(delay);

	val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET);
	if (val == 0xffffffff)
		ath12k_warn(ab, "link down error during global reset\n");
}

/* Clear the warm-boot scratch registers so the Q6 firmware takes the cold
 * boot path after a reset instead of looping in the warm path.
 */
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from
	 * continuing warm path and entering dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* A read-clear register; clear it to prevent Q6 from entering the
	 * wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}

/* Force the PCIe LTSSM into the expected state and issue a hot reset.
 * Retries up to 5 times because the link is unstable right after hot reset.
 */
static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* PCIE link seems very unstable after the Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		/* all-ones read means the link is (temporarily) down */
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

/* Clear any interrupt the target latched across a hot reset so the SBL
 * does not crash on a stale pending interrupt when it is downloaded again.
 */
static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a WAR for PCIE Hotreset.
	 * When target receive Hotreset, but will set the interrupt.
	 * So when download SBL again, SBL will open Interrupt and
	 * receive it, and crash immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

/* Clear the VDD4BLOW bit in the WLAON power control register */
static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

/* Request a SoC wake via the PCIe-local wake register and give the
 * hardware time to come up.
 */
static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

/* Software-reset the target. On the power-on path, additional link/interrupt
 * cleanup (LTSSM, stale interrupts, WLAON power ctrl) is done first. The
 * sequence of the remaining steps is hardware mandated.
 */
static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}

/* Free all DP (ext) group IRQs, their NAPI contexts and dummy netdevs */
static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

/* Free all CE IRQs (skipping CEs with interrupts disabled), then the DP
 * group IRQs. Reverses ath12k_pci_config_irq().
 */
static void ath12k_pci_free_irq(struct ath12k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath12k_pci_free_ext_irq(ab);
}

/* Enable the per-CE IRQ line (no-op in single-MSI mode) */
static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS,
		      &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

/* Disable the per-CE IRQ line (no-op in single-MSI mode) */
static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

/* Mark CE interrupts as disabled, then disable each CE IRQ line */
static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_disable(ab, i);
	}
}

/* Wait until all in-flight CE interrupt handlers have completed */
static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

/* Bottom half for CE interrupts: service the engine, then re-enable the
 * IRQ line that the top half disabled.
 */
static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}

/* Top half for CE interrupts: mask the line and defer to the tasklet.
 * Interrupts arriving while CE IRQs are flagged disabled are acknowledged
 * but ignored.
 */
static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;
	struct ath12k_base *ab = ce_pipe->ab;
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp =
		jiffies;

	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

/* Disable all IRQ lines of a DP group (no-op in single-MSI mode) */
static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

/* Clear the ext-IRQ-enabled flag, then disable every DP group's IRQs and
 * quiesce its NAPI context. Idempotent via test_and_clear_bit().
 */
static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_pci_ext_grp_disable(irq_grp);

		napi_synchronize(&irq_grp->napi);
		napi_disable(&irq_grp->napi);
	}
}

/* Enable all IRQ lines of a DP group (no-op in single-MSI mode) */
static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

/* Wait until all in-flight DP group interrupt handlers have completed */
static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

/* NAPI poll for a DP group: service the SRNGs; when the budget was not
 * exhausted, complete NAPI and re-enable the group's IRQ lines.
 */
static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath12k_ext_irq_grp,
							  napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	/* the NAPI contract forbids returning more than the budget */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}

/* Top half for DP group interrupts: mask the group's lines and hand off
 * to NAPI. Ignored (but acked) while ext IRQs are flagged disabled.
 */
static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;
	struct ath12k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

/* Allocate and wire up the DP (ext) interrupt groups: one dummy netdev and
 * NAPI context per group, plus a request_irq() for each group that has any
 * ring mask bit set. Returns 0 or a negative error code.
 */
static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;
	struct ath12k_ext_irq_grp *irq_grp;

	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		/* a group only gets an IRQ if at least one ring is routed to it */
		if
		   (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			/* groups share the DP vectors round-robin */
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  ab_pci->irq_flags,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				goto fail_request;
			}
		}
		ath12k_pci_ext_grp_disable(irq_grp);
	}

	return 0;

fail_request:
	/* i ->napi_ndev was properly allocated. Free it also */
	i += 1;
fail_allocate:
	/* free the dummy netdevs of all groups set up so far */
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}

/* Pin the single shared MSI IRQ to the given CPU mask; not needed (and a
 * no-op) when each user has its own vector.
 */
static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return 0;

	return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}

/* Request the CE IRQs (one per interrupt-capable CE, left disabled until
 * ath12k_pci_ce_irqs_enable()) and then configure the DP ext IRQs.
 *
 * NOTE(review): on a request_irq() failure the previously requested CE
 * IRQs are not freed here — confirm callers unwind via
 * ath12k_pci_free_irq() on the error path.
 */
static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  ab_pci->irq_flags, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

/* Populate the QMI CE configuration from hw_params and derive a unique QMI
 * service instance id from the PCI domain/bus number when the firmware
 * supports multiple QRTR ids.
 */
static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;

	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_bus *bus =
ab_pci->pdev->bus; 708 709 cfg->tgt_ce = ab->hw_params->target_ce_config; 710 cfg->tgt_ce_len = ab->hw_params->target_ce_count; 711 712 cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; 713 cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; 714 ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; 715 716 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) { 717 ab_pci->qmi_instance = 718 u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) | 719 u32_encode_bits(bus->number, BUS_NUMBER_MASK); 720 ab->qmi.service_ins_id += ab_pci->qmi_instance; 721 } 722 } 723 724 static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) 725 { 726 int i; 727 728 set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); 729 730 for (i = 0; i < ab->hw_params->ce_count; i++) { 731 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 732 continue; 733 ath12k_pci_ce_irq_enable(ab, i); 734 } 735 } 736 737 static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable) 738 { 739 struct pci_dev *dev = ab_pci->pdev; 740 u16 control; 741 742 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 743 744 if (enable) 745 control |= PCI_MSI_FLAGS_ENABLE; 746 else 747 control &= ~PCI_MSI_FLAGS_ENABLE; 748 749 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); 750 } 751 752 static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci) 753 { 754 ath12k_pci_msi_config(ab_pci, true); 755 } 756 757 static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci) 758 { 759 ath12k_pci_msi_config(ab_pci, false); 760 } 761 762 static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) 763 { 764 struct ath12k_base *ab = ab_pci->ab; 765 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 766 struct msi_desc *msi_desc; 767 int num_vectors; 768 int ret; 769 770 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 771 msi_config->total_vectors, 772 msi_config->total_vectors, 773 PCI_IRQ_MSI); 774 775 if (num_vectors == 
msi_config->total_vectors) { 776 set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 777 ab_pci->irq_flags = IRQF_SHARED; 778 } else { 779 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 780 1, 781 1, 782 PCI_IRQ_MSI); 783 if (num_vectors < 0) { 784 ret = -EINVAL; 785 goto reset_msi_config; 786 } 787 clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 788 ab_pci->msi_config = &msi_config_one_msi; 789 ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; 790 ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n"); 791 } 792 793 ath12k_info(ab, "MSI vectors: %d\n", num_vectors); 794 795 ath12k_pci_msi_disable(ab_pci); 796 797 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 798 if (!msi_desc) { 799 ath12k_err(ab, "msi_desc is NULL!\n"); 800 ret = -EINVAL; 801 goto free_msi_vector; 802 } 803 804 ab_pci->msi_ep_base_data = msi_desc->msg.data; 805 if (msi_desc->pci.msi_attrib.is_64) 806 set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); 807 808 ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); 809 810 return 0; 811 812 free_msi_vector: 813 pci_free_irq_vectors(ab_pci->pdev); 814 815 reset_msi_config: 816 return ret; 817 } 818 819 static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci) 820 { 821 pci_free_irq_vectors(ab_pci->pdev); 822 } 823 824 static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci) 825 { 826 struct msi_desc *msi_desc; 827 828 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 829 if (!msi_desc) { 830 ath12k_err(ab_pci->ab, "msi_desc is NULL!\n"); 831 pci_free_irq_vectors(ab_pci->pdev); 832 return -EINVAL; 833 } 834 835 ab_pci->msi_ep_base_data = msi_desc->msg.data; 836 837 ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", 838 ab_pci->msi_ep_base_data); 839 840 return 0; 841 } 842 843 static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) 844 { 845 struct ath12k_base *ab = ab_pci->ab; 846 u16 device_id; 847 int ret = 0; 848 849 
	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
	if (ret) {
		ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH12K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
		ret = -EIO;
		goto release_region;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem);
	return 0;

release_region:
	pci_release_region(pdev, ATH12K_PCI_BAR_NUM);
disable_device:
	pci_disable_device(pdev);
out:
	return ret;
}

/* Undo ath12k_pci_claim(): unmap BAR0, release the region and disable
 * the device if it is still enabled.
 */
static void ath12k_pci_free_region(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	struct pci_dev *pci_dev = ab_pci->pdev;

	pci_iounmap(pci_dev, ab->mem);
	ab->mem = NULL;
	pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM);
	if (pci_is_enabled(pci_dev))
		pci_disable_device(pci_dev);
}

/* Save the current link control settings and disable ASPM L0s/L1; the
 * saved state is restored later by ath12k_pci_aspm_restore().
 */
static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;

	pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				  &ab_pci->link_ctl);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n",
		   ab_pci->link_ctl,
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S),
		   u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1));

	/* disable L0s and L1 */
	pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL,
				   PCI_EXP_LNKCTL_ASPMC);

	set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags);
}

/* Write the per-device QMI instance into a firmware-visible scratch
 * register before the QMI service starts.
 */
static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 reg;

	/* On platforms with two or more identical mhi devices, qmi service run
	 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup
	 * cannot register more than one qmi service with identical node ID.
	 *
	 * This generates a unique instance ID from PCIe domain number and bus number,
	 * writes to the given register, it is available for firmware when the QMI service
	 * is spawned.
	 */
	reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
	ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
		   reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
}

/* Restore the ASPM bits saved by ath12k_pci_aspm_disable(), once, and only
 * on hardware that supports ASPM.
 */
static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (ab_pci->ab->hw_params->supports_aspm &&
	    test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}

/* Kill the per-CE bottom-half tasklets of all interrupt-capable CEs */
static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

/* Disable CE IRQs and wait until no handler or tasklet is running */
static void ath12k_pci_ce_irq_disable_sync(struct
ath12k_base *ab) 980 { 981 ath12k_pci_ce_irqs_disable(ab); 982 ath12k_pci_sync_ce_irqs(ab); 983 ath12k_pci_kill_tasklets(ab); 984 } 985 986 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, 987 u8 *ul_pipe, u8 *dl_pipe) 988 { 989 const struct service_to_pipe *entry; 990 bool ul_set = false, dl_set = false; 991 int i; 992 993 for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { 994 entry = &ab->hw_params->svc_to_ce_map[i]; 995 996 if (__le32_to_cpu(entry->service_id) != service_id) 997 continue; 998 999 switch (__le32_to_cpu(entry->pipedir)) { 1000 case PIPEDIR_NONE: 1001 break; 1002 case PIPEDIR_IN: 1003 WARN_ON(dl_set); 1004 *dl_pipe = __le32_to_cpu(entry->pipenum); 1005 dl_set = true; 1006 break; 1007 case PIPEDIR_OUT: 1008 WARN_ON(ul_set); 1009 *ul_pipe = __le32_to_cpu(entry->pipenum); 1010 ul_set = true; 1011 break; 1012 case PIPEDIR_INOUT: 1013 WARN_ON(dl_set); 1014 WARN_ON(ul_set); 1015 *dl_pipe = __le32_to_cpu(entry->pipenum); 1016 *ul_pipe = __le32_to_cpu(entry->pipenum); 1017 dl_set = true; 1018 ul_set = true; 1019 break; 1020 } 1021 } 1022 1023 if (WARN_ON(!ul_set || !dl_set)) 1024 return -ENOENT; 1025 1026 return 0; 1027 } 1028 1029 int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector) 1030 { 1031 struct pci_dev *pci_dev = to_pci_dev(dev); 1032 1033 return pci_irq_vector(pci_dev, vector); 1034 } 1035 1036 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, 1037 int *num_vectors, u32 *user_base_data, 1038 u32 *base_vector) 1039 { 1040 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1041 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 1042 int idx; 1043 1044 for (idx = 0; idx < msi_config->total_users; idx++) { 1045 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 1046 *num_vectors = msi_config->users[idx].num_vectors; 1047 *base_vector = msi_config->users[idx].base_vector; 1048 *user_base_data = *base_vector + ab_pci->msi_ep_base_data; 1049 1050 
ath12k_dbg(ab, ATH12K_DBG_PCI, 1051 "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", 1052 user_name, *num_vectors, *user_base_data, 1053 *base_vector); 1054 1055 return 0; 1056 } 1057 } 1058 1059 ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 1060 1061 return -EINVAL; 1062 } 1063 1064 void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo, 1065 u32 *msi_addr_hi) 1066 { 1067 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1068 struct pci_dev *pci_dev = to_pci_dev(ab->dev); 1069 1070 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 1071 msi_addr_lo); 1072 1073 if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { 1074 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, 1075 msi_addr_hi); 1076 } else { 1077 *msi_addr_hi = 0; 1078 } 1079 } 1080 1081 void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, 1082 u32 *msi_idx) 1083 { 1084 u32 i, msi_data_idx; 1085 1086 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { 1087 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 1088 continue; 1089 1090 if (ce_id == i) 1091 break; 1092 1093 msi_data_idx++; 1094 } 1095 *msi_idx = msi_data_idx; 1096 } 1097 1098 void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab) 1099 { 1100 ath12k_pci_ce_irqs_enable(ab); 1101 } 1102 1103 void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab) 1104 { 1105 ath12k_pci_ce_irq_disable_sync(ab); 1106 } 1107 1108 void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) 1109 { 1110 int i; 1111 1112 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 1113 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 1114 1115 napi_enable(&irq_grp->napi); 1116 ath12k_pci_ext_grp_enable(irq_grp); 1117 } 1118 1119 set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 1120 } 1121 1122 void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 1123 { 1124 __ath12k_pci_ext_irq_disable(ab); 1125 
ath12k_pci_sync_ext_irqs(ab); 1126 } 1127 1128 int ath12k_pci_hif_suspend(struct ath12k_base *ab) 1129 { 1130 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1131 1132 ath12k_mhi_suspend(ar_pci); 1133 1134 return 0; 1135 } 1136 1137 int ath12k_pci_hif_resume(struct ath12k_base *ab) 1138 { 1139 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1140 1141 ath12k_mhi_resume(ar_pci); 1142 1143 return 0; 1144 } 1145 1146 void ath12k_pci_stop(struct ath12k_base *ab) 1147 { 1148 ath12k_pci_ce_irq_disable_sync(ab); 1149 ath12k_ce_cleanup_pipes(ab); 1150 } 1151 1152 int ath12k_pci_start(struct ath12k_base *ab) 1153 { 1154 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1155 1156 set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1157 1158 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 1159 ath12k_pci_aspm_restore(ab_pci); 1160 else 1161 ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); 1162 1163 ath12k_pci_ce_irqs_enable(ab); 1164 ath12k_ce_rx_post_buf(ab); 1165 1166 return 0; 1167 } 1168 1169 u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset) 1170 { 1171 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1172 u32 val, window_start; 1173 int ret = 0; 1174 1175 /* for offset beyond BAR + 4K - 32, may 1176 * need to wakeup MHI to access. 
1177 */ 1178 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1179 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1180 ret = ab_pci->pci_ops->wakeup(ab); 1181 1182 if (offset < WINDOW_START) { 1183 val = ioread32(ab->mem + offset); 1184 } else { 1185 if (ab->static_window_map) 1186 window_start = ath12k_pci_get_window_start(ab, offset); 1187 else 1188 window_start = WINDOW_START; 1189 1190 if (window_start == WINDOW_START) { 1191 spin_lock_bh(&ab_pci->window_lock); 1192 ath12k_pci_select_window(ab_pci, offset); 1193 1194 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1195 offset = offset - PCI_MHIREGLEN_REG; 1196 val = ioread32(ab->mem + 1197 (offset & WINDOW_RANGE_MASK)); 1198 } else { 1199 val = ioread32(ab->mem + window_start + 1200 (offset & WINDOW_RANGE_MASK)); 1201 } 1202 spin_unlock_bh(&ab_pci->window_lock); 1203 } else { 1204 val = ioread32(ab->mem + window_start + 1205 (offset & WINDOW_RANGE_MASK)); 1206 } 1207 } 1208 1209 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1210 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1211 !ret) 1212 ab_pci->pci_ops->release(ab); 1213 return val; 1214 } 1215 1216 void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) 1217 { 1218 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1219 u32 window_start; 1220 int ret = 0; 1221 1222 /* for offset beyond BAR + 4K - 32, may 1223 * need to wakeup MHI to access. 
1224 */ 1225 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1226 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1227 ret = ab_pci->pci_ops->wakeup(ab); 1228 1229 if (offset < WINDOW_START) { 1230 iowrite32(value, ab->mem + offset); 1231 } else { 1232 if (ab->static_window_map) 1233 window_start = ath12k_pci_get_window_start(ab, offset); 1234 else 1235 window_start = WINDOW_START; 1236 1237 if (window_start == WINDOW_START) { 1238 spin_lock_bh(&ab_pci->window_lock); 1239 ath12k_pci_select_window(ab_pci, offset); 1240 1241 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1242 offset = offset - PCI_MHIREGLEN_REG; 1243 iowrite32(value, ab->mem + 1244 (offset & WINDOW_RANGE_MASK)); 1245 } else { 1246 iowrite32(value, ab->mem + window_start + 1247 (offset & WINDOW_RANGE_MASK)); 1248 } 1249 spin_unlock_bh(&ab_pci->window_lock); 1250 } else { 1251 iowrite32(value, ab->mem + window_start + 1252 (offset & WINDOW_RANGE_MASK)); 1253 } 1254 } 1255 1256 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1257 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1258 !ret) 1259 ab_pci->pci_ops->release(ab); 1260 } 1261 1262 int ath12k_pci_power_up(struct ath12k_base *ab) 1263 { 1264 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1265 int ret; 1266 1267 ab_pci->register_window = 0; 1268 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1269 ath12k_pci_sw_reset(ab_pci->ab, true); 1270 1271 /* Disable ASPM during firmware download due to problems switching 1272 * to AMSS state. 
1273 */ 1274 ath12k_pci_aspm_disable(ab_pci); 1275 1276 ath12k_pci_msi_enable(ab_pci); 1277 1278 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) 1279 ath12k_pci_update_qrtr_node_id(ab); 1280 1281 ret = ath12k_mhi_start(ab_pci); 1282 if (ret) { 1283 ath12k_err(ab, "failed to start mhi: %d\n", ret); 1284 return ret; 1285 } 1286 1287 if (ab->static_window_map) 1288 ath12k_pci_select_static_window(ab_pci); 1289 1290 return 0; 1291 } 1292 1293 void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) 1294 { 1295 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1296 1297 /* restore aspm in case firmware bootup fails */ 1298 ath12k_pci_aspm_restore(ab_pci); 1299 1300 ath12k_pci_force_wake(ab_pci->ab); 1301 ath12k_pci_msi_disable(ab_pci); 1302 ath12k_mhi_stop(ab_pci, is_suspend); 1303 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1304 ath12k_pci_sw_reset(ab_pci->ab, false); 1305 } 1306 1307 static int ath12k_pci_panic_handler(struct ath12k_base *ab) 1308 { 1309 ath12k_pci_sw_reset(ab, false); 1310 1311 return NOTIFY_OK; 1312 } 1313 1314 static const struct ath12k_hif_ops ath12k_pci_hif_ops = { 1315 .start = ath12k_pci_start, 1316 .stop = ath12k_pci_stop, 1317 .read32 = ath12k_pci_read32, 1318 .write32 = ath12k_pci_write32, 1319 .power_down = ath12k_pci_power_down, 1320 .power_up = ath12k_pci_power_up, 1321 .suspend = ath12k_pci_hif_suspend, 1322 .resume = ath12k_pci_hif_resume, 1323 .irq_enable = ath12k_pci_ext_irq_enable, 1324 .irq_disable = ath12k_pci_ext_irq_disable, 1325 .get_msi_address = ath12k_pci_get_msi_address, 1326 .get_user_msi_vector = ath12k_pci_get_user_msi_assignment, 1327 .map_service_to_pipe = ath12k_pci_map_service_to_pipe, 1328 .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, 1329 .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, 1330 .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, 1331 .panic_handler = ath12k_pci_panic_handler, 1332 }; 1333 1334 static 1335 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 
*major, u32 *minor) 1336 { 1337 u32 soc_hw_version; 1338 1339 soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION); 1340 *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, 1341 soc_hw_version); 1342 *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, 1343 soc_hw_version); 1344 1345 ath12k_dbg(ab, ATH12K_DBG_PCI, 1346 "pci tcsr_soc_hw_version major %d minor %d\n", 1347 *major, *minor); 1348 } 1349 1350 static int ath12k_pci_probe(struct pci_dev *pdev, 1351 const struct pci_device_id *pci_dev) 1352 { 1353 struct ath12k_base *ab; 1354 struct ath12k_pci *ab_pci; 1355 u32 soc_hw_version_major, soc_hw_version_minor; 1356 int ret; 1357 1358 ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI); 1359 if (!ab) { 1360 dev_err(&pdev->dev, "failed to allocate ath12k base\n"); 1361 return -ENOMEM; 1362 } 1363 1364 ab->dev = &pdev->dev; 1365 pci_set_drvdata(pdev, ab); 1366 ab_pci = ath12k_pci_priv(ab); 1367 ab_pci->dev_id = pci_dev->device; 1368 ab_pci->ab = ab; 1369 ab_pci->pdev = pdev; 1370 ab->hif.ops = &ath12k_pci_hif_ops; 1371 pci_set_drvdata(pdev, ab); 1372 spin_lock_init(&ab_pci->window_lock); 1373 1374 ret = ath12k_pci_claim(ab_pci, pdev); 1375 if (ret) { 1376 ath12k_err(ab, "failed to claim device: %d\n", ret); 1377 goto err_free_core; 1378 } 1379 1380 ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 1381 pdev->vendor, pdev->device, 1382 pdev->subsystem_vendor, pdev->subsystem_device); 1383 1384 ab->id.vendor = pdev->vendor; 1385 ab->id.device = pdev->device; 1386 ab->id.subsystem_vendor = pdev->subsystem_vendor; 1387 ab->id.subsystem_device = pdev->subsystem_device; 1388 1389 switch (pci_dev->device) { 1390 case QCN9274_DEVICE_ID: 1391 ab_pci->msi_config = &ath12k_msi_config[0]; 1392 ab->static_window_map = true; 1393 ab_pci->pci_ops = &ath12k_pci_ops_qcn9274; 1394 ab->hal_rx_ops = &hal_rx_qcn9274_ops; 1395 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1396 &soc_hw_version_minor); 1397 switch (soc_hw_version_major) { 1398 
case ATH12K_PCI_SOC_HW_VERSION_2: 1399 ab->hw_rev = ATH12K_HW_QCN9274_HW20; 1400 break; 1401 case ATH12K_PCI_SOC_HW_VERSION_1: 1402 ab->hw_rev = ATH12K_HW_QCN9274_HW10; 1403 break; 1404 default: 1405 dev_err(&pdev->dev, 1406 "Unknown hardware version found for QCN9274: 0x%x\n", 1407 soc_hw_version_major); 1408 ret = -EOPNOTSUPP; 1409 goto err_pci_free_region; 1410 } 1411 break; 1412 case WCN7850_DEVICE_ID: 1413 ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD; 1414 ab_pci->msi_config = &ath12k_msi_config[0]; 1415 ab->static_window_map = false; 1416 ab_pci->pci_ops = &ath12k_pci_ops_wcn7850; 1417 ab->hal_rx_ops = &hal_rx_wcn7850_ops; 1418 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1419 &soc_hw_version_minor); 1420 switch (soc_hw_version_major) { 1421 case ATH12K_PCI_SOC_HW_VERSION_2: 1422 ab->hw_rev = ATH12K_HW_WCN7850_HW20; 1423 break; 1424 default: 1425 dev_err(&pdev->dev, 1426 "Unknown hardware version found for WCN7850: 0x%x\n", 1427 soc_hw_version_major); 1428 ret = -EOPNOTSUPP; 1429 goto err_pci_free_region; 1430 } 1431 break; 1432 1433 default: 1434 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", 1435 pci_dev->device); 1436 ret = -EOPNOTSUPP; 1437 goto err_pci_free_region; 1438 } 1439 1440 ret = ath12k_pci_msi_alloc(ab_pci); 1441 if (ret) { 1442 ath12k_err(ab, "failed to alloc msi: %d\n", ret); 1443 goto err_pci_free_region; 1444 } 1445 1446 ret = ath12k_core_pre_init(ab); 1447 if (ret) 1448 goto err_pci_msi_free; 1449 1450 ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); 1451 if (ret) { 1452 ath12k_err(ab, "failed to set irq affinity %d\n", ret); 1453 goto err_pci_msi_free; 1454 } 1455 1456 ret = ath12k_mhi_register(ab_pci); 1457 if (ret) { 1458 ath12k_err(ab, "failed to register mhi: %d\n", ret); 1459 goto err_irq_affinity_cleanup; 1460 } 1461 1462 ret = ath12k_hal_srng_init(ab); 1463 if (ret) 1464 goto err_mhi_unregister; 1465 1466 ret = ath12k_ce_alloc_pipes(ab); 1467 if (ret) { 1468 ath12k_err(ab, "failed to allocate 
ce pipes: %d\n", ret); 1469 goto err_hal_srng_deinit; 1470 } 1471 1472 ath12k_pci_init_qmi_ce_config(ab); 1473 1474 ret = ath12k_pci_config_irq(ab); 1475 if (ret) { 1476 ath12k_err(ab, "failed to config irq: %d\n", ret); 1477 goto err_ce_free; 1478 } 1479 1480 /* kernel may allocate a dummy vector before request_irq and 1481 * then allocate a real vector when request_irq is called. 1482 * So get msi_data here again to avoid spurious interrupt 1483 * as msi_data will configured to srngs. 1484 */ 1485 ret = ath12k_pci_config_msi_data(ab_pci); 1486 if (ret) { 1487 ath12k_err(ab, "failed to config msi_data: %d\n", ret); 1488 goto err_free_irq; 1489 } 1490 1491 ret = ath12k_core_init(ab); 1492 if (ret) { 1493 ath12k_err(ab, "failed to init core: %d\n", ret); 1494 goto err_free_irq; 1495 } 1496 return 0; 1497 1498 err_free_irq: 1499 ath12k_pci_free_irq(ab); 1500 1501 err_ce_free: 1502 ath12k_ce_free_pipes(ab); 1503 1504 err_hal_srng_deinit: 1505 ath12k_hal_srng_deinit(ab); 1506 1507 err_mhi_unregister: 1508 ath12k_mhi_unregister(ab_pci); 1509 1510 err_pci_msi_free: 1511 ath12k_pci_msi_free(ab_pci); 1512 1513 err_irq_affinity_cleanup: 1514 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1515 1516 err_pci_free_region: 1517 ath12k_pci_free_region(ab_pci); 1518 1519 err_free_core: 1520 ath12k_core_free(ab); 1521 1522 return ret; 1523 } 1524 1525 static void ath12k_pci_remove(struct pci_dev *pdev) 1526 { 1527 struct ath12k_base *ab = pci_get_drvdata(pdev); 1528 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1529 1530 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1531 1532 if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { 1533 ath12k_pci_power_down(ab, false); 1534 ath12k_qmi_deinit_service(ab); 1535 goto qmi_fail; 1536 } 1537 1538 set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); 1539 1540 cancel_work_sync(&ab->reset_work); 1541 ath12k_core_deinit(ab); 1542 1543 qmi_fail: 1544 ath12k_mhi_unregister(ab_pci); 1545 1546 ath12k_pci_free_irq(ab); 1547 
ath12k_pci_msi_free(ab_pci); 1548 ath12k_pci_free_region(ab_pci); 1549 1550 ath12k_hal_srng_deinit(ab); 1551 ath12k_ce_free_pipes(ab); 1552 ath12k_core_free(ab); 1553 } 1554 1555 static void ath12k_pci_shutdown(struct pci_dev *pdev) 1556 { 1557 struct ath12k_base *ab = pci_get_drvdata(pdev); 1558 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1559 1560 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1561 ath12k_pci_power_down(ab, false); 1562 } 1563 1564 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) 1565 { 1566 struct ath12k_base *ab = dev_get_drvdata(dev); 1567 int ret; 1568 1569 ret = ath12k_core_suspend(ab); 1570 if (ret) 1571 ath12k_warn(ab, "failed to suspend core: %d\n", ret); 1572 1573 return ret; 1574 } 1575 1576 static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) 1577 { 1578 struct ath12k_base *ab = dev_get_drvdata(dev); 1579 int ret; 1580 1581 ret = ath12k_core_resume(ab); 1582 if (ret) 1583 ath12k_warn(ab, "failed to resume core: %d\n", ret); 1584 1585 return ret; 1586 } 1587 1588 static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev) 1589 { 1590 struct ath12k_base *ab = dev_get_drvdata(dev); 1591 int ret; 1592 1593 ret = ath12k_core_suspend_late(ab); 1594 if (ret) 1595 ath12k_warn(ab, "failed to late suspend core: %d\n", ret); 1596 1597 return ret; 1598 } 1599 1600 static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev) 1601 { 1602 struct ath12k_base *ab = dev_get_drvdata(dev); 1603 int ret; 1604 1605 ret = ath12k_core_resume_early(ab); 1606 if (ret) 1607 ath12k_warn(ab, "failed to early resume core: %d\n", ret); 1608 1609 return ret; 1610 } 1611 1612 static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = { 1613 SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend, 1614 ath12k_pci_pm_resume) 1615 SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late, 1616 ath12k_pci_pm_resume_early) 1617 }; 1618 1619 static struct pci_driver ath12k_pci_driver = { 1620 .name = "ath12k_pci", 
1621 .id_table = ath12k_pci_id_table, 1622 .probe = ath12k_pci_probe, 1623 .remove = ath12k_pci_remove, 1624 .shutdown = ath12k_pci_shutdown, 1625 .driver.pm = &ath12k_pci_pm_ops, 1626 }; 1627 1628 static int ath12k_pci_init(void) 1629 { 1630 int ret; 1631 1632 ret = pci_register_driver(&ath12k_pci_driver); 1633 if (ret) { 1634 pr_err("failed to register ath12k pci driver: %d\n", 1635 ret); 1636 return ret; 1637 } 1638 1639 return 0; 1640 } 1641 module_init(ath12k_pci_init); 1642 1643 static void ath12k_pci_exit(void) 1644 { 1645 pci_unregister_driver(&ath12k_pci_driver); 1646 } 1647 1648 module_exit(ath12k_pci_exit); 1649 1650 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); 1651 MODULE_LICENSE("Dual BSD/GPL"); 1652