1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/module.h> 8 #include <linux/msi.h> 9 #include <linux/pci.h> 10 11 #include "pci.h" 12 #include "core.h" 13 #include "hif.h" 14 #include "mhi.h" 15 #include "debug.h" 16 17 #define ATH12K_PCI_BAR_NUM 0 18 #define ATH12K_PCI_DMA_MASK 32 19 20 #define ATH12K_PCI_IRQ_CE0_OFFSET 3 21 22 #define WINDOW_ENABLE_BIT 0x40000000 23 #define WINDOW_REG_ADDRESS 0x310c 24 #define WINDOW_VALUE_MASK GENMASK(24, 19) 25 #define WINDOW_START 0x80000 26 #define WINDOW_RANGE_MASK GENMASK(18, 0) 27 #define WINDOW_STATIC_MASK GENMASK(31, 6) 28 29 #define TCSR_SOC_HW_VERSION 0x1B00000 30 #define TCSR_SOC_HW_VERSION_MAJOR_MASK GENMASK(11, 8) 31 #define TCSR_SOC_HW_VERSION_MINOR_MASK GENMASK(7, 4) 32 33 /* BAR0 + 4k is always accessible, and no 34 * need to force wakeup. 35 * 4K - 32 = 0xFE0 36 */ 37 #define ACCESS_ALWAYS_OFF 0xFE0 38 39 #define QCN9274_DEVICE_ID 0x1109 40 #define WCN7850_DEVICE_ID 0x1107 41 42 #define PCIE_LOCAL_REG_QRTR_NODE_ID 0x1E03164 43 #define DOMAIN_NUMBER_MASK GENMASK(7, 4) 44 #define BUS_NUMBER_MASK GENMASK(3, 0) 45 46 static const struct pci_device_id ath12k_pci_id_table[] = { 47 { PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) }, 48 { PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) }, 49 {0} 50 }; 51 52 MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table); 53 54 /* TODO: revisit IRQ mapping for new SRNG's */ 55 static const struct ath12k_msi_config ath12k_msi_config[] = { 56 { 57 .total_vectors = 16, 58 .total_users = 3, 59 .users = (struct ath12k_msi_user[]) { 60 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 61 { .name = "CE", .num_vectors = 5, .base_vector = 3 }, 62 { .name = "DP", .num_vectors = 8, .base_vector = 8 }, 63 }, 64 }, 65 }; 66 67 static const struct ath12k_msi_config msi_config_one_msi = { 68 .total_vectors = 1, 69 .total_users = 4, 70 .users = (struct ath12k_msi_user[]) { 71 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 72 { .name = "CE", .num_vectors = 1, .base_vector = 0 }, 73 { .name = "WAKE", .num_vectors = 1, .base_vector = 0 }, 74 { .name = "DP", .num_vectors = 1, .base_vector = 0 }, 75 }, 76 }; 77 78 static const char *irq_name[ATH12K_IRQ_NUM_MAX] = { 79 "bhi", 80 "mhi-er0", 81 "mhi-er1", 82 "ce0", 83 "ce1", 84 "ce2", 85 "ce3", 86 "ce4", 87 "ce5", 88 "ce6", 89 "ce7", 90 "ce8", 91 "ce9", 92 "ce10", 93 "ce11", 94 "ce12", 95 "ce13", 96 "ce14", 97 "ce15", 98 "host2wbm-desc-feed", 99 "host2reo-re-injection", 100 "host2reo-command", 101 "host2rxdma-monitor-ring3", 102 "host2rxdma-monitor-ring2", 103 "host2rxdma-monitor-ring1", 104 "reo2ost-exception", 105 "wbm2host-rx-release", 106 "reo2host-status", 107 "reo2host-destination-ring4", 108 "reo2host-destination-ring3", 109 "reo2host-destination-ring2", 110 "reo2host-destination-ring1", 111 "rxdma2host-monitor-destination-mac3", 112 "rxdma2host-monitor-destination-mac2", 113 "rxdma2host-monitor-destination-mac1", 114 "ppdu-end-interrupts-mac3", 115 "ppdu-end-interrupts-mac2", 116 "ppdu-end-interrupts-mac1", 117 "rxdma2host-monitor-status-ring-mac3", 118 "rxdma2host-monitor-status-ring-mac2", 119 "rxdma2host-monitor-status-ring-mac1", 120 "host2rxdma-host-buf-ring-mac3", 121 "host2rxdma-host-buf-ring-mac2", 122 "host2rxdma-host-buf-ring-mac1", 123 "rxdma2host-destination-ring-mac3", 124 "rxdma2host-destination-ring-mac2", 125 "rxdma2host-destination-ring-mac1", 126 "host2tcl-input-ring4", 127 "host2tcl-input-ring3", 128 "host2tcl-input-ring2", 129 "host2tcl-input-ring1", 130 "wbm2host-tx-completions-ring4", 131 "wbm2host-tx-completions-ring3", 132 "wbm2host-tx-completions-ring2", 133 "wbm2host-tx-completions-ring1", 134 "tcl2host-status-ring", 135 }; 136 137 static int ath12k_pci_bus_wake_up(struct ath12k_base *ab) 138 { 139 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 140 141 return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); 142 } 143 144 static void ath12k_pci_bus_release(struct ath12k_base *ab) 145 { 146 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 147 148 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); 149 } 150 151 static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = { 152 .wakeup = NULL, 153 .release = NULL, 154 }; 155 156 static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = { 157 .wakeup = ath12k_pci_bus_wake_up, 158 .release = ath12k_pci_bus_release, 159 }; 160 161 static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset) 162 { 163 struct ath12k_base *ab = ab_pci->ab; 164 165 u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK); 166 u32 static_window; 167 168 lockdep_assert_held(&ab_pci->window_lock); 169 170 /* Preserve the static window configuration and reset only dynamic window */ 171 static_window = ab_pci->register_window & WINDOW_STATIC_MASK; 172 window |= static_window; 173 174 if (window != ab_pci->register_window) { 175 iowrite32(WINDOW_ENABLE_BIT | window, 176 ab->mem + WINDOW_REG_ADDRESS); 177 ioread32(ab->mem + WINDOW_REG_ADDRESS); 178 ab_pci->register_window = window; 179 } 180 } 181 182 static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci) 183 { 184 u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK); 185 u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK); 186 u32 window; 187 188 window = (umac_window << 12) | (ce_window << 6); 189 190 spin_lock_bh(&ab_pci->window_lock); 191 ab_pci->register_window = window; 192 spin_unlock_bh(&ab_pci->window_lock); 193 194 iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); 195 } 196 197 static u32 ath12k_pci_get_window_start(struct ath12k_base *ab, 198 u32 offset) 199 { 200 u32 window_start; 201 202 /* If offset lies within DP register range, use 3rd window */ 203 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) 204 window_start = 3 * WINDOW_START; 205 /* If offset lies within CE register range, use 2nd window */ 206 else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) 207 window_start = 2 * WINDOW_START; 208 else 209 window_start = WINDOW_START; 210 211 return window_start; 212 } 213 214 static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset) 215 { 216 return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END); 217 } 218 219 static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) 220 { 221 u32 val, delay; 222 223 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 224 225 val |= PCIE_SOC_GLOBAL_RESET_V; 226 227 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 228 229 /* TODO: exact time to sleep is uncertain */ 230 delay = 10; 231 mdelay(delay); 232 233 /* Need to toggle V bit back otherwise stuck in reset status */ 234 val &= ~PCIE_SOC_GLOBAL_RESET_V; 235 236 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 237 238 mdelay(delay); 239 240 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 241 if (val == 0xffffffff) 242 ath12k_warn(ab, "link down error during global reset\n"); 243 } 244 245 static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab) 246 { 247 u32 val; 248 249 /* read cookie */ 250 val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR); 251 ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val); 252 253 val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 254 ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 255 256 /* TODO: exact time to sleep is uncertain */ 257 mdelay(10); 258 259 /* write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from 260 * continuing warm path and entering dead loop. 261 */ 262 ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0); 263 mdelay(10); 264 265 val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY); 266 ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val); 267 268 /* A read clear register. clear the register to prevent 269 * Q6 from entering wrong code path. 270 */ 271 val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG); 272 ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val); 273 } 274 275 static void ath12k_pci_enable_ltssm(struct ath12k_base *ab) 276 { 277 u32 val; 278 int i; 279 280 val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 281 282 /* PCIE link seems very unstable after the Hot Reset*/ 283 for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) { 284 if (val == 0xffffffff) 285 mdelay(5); 286 287 ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE); 288 val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM); 289 } 290 291 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val); 292 293 val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 294 val |= GCC_GCC_PCIE_HOT_RST_VAL; 295 ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val); 296 val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST); 297 298 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val); 299 300 mdelay(5); 301 } 302 303 static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab) 304 { 305 /* This is a WAR for PCIE Hotreset. 306 * When target receive Hotreset, but will set the interrupt. 307 * So when download SBL again, SBL will open Interrupt and 308 * receive it, and crash immediately. 309 */ 310 ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL); 311 } 312 313 static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab) 314 { 315 u32 val; 316 317 val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG); 318 val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK; 319 ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val); 320 } 321 322 static void ath12k_pci_force_wake(struct ath12k_base *ab) 323 { 324 ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1); 325 mdelay(5); 326 } 327 328 static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on) 329 { 330 if (power_on) { 331 ath12k_pci_enable_ltssm(ab); 332 ath12k_pci_clear_all_intrs(ab); 333 ath12k_pci_set_wlaon_pwr_ctrl(ab); 334 } 335 336 ath12k_mhi_clear_vector(ab); 337 ath12k_pci_clear_dbg_registers(ab); 338 ath12k_pci_soc_global_reset(ab); 339 ath12k_mhi_set_mhictrl_reset(ab); 340 } 341 342 static void ath12k_pci_free_ext_irq(struct ath12k_base *ab) 343 { 344 int i, j; 345 346 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 347 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 348 349 for (j = 0; j < irq_grp->num_irq; j++) 350 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); 351 352 netif_napi_del(&irq_grp->napi); 353 free_netdev(irq_grp->napi_ndev); 354 } 355 } 356 357 static void ath12k_pci_free_irq(struct ath12k_base *ab) 358 { 359 int i, irq_idx; 360 361 for (i = 0; i < ab->hw_params->ce_count; i++) { 362 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 363 continue; 364 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 365 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); 366 } 367 368 ath12k_pci_free_ext_irq(ab); 369 } 370 371 static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id) 372 { 373 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 374 u32 irq_idx; 375 376 /* In case of one MSI vector, we handle irq enable/disable in a 377 * uniform way since we only have one irq 378 */ 379 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 380 return; 381 382 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; 383 enable_irq(ab->irq_num[irq_idx]); 384 } 385 386 static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id) 387 { 388 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 389 u32 irq_idx; 390 391 /* In case of one MSI vector, we handle irq enable/disable in a 392 * uniform way since we only have one irq 393 */ 394 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 395 return; 396 397 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id; 398 disable_irq_nosync(ab->irq_num[irq_idx]); 399 } 400 401 static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab) 402 { 403 int i; 404 405 clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); 406 407 for (i = 0; i < ab->hw_params->ce_count; i++) { 408 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 409 continue; 410 ath12k_pci_ce_irq_disable(ab, i); 411 } 412 } 413 414 static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab) 415 { 416 int i; 417 int irq_idx; 418 419 for (i = 0; i < ab->hw_params->ce_count; i++) { 420 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 421 continue; 422 423 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 424 synchronize_irq(ab->irq_num[irq_idx]); 425 } 426 } 427 428 static void ath12k_pci_ce_tasklet(struct tasklet_struct *t) 429 { 430 struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); 431 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; 432 433 ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); 434 435 enable_irq(ce_pipe->ab->irq_num[irq_idx]); 436 } 437 438 static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg) 439 { 440 struct ath12k_ce_pipe *ce_pipe = arg; 441 struct ath12k_base *ab = ce_pipe->ab; 442 int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num; 443 444 if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags)) 445 return IRQ_HANDLED; 446 447 /* last interrupt received for this CE */ 448 ce_pipe->timestamp = jiffies; 449 450 disable_irq_nosync(ab->irq_num[irq_idx]); 451 452 tasklet_schedule(&ce_pipe->intr_tq); 453 454 return IRQ_HANDLED; 455 } 456 457 static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp) 458 { 459 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); 460 int i; 461 462 /* In case of one MSI vector, we handle irq enable/disable 463 * in a uniform way since we only have one irq 464 */ 465 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 466 return; 467 468 for (i = 0; i < irq_grp->num_irq; i++) 469 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 470 } 471 472 static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 473 { 474 int i; 475 476 clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 477 478 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 479 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 480 481 ath12k_pci_ext_grp_disable(irq_grp); 482 483 napi_synchronize(&irq_grp->napi); 484 napi_disable(&irq_grp->napi); 485 } 486 } 487 488 static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp) 489 { 490 struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab); 491 int i; 492 493 /* In case of one MSI vector, we handle irq enable/disable in a 494 * uniform way since we only have one irq 495 */ 496 if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 497 return; 498 499 for (i = 0; i < irq_grp->num_irq; i++) 500 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 501 } 502 503 static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab) 504 { 505 int i, j, irq_idx; 506 507 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 508 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 509 510 for (j = 0; j < irq_grp->num_irq; j++) { 511 irq_idx = irq_grp->irqs[j]; 512 synchronize_irq(ab->irq_num[irq_idx]); 513 } 514 } 515 } 516 517 static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget) 518 { 519 struct ath12k_ext_irq_grp *irq_grp = container_of(napi, 520 struct ath12k_ext_irq_grp, 521 napi); 522 struct ath12k_base *ab = irq_grp->ab; 523 int work_done; 524 int i; 525 526 work_done = ath12k_dp_service_srng(ab, irq_grp, budget); 527 if (work_done < budget) { 528 napi_complete_done(napi, work_done); 529 for (i = 0; i < irq_grp->num_irq; i++) 530 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 531 } 532 533 if (work_done > budget) 534 work_done = budget; 535 536 return work_done; 537 } 538 539 static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg) 540 { 541 struct ath12k_ext_irq_grp *irq_grp = arg; 542 struct ath12k_base *ab = irq_grp->ab; 543 int i; 544 545 if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags)) 546 return IRQ_HANDLED; 547 548 ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq); 549 550 /* last interrupt received for this group */ 551 irq_grp->timestamp = jiffies; 552 553 for (i = 0; i < irq_grp->num_irq; i++) 554 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 555 556 napi_schedule(&irq_grp->napi); 557 558 return IRQ_HANDLED; 559 } 560 561 static int ath12k_pci_ext_irq_config(struct ath12k_base *ab) 562 { 563 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 564 int i, j, n, ret, num_vectors = 0; 565 u32 user_base_data = 0, base_vector = 0, base_idx; 566 struct ath12k_ext_irq_grp *irq_grp; 567 568 base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX; 569 ret = ath12k_pci_get_user_msi_assignment(ab, "DP", 570 &num_vectors, 571 &user_base_data, 572 &base_vector); 573 if (ret < 0) 574 return ret; 575 576 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 577 irq_grp = &ab->ext_irq_grp[i]; 578 u32 num_irq = 0; 579 580 irq_grp->ab = ab; 581 irq_grp->grp_id = i; 582 irq_grp->napi_ndev = alloc_netdev_dummy(0); 583 if (!irq_grp->napi_ndev) { 584 ret = -ENOMEM; 585 goto fail_allocate; 586 } 587 588 netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, 589 ath12k_pci_ext_grp_napi_poll); 590 591 if (ab->hw_params->ring_mask->tx[i] || 592 ab->hw_params->ring_mask->rx[i] || 593 ab->hw_params->ring_mask->rx_err[i] || 594 ab->hw_params->ring_mask->rx_wbm_rel[i] || 595 ab->hw_params->ring_mask->reo_status[i] || 596 ab->hw_params->ring_mask->host2rxdma[i] || 597 ab->hw_params->ring_mask->rx_mon_dest[i]) { 598 num_irq = 1; 599 } 600 601 irq_grp->num_irq = num_irq; 602 irq_grp->irqs[0] = base_idx + i; 603 604 for (j = 0; j < irq_grp->num_irq; j++) { 605 int irq_idx = irq_grp->irqs[j]; 606 int vector = (i % num_vectors) + base_vector; 607 int irq = ath12k_pci_get_msi_irq(ab->dev, vector); 608 609 ab->irq_num[irq_idx] = irq; 610 611 ath12k_dbg(ab, ATH12K_DBG_PCI, 612 "irq:%d group:%d\n", irq, i); 613 614 irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY); 615 ret = request_irq(irq, ath12k_pci_ext_interrupt_handler, 616 ab_pci->irq_flags, 617 "DP_EXT_IRQ", irq_grp); 618 if (ret) { 619 ath12k_err(ab, "failed request irq %d: %d\n", 620 vector, ret); 621 goto fail_request; 622 } 623 } 624 ath12k_pci_ext_grp_disable(irq_grp); 625 } 626 627 return 0; 628 629 fail_request: 630 /* i ->napi_ndev was properly allocated. Free it also */ 631 i += 1; 632 fail_allocate: 633 for (n = 0; n < i; n++) { 634 irq_grp = &ab->ext_irq_grp[n]; 635 free_netdev(irq_grp->napi_ndev); 636 } 637 return ret; 638 } 639 640 static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci, 641 const struct cpumask *m) 642 { 643 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 644 return 0; 645 646 return irq_set_affinity_hint(ab_pci->pdev->irq, m); 647 } 648 649 static int ath12k_pci_config_irq(struct ath12k_base *ab) 650 { 651 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 652 struct ath12k_ce_pipe *ce_pipe; 653 u32 msi_data_start; 654 u32 msi_data_count, msi_data_idx; 655 u32 msi_irq_start; 656 unsigned int msi_data; 657 int irq, i, ret, irq_idx; 658 659 ret = ath12k_pci_get_user_msi_assignment(ab, 660 "CE", &msi_data_count, 661 &msi_data_start, &msi_irq_start); 662 if (ret) 663 return ret; 664 665 /* Configure CE irqs */ 666 667 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { 668 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 669 continue; 670 671 msi_data = (msi_data_idx % msi_data_count) + msi_irq_start; 672 irq = ath12k_pci_get_msi_irq(ab->dev, msi_data); 673 ce_pipe = &ab->ce.ce_pipe[i]; 674 675 irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i; 676 677 tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet); 678 679 ret = request_irq(irq, ath12k_pci_ce_interrupt_handler, 680 ab_pci->irq_flags, irq_name[irq_idx], 681 ce_pipe); 682 if (ret) { 683 ath12k_err(ab, "failed to request irq %d: %d\n", 684 irq_idx, ret); 685 return ret; 686 } 687 688 ab->irq_num[irq_idx] = irq; 689 msi_data_idx++; 690 691 ath12k_pci_ce_irq_disable(ab, i); 692 } 693 694 ret = ath12k_pci_ext_irq_config(ab); 695 if (ret) 696 return ret; 697 698 return 0; 699 } 700 701 static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab) 702 { 703 struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; 704 705 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 706 struct pci_bus *bus = ab_pci->pdev->bus; 707 708 cfg->tgt_ce = ab->hw_params->target_ce_config; 709 cfg->tgt_ce_len = ab->hw_params->target_ce_count; 710 711 cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map; 712 cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len; 713 ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id; 714 715 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) { 716 ab_pci->qmi_instance = 717 u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) | 718 u32_encode_bits(bus->number, BUS_NUMBER_MASK); 719 ab->qmi.service_ins_id += ab_pci->qmi_instance; 720 } 721 } 722 723 static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab) 724 { 725 int i; 726 727 set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags); 728 729 for (i = 0; i < ab->hw_params->ce_count; i++) { 730 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 731 continue; 732 ath12k_pci_ce_irq_enable(ab, i); 733 } 734 } 735 736 static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable) 737 { 738 struct pci_dev *dev = ab_pci->pdev; 739 u16 control; 740 741 pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control); 742 743 if (enable) 744 control |= PCI_MSI_FLAGS_ENABLE; 745 else 746 control &= ~PCI_MSI_FLAGS_ENABLE; 747 748 pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control); 749 } 750 751 static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci) 752 { 753 ath12k_pci_msi_config(ab_pci, true); 754 } 755 756 static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci) 757 { 758 ath12k_pci_msi_config(ab_pci, false); 759 } 760 761 static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci) 762 { 763 struct ath12k_base *ab = ab_pci->ab; 764 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 765 struct msi_desc *msi_desc; 766 int num_vectors; 767 int ret; 768 769 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 770 msi_config->total_vectors, 771 msi_config->total_vectors, 772 PCI_IRQ_MSI); 773 774 if (num_vectors == msi_config->total_vectors) { 775 set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 776 ab_pci->irq_flags = IRQF_SHARED; 777 } else { 778 num_vectors = pci_alloc_irq_vectors(ab_pci->pdev, 779 1, 780 1, 781 PCI_IRQ_MSI); 782 if (num_vectors < 0) { 783 ret = -EINVAL; 784 goto reset_msi_config; 785 } 786 clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags); 787 ab_pci->msi_config = &msi_config_one_msi; 788 ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING; 789 ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n"); 790 } 791 792 ath12k_info(ab, "MSI vectors: %d\n", num_vectors); 793 794 ath12k_pci_msi_disable(ab_pci); 795 796 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 797 if (!msi_desc) { 798 ath12k_err(ab, "msi_desc is NULL!\n"); 799 ret = -EINVAL; 800 goto free_msi_vector; 801 } 802 803 ab_pci->msi_ep_base_data = msi_desc->msg.data; 804 if (msi_desc->pci.msi_attrib.is_64) 805 set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags); 806 807 ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data); 808 809 return 0; 810 811 free_msi_vector: 812 pci_free_irq_vectors(ab_pci->pdev); 813 814 reset_msi_config: 815 return ret; 816 } 817 818 static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci) 819 { 820 pci_free_irq_vectors(ab_pci->pdev); 821 } 822 823 static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci) 824 { 825 struct msi_desc *msi_desc; 826 827 msi_desc = irq_get_msi_desc(ab_pci->pdev->irq); 828 if (!msi_desc) { 829 ath12k_err(ab_pci->ab, "msi_desc is NULL!\n"); 830 pci_free_irq_vectors(ab_pci->pdev); 831 return -EINVAL; 832 } 833 834 ab_pci->msi_ep_base_data = msi_desc->msg.data; 835 836 ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n", 837 ab_pci->msi_ep_base_data); 838 839 return 0; 840 } 841 842 static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev) 843 { 844 struct ath12k_base *ab = ab_pci->ab; 845 u16 device_id; 846 int ret = 0; 847 848 pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id); 849 if (device_id != ab_pci->dev_id) { 850 ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n", 851 device_id, ab_pci->dev_id); 852 ret = -EIO; 853 goto out; 854 } 855 856 ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM); 857 if (ret) { 858 ath12k_err(ab, "failed to assign pci resource: %d\n", ret); 859 goto out; 860 } 861 862 ret = pci_enable_device(pdev); 863 if (ret) { 864 ath12k_err(ab, "failed to enable pci device: %d\n", ret); 865 goto out; 866 } 867 868 ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci"); 869 if (ret) { 870 ath12k_err(ab, "failed to request pci region: %d\n", ret); 871 goto disable_device; 872 } 873 874 ret = dma_set_mask_and_coherent(&pdev->dev, 875 DMA_BIT_MASK(ATH12K_PCI_DMA_MASK)); 876 if (ret) { 877 ath12k_err(ab, "failed to set pci dma mask to %d: %d\n", 878 ATH12K_PCI_DMA_MASK, ret); 879 goto release_region; 880 } 881 882 pci_set_master(pdev); 883 884 ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM); 885 ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0); 886 if (!ab->mem) { 887 ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM); 888 ret = -EIO; 889 goto release_region; 890 } 891 892 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem); 893 return 0; 894 895 release_region: 896 pci_release_region(pdev, ATH12K_PCI_BAR_NUM); 897 disable_device: 898 pci_disable_device(pdev); 899 out: 900 return ret; 901 } 902 903 static void ath12k_pci_free_region(struct ath12k_pci *ab_pci) 904 { 905 struct ath12k_base *ab = ab_pci->ab; 906 struct pci_dev *pci_dev = ab_pci->pdev; 907 908 pci_iounmap(pci_dev, ab->mem); 909 ab->mem = NULL; 910 pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM); 911 if (pci_is_enabled(pci_dev)) 912 pci_disable_device(pci_dev); 913 } 914 915 static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) 916 { 917 struct ath12k_base *ab = ab_pci->ab; 918 919 pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, 920 &ab_pci->link_ctl); 921 922 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n", 923 ab_pci->link_ctl, 924 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), 925 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); 926 927 /* disable L0s and L1 */ 928 pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, 929 PCI_EXP_LNKCTL_ASPMC); 930 931 set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); 932 } 933 934 static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab) 935 { 936 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 937 u32 reg; 938 939 /* On platforms with two or more identical mhi devices, qmi service run 940 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup 941 * cannot register more than one qmi service with identical node ID. 942 * 943 * This generates a unique instance ID from PCIe domain number and bus number, 944 * writes to the given register, it is available for firmware when the QMI service 945 * is spawned. 946 */ 947 reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK; 948 ath12k_pci_write32(ab, reg, ab_pci->qmi_instance); 949 950 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n", 951 reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg)); 952 } 953 954 static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci) 955 { 956 if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags)) 957 pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL, 958 PCI_EXP_LNKCTL_ASPMC, 959 ab_pci->link_ctl & 960 PCI_EXP_LNKCTL_ASPMC); 961 } 962 963 static void ath12k_pci_kill_tasklets(struct ath12k_base *ab) 964 { 965 int i; 966 967 for (i = 0; i < ab->hw_params->ce_count; i++) { 968 struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 969 970 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 971 continue; 972 973 tasklet_kill(&ce_pipe->intr_tq); 974 } 975 } 976 977 static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab) 978 { 979 ath12k_pci_ce_irqs_disable(ab); 980 ath12k_pci_sync_ce_irqs(ab); 981 ath12k_pci_kill_tasklets(ab); 982 } 983 984 int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id, 985 u8 *ul_pipe, u8 *dl_pipe) 986 { 987 const struct service_to_pipe *entry; 988 bool ul_set = false, dl_set = false; 989 int i; 990 991 for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) { 992 entry = &ab->hw_params->svc_to_ce_map[i]; 993 994 if (__le32_to_cpu(entry->service_id) != service_id) 995 continue; 996 997 switch (__le32_to_cpu(entry->pipedir)) { 998 case PIPEDIR_NONE: 999 break; 1000 case PIPEDIR_IN: 1001 WARN_ON(dl_set); 1002 *dl_pipe = __le32_to_cpu(entry->pipenum); 1003 dl_set = true; 1004 break; 1005 case PIPEDIR_OUT: 1006 WARN_ON(ul_set); 1007 *ul_pipe = __le32_to_cpu(entry->pipenum); 1008 ul_set = true; 1009 break; 1010 case PIPEDIR_INOUT: 1011 WARN_ON(dl_set); 1012 WARN_ON(ul_set); 1013 *dl_pipe = __le32_to_cpu(entry->pipenum); 1014 *ul_pipe = __le32_to_cpu(entry->pipenum); 1015 dl_set = true; 1016 ul_set = true; 1017 break; 1018 } 1019 } 1020 1021 if (WARN_ON(!ul_set || !dl_set)) 1022 return -ENOENT; 1023 1024 return 0; 1025 } 1026 1027 int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector) 1028 { 1029 struct pci_dev *pci_dev = to_pci_dev(dev); 1030 1031 return pci_irq_vector(pci_dev, vector); 1032 } 1033 1034 int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name, 1035 int *num_vectors, u32 *user_base_data, 1036 u32 *base_vector) 1037 { 1038 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1039 const struct ath12k_msi_config *msi_config = ab_pci->msi_config; 1040 int idx; 1041 1042 for (idx = 0; idx < msi_config->total_users; idx++) { 1043 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 1044 *num_vectors = msi_config->users[idx].num_vectors; 1045 *base_vector = msi_config->users[idx].base_vector; 1046 *user_base_data = *base_vector + ab_pci->msi_ep_base_data; 1047 1048 ath12k_dbg(ab, ATH12K_DBG_PCI, 1049 "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n", 1050 user_name, *num_vectors, *user_base_data, 1051 *base_vector); 1052 1053 return 0; 1054 } 1055 } 1056 1057 ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 1058 1059 return -EINVAL; 1060 } 1061 1062 void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo, 1063 u32 *msi_addr_hi) 1064 { 1065 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1066 struct pci_dev *pci_dev = to_pci_dev(ab->dev); 1067 1068 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO, 1069 msi_addr_lo); 1070 1071 if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) { 1072 pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI, 1073 msi_addr_hi); 1074 } else { 1075 *msi_addr_hi = 0; 1076 } 1077 } 1078 1079 void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id, 1080 u32 *msi_idx) 1081 { 1082 u32 i, msi_data_idx; 1083 1084 for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) { 1085 if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 1086 continue; 1087 1088 if (ce_id == i) 1089 break; 1090 1091 msi_data_idx++; 1092 } 1093 *msi_idx = msi_data_idx; 1094 } 1095 1096 void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab) 1097 { 1098 ath12k_pci_ce_irqs_enable(ab); 1099 } 1100 1101 void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab) 1102 { 1103 ath12k_pci_ce_irq_disable_sync(ab); 1104 } 1105 1106 void ath12k_pci_ext_irq_enable(struct ath12k_base *ab) 1107 { 1108 int i; 1109 1110 for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) { 1111 struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 1112 1113 napi_enable(&irq_grp->napi); 1114 ath12k_pci_ext_grp_enable(irq_grp); 1115 } 1116 1117 set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags); 1118 } 1119 1120 void ath12k_pci_ext_irq_disable(struct ath12k_base *ab) 1121 { 1122 __ath12k_pci_ext_irq_disable(ab); 1123 ath12k_pci_sync_ext_irqs(ab); 1124 } 1125 1126 int ath12k_pci_hif_suspend(struct ath12k_base *ab) 1127 { 1128 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1129 1130 ath12k_mhi_suspend(ar_pci); 1131 1132 return 0; 1133 } 1134 1135 int ath12k_pci_hif_resume(struct ath12k_base *ab) 1136 { 1137 struct ath12k_pci *ar_pci = ath12k_pci_priv(ab); 1138 1139 ath12k_mhi_resume(ar_pci); 1140 1141 return 0; 1142 } 1143 1144 void ath12k_pci_stop(struct ath12k_base *ab) 1145 { 1146 ath12k_pci_ce_irq_disable_sync(ab); 1147 ath12k_ce_cleanup_pipes(ab); 1148 } 1149 1150 int ath12k_pci_start(struct ath12k_base *ab) 1151 { 1152 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1153 1154 set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1155 1156 if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags)) 1157 ath12k_pci_aspm_restore(ab_pci); 1158 else 1159 ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n"); 1160 1161 ath12k_pci_ce_irqs_enable(ab); 1162 ath12k_ce_rx_post_buf(ab); 1163 1164 return 0; 1165 } 1166 1167 u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset) 1168 { 1169 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1170 u32 val, window_start; 1171 int ret = 0; 1172 1173 /* for offset beyond BAR + 4K - 32, may 1174 * need to wakeup MHI to access. 1175 */ 1176 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1177 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1178 ret = ab_pci->pci_ops->wakeup(ab); 1179 1180 if (offset < WINDOW_START) { 1181 val = ioread32(ab->mem + offset); 1182 } else { 1183 if (ab->static_window_map) 1184 window_start = ath12k_pci_get_window_start(ab, offset); 1185 else 1186 window_start = WINDOW_START; 1187 1188 if (window_start == WINDOW_START) { 1189 spin_lock_bh(&ab_pci->window_lock); 1190 ath12k_pci_select_window(ab_pci, offset); 1191 1192 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1193 offset = offset - PCI_MHIREGLEN_REG; 1194 val = ioread32(ab->mem + 1195 (offset & WINDOW_RANGE_MASK)); 1196 } else { 1197 val = ioread32(ab->mem + window_start + 1198 (offset & WINDOW_RANGE_MASK)); 1199 } 1200 spin_unlock_bh(&ab_pci->window_lock); 1201 } else { 1202 val = ioread32(ab->mem + window_start + 1203 (offset & WINDOW_RANGE_MASK)); 1204 } 1205 } 1206 1207 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1208 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1209 !ret) 1210 ab_pci->pci_ops->release(ab); 1211 return val; 1212 } 1213 1214 void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value) 1215 { 1216 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1217 u32 window_start; 1218 int ret = 0; 1219 1220 /* for offset beyond BAR + 4K - 32, may 1221 * need to wakeup MHI to access. 1222 */ 1223 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1224 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup) 1225 ret = ab_pci->pci_ops->wakeup(ab); 1226 1227 if (offset < WINDOW_START) { 1228 iowrite32(value, ab->mem + offset); 1229 } else { 1230 if (ab->static_window_map) 1231 window_start = ath12k_pci_get_window_start(ab, offset); 1232 else 1233 window_start = WINDOW_START; 1234 1235 if (window_start == WINDOW_START) { 1236 spin_lock_bh(&ab_pci->window_lock); 1237 ath12k_pci_select_window(ab_pci, offset); 1238 1239 if (ath12k_pci_is_offset_within_mhi_region(offset)) { 1240 offset = offset - PCI_MHIREGLEN_REG; 1241 iowrite32(value, ab->mem + 1242 (offset & WINDOW_RANGE_MASK)); 1243 } else { 1244 iowrite32(value, ab->mem + window_start + 1245 (offset & WINDOW_RANGE_MASK)); 1246 } 1247 spin_unlock_bh(&ab_pci->window_lock); 1248 } else { 1249 iowrite32(value, ab->mem + window_start + 1250 (offset & WINDOW_RANGE_MASK)); 1251 } 1252 } 1253 1254 if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) && 1255 offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release && 1256 !ret) 1257 ab_pci->pci_ops->release(ab); 1258 } 1259 1260 int ath12k_pci_power_up(struct ath12k_base *ab) 1261 { 1262 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1263 int ret; 1264 1265 ab_pci->register_window = 0; 1266 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1267 ath12k_pci_sw_reset(ab_pci->ab, true); 1268 1269 /* Disable ASPM during firmware download due to problems switching 1270 * to AMSS state. 1271 */ 1272 ath12k_pci_aspm_disable(ab_pci); 1273 1274 ath12k_pci_msi_enable(ab_pci); 1275 1276 if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) 1277 ath12k_pci_update_qrtr_node_id(ab); 1278 1279 ret = ath12k_mhi_start(ab_pci); 1280 if (ret) { 1281 ath12k_err(ab, "failed to start mhi: %d\n", ret); 1282 return ret; 1283 } 1284 1285 if (ab->static_window_map) 1286 ath12k_pci_select_static_window(ab_pci); 1287 1288 return 0; 1289 } 1290 1291 void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend) 1292 { 1293 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1294 1295 /* restore aspm in case firmware bootup fails */ 1296 ath12k_pci_aspm_restore(ab_pci); 1297 1298 ath12k_pci_force_wake(ab_pci->ab); 1299 ath12k_pci_msi_disable(ab_pci); 1300 ath12k_mhi_stop(ab_pci, is_suspend); 1301 clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags); 1302 ath12k_pci_sw_reset(ab_pci->ab, false); 1303 } 1304 1305 static int ath12k_pci_panic_handler(struct ath12k_base *ab) 1306 { 1307 ath12k_pci_sw_reset(ab, false); 1308 1309 return NOTIFY_OK; 1310 } 1311 1312 static const struct ath12k_hif_ops ath12k_pci_hif_ops = { 1313 .start = ath12k_pci_start, 1314 .stop = ath12k_pci_stop, 1315 .read32 = ath12k_pci_read32, 1316 .write32 = ath12k_pci_write32, 1317 .power_down = ath12k_pci_power_down, 1318 .power_up = ath12k_pci_power_up, 1319 .suspend = ath12k_pci_hif_suspend, 1320 .resume = ath12k_pci_hif_resume, 1321 .irq_enable = ath12k_pci_ext_irq_enable, 1322 .irq_disable = ath12k_pci_ext_irq_disable, 1323 .get_msi_address = ath12k_pci_get_msi_address, 1324 .get_user_msi_vector = ath12k_pci_get_user_msi_assignment, 1325 .map_service_to_pipe = ath12k_pci_map_service_to_pipe, 1326 .ce_irq_enable = ath12k_pci_hif_ce_irq_enable, 1327 .ce_irq_disable = ath12k_pci_hif_ce_irq_disable, 1328 .get_ce_msi_idx = ath12k_pci_get_ce_msi_idx, 1329 .panic_handler = ath12k_pci_panic_handler, 1330 }; 1331 1332 static 1333 void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor) 1334 { 1335 u32 soc_hw_version; 1336 1337 soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION); 1338 *major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK, 1339 soc_hw_version); 1340 *minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK, 1341 soc_hw_version); 1342 1343 ath12k_dbg(ab, ATH12K_DBG_PCI, 1344 "pci tcsr_soc_hw_version major %d minor %d\n", 1345 *major, *minor); 1346 } 1347 1348 static int ath12k_pci_probe(struct pci_dev *pdev, 1349 const struct pci_device_id *pci_dev) 1350 { 1351 struct ath12k_base *ab; 1352 struct ath12k_pci *ab_pci; 1353 u32 soc_hw_version_major, soc_hw_version_minor; 1354 int ret; 1355 1356 ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI); 1357 if (!ab) { 1358 dev_err(&pdev->dev, "failed to allocate ath12k base\n"); 1359 return -ENOMEM; 1360 } 1361 1362 ab->dev = &pdev->dev; 1363 pci_set_drvdata(pdev, ab); 1364 ab_pci = ath12k_pci_priv(ab); 1365 ab_pci->dev_id = pci_dev->device; 1366 ab_pci->ab = ab; 1367 ab_pci->pdev = pdev; 1368 ab->hif.ops = &ath12k_pci_hif_ops; 1369 pci_set_drvdata(pdev, ab); 1370 spin_lock_init(&ab_pci->window_lock); 1371 1372 ret = ath12k_pci_claim(ab_pci, pdev); 1373 if (ret) { 1374 ath12k_err(ab, "failed to claim device: %d\n", ret); 1375 goto err_free_core; 1376 } 1377 1378 ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n", 1379 pdev->vendor, pdev->device, 1380 pdev->subsystem_vendor, pdev->subsystem_device); 1381 1382 ab->id.vendor = pdev->vendor; 1383 ab->id.device = pdev->device; 1384 ab->id.subsystem_vendor = pdev->subsystem_vendor; 1385 ab->id.subsystem_device = pdev->subsystem_device; 1386 1387 switch (pci_dev->device) { 1388 case QCN9274_DEVICE_ID: 1389 ab_pci->msi_config = &ath12k_msi_config[0]; 1390 ab->static_window_map = true; 1391 ab_pci->pci_ops = &ath12k_pci_ops_qcn9274; 1392 ab->hal_rx_ops = &hal_rx_qcn9274_ops; 1393 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1394 &soc_hw_version_minor); 1395 switch (soc_hw_version_major) { 1396 case ATH12K_PCI_SOC_HW_VERSION_2: 1397 ab->hw_rev = ATH12K_HW_QCN9274_HW20; 1398 break; 1399 case ATH12K_PCI_SOC_HW_VERSION_1: 1400 ab->hw_rev = ATH12K_HW_QCN9274_HW10; 1401 break; 1402 default: 1403 dev_err(&pdev->dev, 1404 "Unknown hardware version found for QCN9274: 0x%x\n", 1405 soc_hw_version_major); 1406 ret = -EOPNOTSUPP; 1407 goto err_pci_free_region; 1408 } 1409 break; 1410 case WCN7850_DEVICE_ID: 1411 ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD; 1412 ab_pci->msi_config = &ath12k_msi_config[0]; 1413 ab->static_window_map = false; 1414 ab_pci->pci_ops = &ath12k_pci_ops_wcn7850; 1415 ab->hal_rx_ops = &hal_rx_wcn7850_ops; 1416 ath12k_pci_read_hw_version(ab, &soc_hw_version_major, 1417 &soc_hw_version_minor); 1418 switch (soc_hw_version_major) { 1419 case ATH12K_PCI_SOC_HW_VERSION_2: 1420 ab->hw_rev = ATH12K_HW_WCN7850_HW20; 1421 break; 1422 default: 1423 dev_err(&pdev->dev, 1424 "Unknown hardware version found for WCN7850: 0x%x\n", 1425 soc_hw_version_major); 1426 ret = -EOPNOTSUPP; 1427 goto err_pci_free_region; 1428 } 1429 break; 1430 1431 default: 1432 dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n", 1433 pci_dev->device); 1434 ret = -EOPNOTSUPP; 1435 goto err_pci_free_region; 1436 } 1437 1438 ret = ath12k_pci_msi_alloc(ab_pci); 1439 if (ret) { 1440 ath12k_err(ab, "failed to alloc msi: %d\n", ret); 1441 goto err_pci_free_region; 1442 } 1443 1444 ret = ath12k_core_pre_init(ab); 1445 if (ret) 1446 goto err_pci_msi_free; 1447 1448 ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0)); 1449 if (ret) { 1450 ath12k_err(ab, "failed to set irq affinity %d\n", ret); 1451 goto err_pci_msi_free; 1452 } 1453 1454 ret = ath12k_mhi_register(ab_pci); 1455 if (ret) { 1456 ath12k_err(ab, "failed to register mhi: %d\n", ret); 1457 goto err_irq_affinity_cleanup; 1458 } 1459 1460 ret = ath12k_hal_srng_init(ab); 1461 if (ret) 1462 goto err_mhi_unregister; 1463 1464 ret = ath12k_ce_alloc_pipes(ab); 1465 if (ret) { 1466 ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret); 1467 goto err_hal_srng_deinit; 1468 } 1469 1470 ath12k_pci_init_qmi_ce_config(ab); 1471 1472 ret = ath12k_pci_config_irq(ab); 1473 if (ret) { 1474 ath12k_err(ab, "failed to config irq: %d\n", ret); 1475 goto err_ce_free; 1476 } 1477 1478 /* kernel may allocate a dummy vector before request_irq and 1479 * then allocate a real vector when request_irq is called. 1480 * So get msi_data here again to avoid spurious interrupt 1481 * as msi_data will configured to srngs. 1482 */ 1483 ret = ath12k_pci_config_msi_data(ab_pci); 1484 if (ret) { 1485 ath12k_err(ab, "failed to config msi_data: %d\n", ret); 1486 goto err_free_irq; 1487 } 1488 1489 ret = ath12k_core_init(ab); 1490 if (ret) { 1491 ath12k_err(ab, "failed to init core: %d\n", ret); 1492 goto err_free_irq; 1493 } 1494 return 0; 1495 1496 err_free_irq: 1497 ath12k_pci_free_irq(ab); 1498 1499 err_ce_free: 1500 ath12k_ce_free_pipes(ab); 1501 1502 err_hal_srng_deinit: 1503 ath12k_hal_srng_deinit(ab); 1504 1505 err_mhi_unregister: 1506 ath12k_mhi_unregister(ab_pci); 1507 1508 err_pci_msi_free: 1509 ath12k_pci_msi_free(ab_pci); 1510 1511 err_irq_affinity_cleanup: 1512 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1513 1514 err_pci_free_region: 1515 ath12k_pci_free_region(ab_pci); 1516 1517 err_free_core: 1518 ath12k_core_free(ab); 1519 1520 return ret; 1521 } 1522 1523 static void ath12k_pci_remove(struct pci_dev *pdev) 1524 { 1525 struct ath12k_base *ab = pci_get_drvdata(pdev); 1526 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1527 1528 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1529 1530 if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) { 1531 ath12k_pci_power_down(ab, false); 1532 ath12k_qmi_deinit_service(ab); 1533 goto qmi_fail; 1534 } 1535 1536 set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags); 1537 1538 cancel_work_sync(&ab->reset_work); 1539 ath12k_core_deinit(ab); 1540 1541 qmi_fail: 1542 ath12k_mhi_unregister(ab_pci); 1543 1544 ath12k_pci_free_irq(ab); 1545 ath12k_pci_msi_free(ab_pci); 1546 ath12k_pci_free_region(ab_pci); 1547 1548 ath12k_hal_srng_deinit(ab); 1549 ath12k_ce_free_pipes(ab); 1550 ath12k_core_free(ab); 1551 } 1552 1553 static void ath12k_pci_shutdown(struct pci_dev *pdev) 1554 { 1555 struct ath12k_base *ab = pci_get_drvdata(pdev); 1556 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 1557 1558 ath12k_pci_set_irq_affinity_hint(ab_pci, NULL); 1559 ath12k_pci_power_down(ab, false); 1560 } 1561 1562 static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev) 1563 { 1564 struct ath12k_base *ab = dev_get_drvdata(dev); 1565 int ret; 1566 1567 ret = ath12k_core_suspend(ab); 1568 if (ret) 1569 ath12k_warn(ab, "failed to suspend core: %d\n", ret); 1570 1571 return ret; 1572 } 1573 1574 static __maybe_unused int ath12k_pci_pm_resume(struct device *dev) 1575 { 1576 struct ath12k_base *ab = dev_get_drvdata(dev); 1577 int ret; 1578 1579 ret = ath12k_core_resume(ab); 1580 if (ret) 1581 ath12k_warn(ab, "failed to resume core: %d\n", ret); 1582 1583 return ret; 1584 } 1585 1586 static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev) 1587 { 1588 struct ath12k_base *ab = dev_get_drvdata(dev); 1589 int ret; 1590 1591 ret = ath12k_core_suspend_late(ab); 1592 if (ret) 1593 ath12k_warn(ab, "failed to late suspend core: %d\n", ret); 1594 1595 return ret; 1596 } 1597 1598 static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev) 1599 { 1600 struct ath12k_base *ab = dev_get_drvdata(dev); 1601 int ret; 1602 1603 ret = ath12k_core_resume_early(ab); 1604 if (ret) 1605 ath12k_warn(ab, "failed to early resume core: %d\n", ret); 1606 1607 return ret; 1608 } 1609 1610 static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = { 1611 SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend, 1612 ath12k_pci_pm_resume) 1613 SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late, 1614 ath12k_pci_pm_resume_early) 1615 }; 1616 1617 static struct pci_driver ath12k_pci_driver = { 1618 .name = "ath12k_pci", 1619 .id_table = ath12k_pci_id_table, 1620 .probe = ath12k_pci_probe, 1621 .remove = ath12k_pci_remove, 1622 .shutdown = ath12k_pci_shutdown, 1623 .driver.pm = &ath12k_pci_pm_ops, 1624 }; 1625 1626 static int ath12k_pci_init(void) 1627 { 1628 int ret; 1629 1630 ret = pci_register_driver(&ath12k_pci_driver); 1631 if (ret) { 1632 pr_err("failed to register ath12k pci driver: %d\n", 1633 ret); 1634 return ret; 1635 } 1636 1637 return 0; 1638 } 1639 module_init(ath12k_pci_init); 1640 1641 static void ath12k_pci_exit(void) 1642 { 1643 pci_unregister_driver(&ath12k_pci_driver); 1644 } 1645 1646 module_exit(ath12k_pci_exit); 1647 1648 MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices"); 1649 MODULE_LICENSE("Dual BSD/GPL"); 1650