// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved.
 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved.
 */

#include <linux/module.h>
#include <linux/msi.h>
#include <linux/pci.h>

#include "pci.h"
#include "core.h"
#include "hif.h"
#include "mhi.h"
#include "debug.h"

#define ATH12K_PCI_BAR_NUM		0
#define ATH12K_PCI_DMA_MASK		32

#define ATH12K_PCI_IRQ_CE0_OFFSET	3

#define WINDOW_ENABLE_BIT		0x40000000
#define WINDOW_REG_ADDRESS		0x310c
#define WINDOW_VALUE_MASK		GENMASK(24, 19)
#define WINDOW_START			0x80000
#define WINDOW_RANGE_MASK		GENMASK(18, 0)
#define WINDOW_STATIC_MASK		GENMASK(31, 6)

#define TCSR_SOC_HW_VERSION		0x1B00000
#define TCSR_SOC_HW_VERSION_MAJOR_MASK	GENMASK(11, 8)
#define TCSR_SOC_HW_VERSION_MINOR_MASK	GENMASK(7, 4)

/* BAR0 + 4k is always accessible, so there is no need to force a wakeup.
 * 4K - 32 = 0xFE0
 */
#define ACCESS_ALWAYS_OFF 0xFE0

#define QCN9274_DEVICE_ID		0x1109
#define WCN7850_DEVICE_ID		0x1107

#define PCIE_LOCAL_REG_QRTR_NODE_ID	0x1E03164
#define DOMAIN_NUMBER_MASK		GENMASK(7, 4)
#define BUS_NUMBER_MASK			GENMASK(3, 0)

static const struct pci_device_id ath12k_pci_id_table[] = {
	{ PCI_VDEVICE(QCOM, QCN9274_DEVICE_ID) },
	{ PCI_VDEVICE(QCOM, WCN7850_DEVICE_ID) },
	{0}
};

MODULE_DEVICE_TABLE(pci, ath12k_pci_id_table);

/* TODO: revisit IRQ mapping for new SRNGs */
static const struct ath12k_msi_config ath12k_msi_config[] = {
	{
		.total_vectors = 16,
		.total_users = 3,
		.users = (struct ath12k_msi_user[]) {
			{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
			{ .name = "CE", .num_vectors = 5, .base_vector = 3 },
			{ .name = "DP", .num_vectors = 8, .base_vector = 8 },
		},
	},
};

static const struct ath12k_msi_config msi_config_one_msi = {
	.total_vectors = 1,
	.total_users = 4,
	.users = (struct ath12k_msi_user[]) {
		{ .name = "MHI", .num_vectors = 3, .base_vector = 0 },
		{ .name = "CE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "WAKE", .num_vectors = 1, .base_vector = 0 },
		{ .name = "DP", .num_vectors = 1, .base_vector = 0 },
	},
};

static const char *irq_name[ATH12K_IRQ_NUM_MAX] = {
	"bhi",
	"mhi-er0",
	"mhi-er1",
	"ce0",
	"ce1",
	"ce2",
	"ce3",
	"ce4",
	"ce5",
	"ce6",
	"ce7",
	"ce8",
	"ce9",
	"ce10",
	"ce11",
	"ce12",
	"ce13",
	"ce14",
	"ce15",
	"host2wbm-desc-feed",
	"host2reo-re-injection",
	"host2reo-command",
	"host2rxdma-monitor-ring3",
	"host2rxdma-monitor-ring2",
	"host2rxdma-monitor-ring1",
	"reo2host-exception",
	"wbm2host-rx-release",
	"reo2host-status",
	"reo2host-destination-ring4",
	"reo2host-destination-ring3",
	"reo2host-destination-ring2",
	"reo2host-destination-ring1",
	"rxdma2host-monitor-destination-mac3",
	"rxdma2host-monitor-destination-mac2",
	"rxdma2host-monitor-destination-mac1",
	"ppdu-end-interrupts-mac3",
	"ppdu-end-interrupts-mac2",
	"ppdu-end-interrupts-mac1",
	"rxdma2host-monitor-status-ring-mac3",
	"rxdma2host-monitor-status-ring-mac2",
	"rxdma2host-monitor-status-ring-mac1",
	"host2rxdma-host-buf-ring-mac3",
	"host2rxdma-host-buf-ring-mac2",
	"host2rxdma-host-buf-ring-mac1",
	"rxdma2host-destination-ring-mac3",
	"rxdma2host-destination-ring-mac2",
	"rxdma2host-destination-ring-mac1",
	"host2tcl-input-ring4",
"host2tcl-input-ring3", 128 "host2tcl-input-ring2", 129 "host2tcl-input-ring1", 130 "wbm2host-tx-completions-ring4", 131 "wbm2host-tx-completions-ring3", 132 "wbm2host-tx-completions-ring2", 133 "wbm2host-tx-completions-ring1", 134 "tcl2host-status-ring", 135 }; 136 137 static int ath12k_pci_bus_wake_up(struct ath12k_base *ab) 138 { 139 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 140 141 return mhi_device_get_sync(ab_pci->mhi_ctrl->mhi_dev); 142 } 143 144 static void ath12k_pci_bus_release(struct ath12k_base *ab) 145 { 146 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 147 148 mhi_device_put(ab_pci->mhi_ctrl->mhi_dev); 149 } 150 151 static const struct ath12k_pci_ops ath12k_pci_ops_qcn9274 = { 152 .wakeup = NULL, 153 .release = NULL, 154 }; 155 156 static const struct ath12k_pci_ops ath12k_pci_ops_wcn7850 = { 157 .wakeup = ath12k_pci_bus_wake_up, 158 .release = ath12k_pci_bus_release, 159 }; 160 161 static void ath12k_pci_select_window(struct ath12k_pci *ab_pci, u32 offset) 162 { 163 struct ath12k_base *ab = ab_pci->ab; 164 165 u32 window = u32_get_bits(offset, WINDOW_VALUE_MASK); 166 u32 static_window; 167 168 lockdep_assert_held(&ab_pci->window_lock); 169 170 /* Preserve the static window configuration and reset only dynamic window */ 171 static_window = ab_pci->register_window & WINDOW_STATIC_MASK; 172 window |= static_window; 173 174 if (window != ab_pci->register_window) { 175 iowrite32(WINDOW_ENABLE_BIT | window, 176 ab->mem + WINDOW_REG_ADDRESS); 177 ioread32(ab->mem + WINDOW_REG_ADDRESS); 178 ab_pci->register_window = window; 179 } 180 } 181 182 static void ath12k_pci_select_static_window(struct ath12k_pci *ab_pci) 183 { 184 u32 umac_window = u32_get_bits(HAL_SEQ_WCSS_UMAC_OFFSET, WINDOW_VALUE_MASK); 185 u32 ce_window = u32_get_bits(HAL_CE_WFSS_CE_REG_BASE, WINDOW_VALUE_MASK); 186 u32 window; 187 188 window = (umac_window << 12) | (ce_window << 6); 189 190 spin_lock_bh(&ab_pci->window_lock); 191 ab_pci->register_window = window; 192 spin_unlock_bh(&ab_pci->window_lock); 193 194 iowrite32(WINDOW_ENABLE_BIT | window, ab_pci->ab->mem + WINDOW_REG_ADDRESS); 195 } 196 197 static u32 ath12k_pci_get_window_start(struct ath12k_base *ab, 198 u32 offset) 199 { 200 u32 window_start; 201 202 /* If offset lies within DP register range, use 3rd window */ 203 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < WINDOW_RANGE_MASK) 204 window_start = 3 * WINDOW_START; 205 /* If offset lies within CE register range, use 2nd window */ 206 else if ((offset ^ HAL_CE_WFSS_CE_REG_BASE) < WINDOW_RANGE_MASK) 207 window_start = 2 * WINDOW_START; 208 else 209 window_start = WINDOW_START; 210 211 return window_start; 212 } 213 214 static inline bool ath12k_pci_is_offset_within_mhi_region(u32 offset) 215 { 216 return (offset >= PCI_MHIREGLEN_REG && offset <= PCI_MHI_REGION_END); 217 } 218 219 static void ath12k_pci_soc_global_reset(struct ath12k_base *ab) 220 { 221 u32 val, delay; 222 223 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 224 225 val |= PCIE_SOC_GLOBAL_RESET_V; 226 227 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 228 229 /* TODO: exact time to sleep is uncertain */ 230 delay = 10; 231 mdelay(delay); 232 233 /* Need to toggle V bit back otherwise stuck in reset status */ 234 val &= ~PCIE_SOC_GLOBAL_RESET_V; 235 236 ath12k_pci_write32(ab, PCIE_SOC_GLOBAL_RESET, val); 237 238 mdelay(delay); 239 240 val = ath12k_pci_read32(ab, PCIE_SOC_GLOBAL_RESET); 241 if (val == 0xffffffff) 242 ath12k_warn(ab, "link down error during global reset\n"); 243 } 244 245 static void 
static void ath12k_pci_clear_dbg_registers(struct ath12k_base *ab)
{
	u32 val;

	/* read cookie */
	val = ath12k_pci_read32(ab, PCIE_Q6_COOKIE_ADDR);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "cookie:0x%x\n", val);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* TODO: exact time to sleep is uncertain */
	mdelay(10);

	/* Write 0 to WLAON_WARM_SW_ENTRY to prevent Q6 from continuing
	 * down the warm path and entering a dead loop.
	 */
	ath12k_pci_write32(ab, WLAON_WARM_SW_ENTRY, 0);
	mdelay(10);

	val = ath12k_pci_read32(ab, WLAON_WARM_SW_ENTRY);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "WLAON_WARM_SW_ENTRY 0x%x\n", val);

	/* This is a read-to-clear register, so read it to prevent
	 * Q6 from entering the wrong code path.
	 */
	val = ath12k_pci_read32(ab, WLAON_SOC_RESET_CAUSE_REG);
	ath12k_dbg(ab, ATH12K_DBG_PCI, "soc reset cause:%d\n", val);
}

static void ath12k_pci_enable_ltssm(struct ath12k_base *ab)
{
	u32 val;
	int i;

	val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);

	/* The PCIE link seems very unstable after the Hot Reset */
	for (i = 0; val != PARM_LTSSM_VALUE && i < 5; i++) {
		if (val == 0xffffffff)
			mdelay(5);

		ath12k_pci_write32(ab, PCIE_PCIE_PARF_LTSSM, PARM_LTSSM_VALUE);
		val = ath12k_pci_read32(ab, PCIE_PCIE_PARF_LTSSM);
	}

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci ltssm 0x%x\n", val);

	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);
	val |= GCC_GCC_PCIE_HOT_RST_VAL;
	ath12k_pci_write32(ab, GCC_GCC_PCIE_HOT_RST, val);
	val = ath12k_pci_read32(ab, GCC_GCC_PCIE_HOT_RST);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci pcie_hot_rst 0x%x\n", val);

	mdelay(5);
}

static void ath12k_pci_clear_all_intrs(struct ath12k_base *ab)
{
	/* This is a workaround for a PCIe hot reset issue: a hot reset may
	 * leave a stale interrupt latched in the target, so when the SBL is
	 * downloaded again it unmasks interrupts, services the stale one,
	 * and crashes immediately.
	 */
	ath12k_pci_write32(ab, PCIE_PCIE_INT_ALL_CLEAR, PCIE_INT_CLEAR_ALL);
}

static void ath12k_pci_set_wlaon_pwr_ctrl(struct ath12k_base *ab)
{
	u32 val;

	val = ath12k_pci_read32(ab, WLAON_QFPROM_PWR_CTRL_REG);
	val &= ~QFPROM_PWR_CTRL_VDD4BLOW_MASK;
	ath12k_pci_write32(ab, WLAON_QFPROM_PWR_CTRL_REG, val);
}

static void ath12k_pci_force_wake(struct ath12k_base *ab)
{
	ath12k_pci_write32(ab, PCIE_SOC_WAKE_PCIE_LOCAL_REG, 1);
	mdelay(5);
}

static void ath12k_pci_sw_reset(struct ath12k_base *ab, bool power_on)
{
	if (power_on) {
		ath12k_pci_enable_ltssm(ab);
		ath12k_pci_clear_all_intrs(ab);
		ath12k_pci_set_wlaon_pwr_ctrl(ab);
	}

	ath12k_mhi_clear_vector(ab);
	ath12k_pci_clear_dbg_registers(ab);
	ath12k_pci_soc_global_reset(ab);
	ath12k_mhi_set_mhictrl_reset(ab);
}

static void ath12k_pci_free_ext_irq(struct ath12k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

static void ath12k_pci_free_irq(struct ath12k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath12k_pci_free_ext_irq(ab);
}

static void ath12k_pci_ce_irq_enable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irq_disable(struct ath12k_base *ab, u16 ce_id)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

static void ath12k_pci_ce_irqs_disable(struct ath12k_base *ab)
{
	int i;

	clear_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_disable(ab, i);
	}
}

static void ath12k_pci_sync_ce_irqs(struct ath12k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

static void ath12k_pci_ce_tasklet(struct tasklet_struct *t)
{
	struct ath12k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath12k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

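	/* The IRQ was disabled by ath12k_pci_ce_interrupt_handler(); now that
	 * the CE ring has been serviced, re-enable it.
	 */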
	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}

static irqreturn_t ath12k_pci_ce_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ce_pipe *ce_pipe = arg;
	struct ath12k_base *ab = ce_pipe->ab;
	int irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

static void ath12k_pci_ext_grp_disable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void __ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	int i;

	if (!test_and_clear_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath12k_pci_ext_grp_disable(irq_grp);

		napi_synchronize(&irq_grp->napi);
		napi_disable(&irq_grp->napi);
	}
}

static void ath12k_pci_ext_grp_enable(struct ath12k_ext_irq_grp *irq_grp)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(irq_grp->ab);
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

static void ath12k_pci_sync_ext_irqs(struct ath12k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

static int ath12k_pci_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath12k_ext_irq_grp *irq_grp = container_of(napi,
							  struct ath12k_ext_irq_grp,
							  napi);
	struct ath12k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath12k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	if (work_done > budget)
		work_done = budget;

	return work_done;
}

static irqreturn_t ath12k_pci_ext_interrupt_handler(int irq, void *arg)
{
	struct ath12k_ext_irq_grp *irq_grp = arg;
	struct ath12k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath12k_dbg(irq_grp->ab, ATH12K_DBG_PCI, "ext irq:%d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

static int ath12k_pci_ext_irq_config(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0, base_idx;
	struct ath12k_ext_irq_grp *irq_grp;

	base_idx = ATH12K_PCI_IRQ_CE0_OFFSET + CE_COUNT_MAX;
	ret = ath12k_pci_get_user_msi_assignment(ab, "DP",
						 &num_vectors,
						 &user_base_data,
						 &base_vector);
	if (ret < 0)
		return ret;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		u32 num_irq = 0;

		irq_grp = &ab->ext_irq_grp[i];
		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath12k_pci_ext_grp_napi_poll);

		if (ab->hw_params->ring_mask->tx[i] ||
		    ab->hw_params->ring_mask->rx[i] ||
		    ab->hw_params->ring_mask->rx_err[i] ||
		    ab->hw_params->ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params->ring_mask->reo_status[i] ||
		    ab->hw_params->ring_mask->host2rxdma[i] ||
		    ab->hw_params->ring_mask->rx_mon_dest[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = base_idx + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath12k_pci_get_msi_irq(ab->dev, vector);

			ab->irq_num[irq_idx] = irq;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "irq:%d group:%d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath12k_pci_ext_interrupt_handler,
					  ab_pci->irq_flags,
					  "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath12k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				goto fail_request;
			}
		}
		ath12k_pci_ext_grp_disable(irq_grp);
	}

	return 0;

fail_request:
	/* napi_ndev of the i-th group was allocated successfully, so free it as well */
	i += 1;
fail_allocate:
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}

static int ath12k_pci_set_irq_affinity_hint(struct ath12k_pci *ab_pci,
					    const struct cpumask *m)
{
	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		return 0;

	return irq_set_affinity_hint(ab_pci->pdev->irq, m);
}

static int ath12k_pci_config_irq(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct ath12k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;

	ret = ath12k_pci_get_user_msi_assignment(ab,
						 "CE", &msi_data_count,
						 &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	/* Configure CE irqs */

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath12k_pci_get_msi_irq(ab->dev, msi_data);
		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH12K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath12k_pci_ce_tasklet);

		ret = request_irq(irq, ath12k_pci_ce_interrupt_handler,
				  ab_pci->irq_flags, irq_name[irq_idx],
				  ce_pipe);
		if (ret) {
			ath12k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath12k_pci_ce_irq_disable(ab, i);
	}

	ret = ath12k_pci_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}

static void ath12k_pci_init_qmi_ce_config(struct ath12k_base *ab)
{
	struct ath12k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg;
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_bus *bus = ab_pci->pdev->bus;

	cfg->tgt_ce = ab->hw_params->target_ce_config;
	cfg->tgt_ce_len = ab->hw_params->target_ce_count;

	cfg->svc_to_ce_map = ab->hw_params->svc_to_ce_map;
	cfg->svc_to_ce_map_len = ab->hw_params->svc_to_ce_map_len;
	ab->qmi.service_ins_id = ab->hw_params->qmi_service_ins_id;

	if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features)) {
		ab_pci->qmi_instance =
			u32_encode_bits(pci_domain_nr(bus), DOMAIN_NUMBER_MASK) |
			u32_encode_bits(bus->number, BUS_NUMBER_MASK);
		ab->qmi.service_ins_id += ab_pci->qmi_instance;
	}
}

static void ath12k_pci_ce_irqs_enable(struct ath12k_base *ab)
{
	int i;

	set_bit(ATH12K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath12k_pci_ce_irq_enable(ab, i);
	}
}

static void ath12k_pci_msi_config(struct ath12k_pci *ab_pci, bool enable)
{
	struct pci_dev *dev = ab_pci->pdev;
	u16 control;

	pci_read_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, &control);

	if (enable)
		control |= PCI_MSI_FLAGS_ENABLE;
	else
		control &= ~PCI_MSI_FLAGS_ENABLE;

	pci_write_config_word(dev, dev->msi_cap + PCI_MSI_FLAGS, control);
}

static void ath12k_pci_msi_enable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, true);
}

static void ath12k_pci_msi_disable(struct ath12k_pci *ab_pci)
{
	ath12k_pci_msi_config(ab_pci, false);
}

static int ath12k_pci_msi_alloc(struct ath12k_pci *ab_pci)
{
	struct ath12k_base *ab = ab_pci->ab;
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	struct msi_desc *msi_desc;
	int num_vectors;
	int ret;

	num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
					    msi_config->total_vectors,
					    msi_config->total_vectors,
					    PCI_IRQ_MSI);

	if (num_vectors == msi_config->total_vectors) {
		set_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->irq_flags = IRQF_SHARED;
	} else {
		num_vectors = pci_alloc_irq_vectors(ab_pci->pdev,
						    1,
						    1,
						    PCI_IRQ_MSI);
		if (num_vectors < 0) {
			ret = -EINVAL;
			goto reset_msi_config;
		}
		clear_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags);
		ab_pci->msi_config = &msi_config_one_msi;
		ab_pci->irq_flags = IRQF_SHARED | IRQF_NOBALANCING;
		ath12k_dbg(ab, ATH12K_DBG_PCI, "request MSI one vector\n");
	}

	ath12k_info(ab, "MSI vectors: %d\n", num_vectors);

	ath12k_pci_msi_disable(ab_pci);

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab, "msi_desc is NULL!\n");
		ret = -EINVAL;
		goto free_msi_vector;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;
	if (msi_desc->pci.msi_attrib.is_64)
		set_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "msi base data is %d\n", ab_pci->msi_ep_base_data);

	return 0;

free_msi_vector:
	pci_free_irq_vectors(ab_pci->pdev);

reset_msi_config:
	return ret;
}

static void ath12k_pci_msi_free(struct ath12k_pci *ab_pci)
{
	pci_free_irq_vectors(ab_pci->pdev);
}

static int ath12k_pci_config_msi_data(struct ath12k_pci *ab_pci)
{
	struct msi_desc *msi_desc;

	msi_desc = irq_get_msi_desc(ab_pci->pdev->irq);
	if (!msi_desc) {
		ath12k_err(ab_pci->ab, "msi_desc is NULL!\n");
		pci_free_irq_vectors(ab_pci->pdev);
		return -EINVAL;
	}

	ab_pci->msi_ep_base_data = msi_desc->msg.data;

	ath12k_dbg(ab_pci->ab, ATH12K_DBG_PCI, "pci after request_irq msi_ep_base_data %d\n",
		   ab_pci->msi_ep_base_data);

	return 0;
}

static int ath12k_pci_claim(struct ath12k_pci *ab_pci, struct pci_dev *pdev)
{
	struct ath12k_base *ab = ab_pci->ab;
	u16 device_id;
	int ret = 0;

	pci_read_config_word(pdev, PCI_DEVICE_ID, &device_id);
	if (device_id != ab_pci->dev_id) {
		ath12k_err(ab, "pci device id mismatch: 0x%x 0x%x\n",
			   device_id, ab_pci->dev_id);
		ret = -EIO;
		goto out;
	}

	ret = pci_assign_resource(pdev, ATH12K_PCI_BAR_NUM);
	if (ret) {
		ath12k_err(ab, "failed to assign pci resource: %d\n", ret);
		goto out;
	}

	ret = pci_enable_device(pdev);
	if (ret) {
		ath12k_err(ab, "failed to enable pci device: %d\n", ret);
		goto out;
	}

	ret = pci_request_region(pdev, ATH12K_PCI_BAR_NUM, "ath12k_pci");
	if (ret) {
		ath12k_err(ab, "failed to request pci region: %d\n", ret);
		goto disable_device;
	}

	ret = dma_set_mask_and_coherent(&pdev->dev,
					DMA_BIT_MASK(ATH12K_PCI_DMA_MASK));
	if (ret) {
		ath12k_err(ab, "failed to set pci dma mask to %d: %d\n",
			   ATH12K_PCI_DMA_MASK, ret);
		goto release_region;
	}

	pci_set_master(pdev);

	ab->mem_len = pci_resource_len(pdev, ATH12K_PCI_BAR_NUM);
	ab->mem = pci_iomap(pdev, ATH12K_PCI_BAR_NUM, 0);
	if (!ab->mem) {
		ath12k_err(ab, "failed to map pci bar %d\n", ATH12K_PCI_BAR_NUM);
%d\n", ATH12K_PCI_BAR_NUM); 889 ret = -EIO; 890 goto release_region; 891 } 892 893 ath12k_dbg(ab, ATH12K_DBG_BOOT, "boot pci_mem 0x%p\n", ab->mem); 894 return 0; 895 896 release_region: 897 pci_release_region(pdev, ATH12K_PCI_BAR_NUM); 898 disable_device: 899 pci_disable_device(pdev); 900 out: 901 return ret; 902 } 903 904 static void ath12k_pci_free_region(struct ath12k_pci *ab_pci) 905 { 906 struct ath12k_base *ab = ab_pci->ab; 907 struct pci_dev *pci_dev = ab_pci->pdev; 908 909 pci_iounmap(pci_dev, ab->mem); 910 ab->mem = NULL; 911 pci_release_region(pci_dev, ATH12K_PCI_BAR_NUM); 912 if (pci_is_enabled(pci_dev)) 913 pci_disable_device(pci_dev); 914 } 915 916 static void ath12k_pci_aspm_disable(struct ath12k_pci *ab_pci) 917 { 918 struct ath12k_base *ab = ab_pci->ab; 919 920 pcie_capability_read_word(ab_pci->pdev, PCI_EXP_LNKCTL, 921 &ab_pci->link_ctl); 922 923 ath12k_dbg(ab, ATH12K_DBG_PCI, "pci link_ctl 0x%04x L0s %d L1 %d\n", 924 ab_pci->link_ctl, 925 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L0S), 926 u16_get_bits(ab_pci->link_ctl, PCI_EXP_LNKCTL_ASPM_L1)); 927 928 /* disable L0s and L1 */ 929 pcie_capability_clear_word(ab_pci->pdev, PCI_EXP_LNKCTL, 930 PCI_EXP_LNKCTL_ASPMC); 931 932 set_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags); 933 } 934 935 static void ath12k_pci_update_qrtr_node_id(struct ath12k_base *ab) 936 { 937 struct ath12k_pci *ab_pci = ath12k_pci_priv(ab); 938 u32 reg; 939 940 /* On platforms with two or more identical mhi devices, qmi service run 941 * with identical qrtr-node-id. Because of this identical ID qrtr-lookup 942 * cannot register more than one qmi service with identical node ID. 943 * 944 * This generates a unique instance ID from PCIe domain number and bus number, 945 * writes to the given register, it is available for firmware when the QMI service 946 * is spawned. 
	 */
	reg = PCIE_LOCAL_REG_QRTR_NODE_ID & WINDOW_RANGE_MASK;
	ath12k_pci_write32(ab, reg, ab_pci->qmi_instance);

	ath12k_dbg(ab, ATH12K_DBG_PCI, "pci reg 0x%x instance 0x%x read val 0x%x\n",
		   reg, ab_pci->qmi_instance, ath12k_pci_read32(ab, reg));
}

static void ath12k_pci_aspm_restore(struct ath12k_pci *ab_pci)
{
	if (test_and_clear_bit(ATH12K_PCI_ASPM_RESTORE, &ab_pci->flags))
		pcie_capability_clear_and_set_word(ab_pci->pdev, PCI_EXP_LNKCTL,
						   PCI_EXP_LNKCTL_ASPMC,
						   ab_pci->link_ctl &
						   PCI_EXP_LNKCTL_ASPMC);
}

static void ath12k_pci_kill_tasklets(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ab->hw_params->ce_count; i++) {
		struct ath12k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i];

		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		tasklet_kill(&ce_pipe->intr_tq);
	}
}

static void ath12k_pci_ce_irq_disable_sync(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_disable(ab);
	ath12k_pci_sync_ce_irqs(ab);
	ath12k_pci_kill_tasklets(ab);
}

int ath12k_pci_map_service_to_pipe(struct ath12k_base *ab, u16 service_id,
				   u8 *ul_pipe, u8 *dl_pipe)
{
	const struct service_to_pipe *entry;
	bool ul_set = false, dl_set = false;
	int i;

	for (i = 0; i < ab->hw_params->svc_to_ce_map_len; i++) {
		entry = &ab->hw_params->svc_to_ce_map[i];

		if (__le32_to_cpu(entry->service_id) != service_id)
			continue;

		switch (__le32_to_cpu(entry->pipedir)) {
		case PIPEDIR_NONE:
			break;
		case PIPEDIR_IN:
			WARN_ON(dl_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			break;
		case PIPEDIR_OUT:
			WARN_ON(ul_set);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			ul_set = true;
			break;
		case PIPEDIR_INOUT:
			WARN_ON(dl_set);
			WARN_ON(ul_set);
			*dl_pipe = __le32_to_cpu(entry->pipenum);
			*ul_pipe = __le32_to_cpu(entry->pipenum);
			dl_set = true;
			ul_set = true;
			break;
		}
	}

	if (WARN_ON(!ul_set || !dl_set))
		return -ENOENT;

	return 0;
}

int ath12k_pci_get_msi_irq(struct device *dev, unsigned int vector)
{
	struct pci_dev *pci_dev = to_pci_dev(dev);

	return pci_irq_vector(pci_dev, vector);
}

int ath12k_pci_get_user_msi_assignment(struct ath12k_base *ab, char *user_name,
				       int *num_vectors, u32 *user_base_data,
				       u32 *base_vector)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	const struct ath12k_msi_config *msi_config = ab_pci->msi_config;
	int idx;

	for (idx = 0; idx < msi_config->total_users; idx++) {
		if (strcmp(user_name, msi_config->users[idx].name) == 0) {
			*num_vectors = msi_config->users[idx].num_vectors;
			*base_vector = msi_config->users[idx].base_vector;
			*user_base_data = *base_vector + ab_pci->msi_ep_base_data;

			ath12k_dbg(ab, ATH12K_DBG_PCI,
				   "Assign MSI to user: %s, num_vectors: %d, user_base_data: %u, base_vector: %u\n",
				   user_name, *num_vectors, *user_base_data,
				   *base_vector);

			return 0;
		}
	}

	ath12k_err(ab, "Failed to find MSI assignment for %s!\n", user_name);

	return -EINVAL;
}

void ath12k_pci_get_msi_address(struct ath12k_base *ab, u32 *msi_addr_lo,
				u32 *msi_addr_hi)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	struct pci_dev *pci_dev = to_pci_dev(ab->dev);

	pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_LO,
			      msi_addr_lo);

	if (test_bit(ATH12K_PCI_FLAG_IS_MSI_64, &ab_pci->flags)) {
		pci_read_config_dword(pci_dev, pci_dev->msi_cap + PCI_MSI_ADDRESS_HI,
				      msi_addr_hi);
	} else {
		*msi_addr_hi = 0;
	}
}

void ath12k_pci_get_ce_msi_idx(struct ath12k_base *ab, u32 ce_id,
			       u32 *msi_idx)
{
	u32 i, msi_data_idx;

	for (i = 0, msi_data_idx = 0; i < ab->hw_params->ce_count; i++) {
		if (ath12k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		if (ce_id == i)
			break;

		msi_data_idx++;
	}
	*msi_idx = msi_data_idx;
}

void ath12k_pci_hif_ce_irq_enable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irqs_enable(ab);
}

void ath12k_pci_hif_ce_irq_disable(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
}

void ath12k_pci_ext_irq_enable(struct ath12k_base *ab)
{
	int i;

	for (i = 0; i < ATH12K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath12k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		napi_enable(&irq_grp->napi);
		ath12k_pci_ext_grp_enable(irq_grp);
	}

	set_bit(ATH12K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}

void ath12k_pci_ext_irq_disable(struct ath12k_base *ab)
{
	__ath12k_pci_ext_irq_disable(ab);
	ath12k_pci_sync_ext_irqs(ab);
}

int ath12k_pci_hif_suspend(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_suspend(ar_pci);

	return 0;
}

int ath12k_pci_hif_resume(struct ath12k_base *ab)
{
	struct ath12k_pci *ar_pci = ath12k_pci_priv(ab);

	ath12k_mhi_resume(ar_pci);

	return 0;
}

void ath12k_pci_stop(struct ath12k_base *ab)
{
	ath12k_pci_ce_irq_disable_sync(ab);
	ath12k_ce_cleanup_pipes(ab);
}

int ath12k_pci_start(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	set_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);

	if (test_bit(ATH12K_PCI_FLAG_MULTI_MSI_VECTORS, &ab_pci->flags))
		ath12k_pci_aspm_restore(ab_pci);
	else
		ath12k_info(ab, "leaving PCI ASPM disabled to avoid MHI M2 problems\n");

	ath12k_pci_ce_irqs_enable(ab);
	ath12k_ce_rx_post_buf(ab);

	return 0;
}

u32 ath12k_pci_read32(struct ath12k_base *ab, u32 offset)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 val, window_start;
	int ret = 0;

	/* For an offset beyond BAR0 + 4K - 32, the MHI may need to be
	 * woken up before the register can be accessed.
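	 *
	 * The matching ->release() call at the end of this function drops
	 * the wakeup reference again, but only if ->wakeup() succeeded.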
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		val = ioread32(ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				offset = offset - PCI_MHIREGLEN_REG;
				val = ioread32(ab->mem +
					       (offset & WINDOW_RANGE_MASK));
			} else {
				val = ioread32(ab->mem + window_start +
					       (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			val = ioread32(ab->mem + window_start +
				       (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
	return val;
}

void ath12k_pci_write32(struct ath12k_base *ab, u32 offset, u32 value)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	u32 window_start;
	int ret = 0;

	/* For an offset beyond BAR0 + 4K - 32, the MHI may need to be
	 * woken up before the register can be accessed.
	 */
	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->wakeup)
		ret = ab_pci->pci_ops->wakeup(ab);

	if (offset < WINDOW_START) {
		iowrite32(value, ab->mem + offset);
	} else {
		if (ab->static_window_map)
			window_start = ath12k_pci_get_window_start(ab, offset);
		else
			window_start = WINDOW_START;

		if (window_start == WINDOW_START) {
			spin_lock_bh(&ab_pci->window_lock);
			ath12k_pci_select_window(ab_pci, offset);

			if (ath12k_pci_is_offset_within_mhi_region(offset)) {
				offset = offset - PCI_MHIREGLEN_REG;
				iowrite32(value, ab->mem +
					  (offset & WINDOW_RANGE_MASK));
			} else {
				iowrite32(value, ab->mem + window_start +
					  (offset & WINDOW_RANGE_MASK));
			}
			spin_unlock_bh(&ab_pci->window_lock);
		} else {
			iowrite32(value, ab->mem + window_start +
				  (offset & WINDOW_RANGE_MASK));
		}
	}

	if (test_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags) &&
	    offset >= ACCESS_ALWAYS_OFF && ab_pci->pci_ops->release &&
	    !ret)
		ab_pci->pci_ops->release(ab);
}

int ath12k_pci_power_up(struct ath12k_base *ab)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);
	int ret;

	ab_pci->register_window = 0;
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, true);

	/* Disable ASPM during firmware download due to problems switching
	 * to AMSS state.
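	 *
	 * ASPM is restored later in ath12k_pci_start() once firmware is up,
	 * and only when multiple MSI vectors are in use; with a single
	 * vector it stays disabled to avoid MHI M2 problems.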
	 */
	ath12k_pci_aspm_disable(ab_pci);

	ath12k_pci_msi_enable(ab_pci);

	if (test_bit(ATH12K_FW_FEATURE_MULTI_QRTR_ID, ab->fw.fw_features))
		ath12k_pci_update_qrtr_node_id(ab);

	ret = ath12k_mhi_start(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to start mhi: %d\n", ret);
		return ret;
	}

	if (ab->static_window_map)
		ath12k_pci_select_static_window(ab_pci);

	return 0;
}

void ath12k_pci_power_down(struct ath12k_base *ab, bool is_suspend)
{
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	/* restore ASPM in case firmware bootup fails */
	ath12k_pci_aspm_restore(ab_pci);

	ath12k_pci_force_wake(ab_pci->ab);
	ath12k_pci_msi_disable(ab_pci);
	ath12k_mhi_stop(ab_pci, is_suspend);
	clear_bit(ATH12K_PCI_FLAG_INIT_DONE, &ab_pci->flags);
	ath12k_pci_sw_reset(ab_pci->ab, false);
}

static int ath12k_pci_panic_handler(struct ath12k_base *ab)
{
	ath12k_pci_sw_reset(ab, false);

	return NOTIFY_OK;
}

static const struct ath12k_hif_ops ath12k_pci_hif_ops = {
	.start = ath12k_pci_start,
	.stop = ath12k_pci_stop,
	.read32 = ath12k_pci_read32,
	.write32 = ath12k_pci_write32,
	.power_down = ath12k_pci_power_down,
	.power_up = ath12k_pci_power_up,
	.suspend = ath12k_pci_hif_suspend,
	.resume = ath12k_pci_hif_resume,
	.irq_enable = ath12k_pci_ext_irq_enable,
	.irq_disable = ath12k_pci_ext_irq_disable,
	.get_msi_address = ath12k_pci_get_msi_address,
	.get_user_msi_vector = ath12k_pci_get_user_msi_assignment,
	.map_service_to_pipe = ath12k_pci_map_service_to_pipe,
	.ce_irq_enable = ath12k_pci_hif_ce_irq_enable,
	.ce_irq_disable = ath12k_pci_hif_ce_irq_disable,
	.get_ce_msi_idx = ath12k_pci_get_ce_msi_idx,
	.panic_handler = ath12k_pci_panic_handler,
};

static
void ath12k_pci_read_hw_version(struct ath12k_base *ab, u32 *major, u32 *minor)
{
	u32 soc_hw_version;

	soc_hw_version = ath12k_pci_read32(ab, TCSR_SOC_HW_VERSION);
	*major = FIELD_GET(TCSR_SOC_HW_VERSION_MAJOR_MASK,
			   soc_hw_version);
	*minor = FIELD_GET(TCSR_SOC_HW_VERSION_MINOR_MASK,
			   soc_hw_version);

	ath12k_dbg(ab, ATH12K_DBG_PCI,
		   "pci tcsr_soc_hw_version major %d minor %d\n",
		   *major, *minor);
}

static int ath12k_pci_probe(struct pci_dev *pdev,
			    const struct pci_device_id *pci_dev)
{
	struct ath12k_base *ab;
	struct ath12k_pci *ab_pci;
	u32 soc_hw_version_major, soc_hw_version_minor;
	int ret;

	ab = ath12k_core_alloc(&pdev->dev, sizeof(*ab_pci), ATH12K_BUS_PCI);
	if (!ab) {
		dev_err(&pdev->dev, "failed to allocate ath12k base\n");
		return -ENOMEM;
	}

	ab->dev = &pdev->dev;
	pci_set_drvdata(pdev, ab);
	ab_pci = ath12k_pci_priv(ab);
	ab_pci->dev_id = pci_dev->device;
	ab_pci->ab = ab;
	ab_pci->pdev = pdev;
	ab->hif.ops = &ath12k_pci_hif_ops;
	spin_lock_init(&ab_pci->window_lock);

	ret = ath12k_pci_claim(ab_pci, pdev);
	if (ret) {
		ath12k_err(ab, "failed to claim device: %d\n", ret);
		goto err_free_core;
	}

	ath12k_dbg(ab, ATH12K_DBG_BOOT, "pci probe %04x:%04x %04x:%04x\n",
		   pdev->vendor, pdev->device,
		   pdev->subsystem_vendor, pdev->subsystem_device);

	ab->id.vendor = pdev->vendor;
	ab->id.device = pdev->device;
	ab->id.subsystem_vendor = pdev->subsystem_vendor;
	ab->id.subsystem_device = pdev->subsystem_device;

	switch (pci_dev->device) {
	case QCN9274_DEVICE_ID:
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = true;
		ab_pci->pci_ops = &ath12k_pci_ops_qcn9274;
		ab->hal_rx_ops = &hal_rx_qcn9274_ops;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_QCN9274_HW20;
			break;
		case ATH12K_PCI_SOC_HW_VERSION_1:
			ab->hw_rev = ATH12K_HW_QCN9274_HW10;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for QCN9274: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;
	case WCN7850_DEVICE_ID:
		ab->id.bdf_search = ATH12K_BDF_SEARCH_BUS_AND_BOARD;
		ab_pci->msi_config = &ath12k_msi_config[0];
		ab->static_window_map = false;
		ab_pci->pci_ops = &ath12k_pci_ops_wcn7850;
		ab->hal_rx_ops = &hal_rx_wcn7850_ops;
		ath12k_pci_read_hw_version(ab, &soc_hw_version_major,
					   &soc_hw_version_minor);
		switch (soc_hw_version_major) {
		case ATH12K_PCI_SOC_HW_VERSION_2:
			ab->hw_rev = ATH12K_HW_WCN7850_HW20;
			break;
		default:
			dev_err(&pdev->dev,
				"Unknown hardware version found for WCN7850: 0x%x\n",
				soc_hw_version_major);
			ret = -EOPNOTSUPP;
			goto err_pci_free_region;
		}
		break;

	default:
		dev_err(&pdev->dev, "Unknown PCI device found: 0x%x\n",
			pci_dev->device);
		ret = -EOPNOTSUPP;
		goto err_pci_free_region;
	}

	ret = ath12k_pci_msi_alloc(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to alloc msi: %d\n", ret);
		goto err_pci_free_region;
	}

	ret = ath12k_core_pre_init(ab);
	if (ret)
		goto err_pci_msi_free;

	ret = ath12k_pci_set_irq_affinity_hint(ab_pci, cpumask_of(0));
	if (ret) {
		ath12k_err(ab, "failed to set irq affinity %d\n", ret);
		goto err_pci_msi_free;
	}

	ret = ath12k_mhi_register(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to register mhi: %d\n", ret);
		goto err_irq_affinity_cleanup;
	}

	ret = ath12k_hal_srng_init(ab);
	if (ret)
		goto err_mhi_unregister;

	ret = ath12k_ce_alloc_pipes(ab);
	if (ret) {
		ath12k_err(ab, "failed to allocate ce pipes: %d\n", ret);
		goto err_hal_srng_deinit;
	}

	ath12k_pci_init_qmi_ce_config(ab);

	ret = ath12k_pci_config_irq(ab);
	if (ret) {
		ath12k_err(ab, "failed to config irq: %d\n", ret);
		goto err_ce_free;
	}

	/* The kernel may allocate a dummy vector before request_irq() and
	 * then allocate the real vector when request_irq() is called.
	 * Re-read msi_data here to avoid spurious interrupts, as msi_data
	 * will be programmed into the SRNGs.
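	 *
	 * ath12k_pci_config_msi_data() refreshes msi_ep_base_data from the
	 * MSI descriptor of the vector that was actually granted.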
	 */
	ret = ath12k_pci_config_msi_data(ab_pci);
	if (ret) {
		ath12k_err(ab, "failed to config msi_data: %d\n", ret);
		goto err_free_irq;
	}

	ret = ath12k_core_init(ab);
	if (ret) {
		ath12k_err(ab, "failed to init core: %d\n", ret);
		goto err_free_irq;
	}
	return 0;

err_free_irq:
	ath12k_pci_free_irq(ab);

err_ce_free:
	ath12k_ce_free_pipes(ab);

err_hal_srng_deinit:
	ath12k_hal_srng_deinit(ab);

err_mhi_unregister:
	ath12k_mhi_unregister(ab_pci);

err_irq_affinity_cleanup:
	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

err_pci_msi_free:
	ath12k_pci_msi_free(ab_pci);

err_pci_free_region:
	ath12k_pci_free_region(ab_pci);

err_free_core:
	ath12k_core_free(ab);

	return ret;
}

static void ath12k_pci_remove(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);

	if (test_bit(ATH12K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		ath12k_pci_power_down(ab, false);
		ath12k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	set_bit(ATH12K_FLAG_UNREGISTERING, &ab->dev_flags);

	cancel_work_sync(&ab->reset_work);
	ath12k_core_deinit(ab);

qmi_fail:
	ath12k_mhi_unregister(ab_pci);

	ath12k_pci_free_irq(ab);
	ath12k_pci_msi_free(ab_pci);
	ath12k_pci_free_region(ab_pci);

	ath12k_hal_srng_deinit(ab);
	ath12k_ce_free_pipes(ab);
	ath12k_core_free(ab);
}

static void ath12k_pci_shutdown(struct pci_dev *pdev)
{
	struct ath12k_base *ab = pci_get_drvdata(pdev);
	struct ath12k_pci *ab_pci = ath12k_pci_priv(ab);

	ath12k_pci_set_irq_affinity_hint(ab_pci, NULL);
	ath12k_pci_power_down(ab, false);
}

static __maybe_unused int ath12k_pci_pm_suspend(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend(ab);
	if (ret)
		ath12k_warn(ab, "failed to suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_resume(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume(ab);
	if (ret)
		ath12k_warn(ab, "failed to resume core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_suspend_late(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_suspend_late(ab);
	if (ret)
		ath12k_warn(ab, "failed to late suspend core: %d\n", ret);

	return ret;
}

static __maybe_unused int ath12k_pci_pm_resume_early(struct device *dev)
{
	struct ath12k_base *ab = dev_get_drvdata(dev);
	int ret;

	ret = ath12k_core_resume_early(ab);
	if (ret)
		ath12k_warn(ab, "failed to early resume core: %d\n", ret);

	return ret;
}

static const struct dev_pm_ops __maybe_unused ath12k_pci_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend,
				ath12k_pci_pm_resume)
	SET_LATE_SYSTEM_SLEEP_PM_OPS(ath12k_pci_pm_suspend_late,
				     ath12k_pci_pm_resume_early)
};

static struct pci_driver ath12k_pci_driver = {
	.name = "ath12k_pci",
	.id_table = ath12k_pci_id_table,
	.probe = ath12k_pci_probe,
	.remove = ath12k_pci_remove,
	.shutdown = ath12k_pci_shutdown,
	.driver.pm = &ath12k_pci_pm_ops,
};

static int ath12k_pci_init(void)
{
	int ret;

	ret = pci_register_driver(&ath12k_pci_driver);
	if (ret) {
		pr_err("failed to register ath12k pci driver: %d\n",
		       ret);
		return ret;
	}

	return 0;
}
module_init(ath12k_pci_init);

static void ath12k_pci_exit(void)
{
	pci_unregister_driver(&ath12k_pci_driver);
}

module_exit(ath12k_pci_exit);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies PCIe 802.11be WLAN devices");
MODULE_LICENSE("Dual BSD/GPL");