1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2018-2019 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2022-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include <linux/module.h> 8 #include <linux/platform_device.h> 9 #include <linux/property.h> 10 #include <linux/of_device.h> 11 #include <linux/of.h> 12 #include <linux/dma-mapping.h> 13 #include <linux/of_address.h> 14 #include <linux/iommu.h> 15 #include "ahb.h" 16 #include "debug.h" 17 #include "hif.h" 18 #include "qmi.h" 19 #include <linux/remoteproc.h> 20 #include "pcic.h" 21 #include <linux/soc/qcom/smem.h> 22 #include <linux/soc/qcom/smem_state.h> 23 24 static const struct of_device_id ath11k_ahb_of_match[] = { 25 /* TODO: Should we change the compatible string to something similar 26 * to one that ath10k uses? 27 */ 28 { .compatible = "qcom,ipq8074-wifi", 29 .data = (void *)ATH11K_HW_IPQ8074, 30 }, 31 { .compatible = "qcom,ipq6018-wifi", 32 .data = (void *)ATH11K_HW_IPQ6018_HW10, 33 }, 34 { .compatible = "qcom,wcn6750-wifi", 35 .data = (void *)ATH11K_HW_WCN6750_HW10, 36 }, 37 { .compatible = "qcom,ipq5018-wifi", 38 .data = (void *)ATH11K_HW_IPQ5018_HW10, 39 }, 40 { } 41 }; 42 43 MODULE_DEVICE_TABLE(of, ath11k_ahb_of_match); 44 45 #define ATH11K_IRQ_CE0_OFFSET 4 46 47 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { 48 "misc-pulse1", 49 "misc-latch", 50 "sw-exception", 51 "watchdog", 52 "ce0", 53 "ce1", 54 "ce2", 55 "ce3", 56 "ce4", 57 "ce5", 58 "ce6", 59 "ce7", 60 "ce8", 61 "ce9", 62 "ce10", 63 "ce11", 64 "host2wbm-desc-feed", 65 "host2reo-re-injection", 66 "host2reo-command", 67 "host2rxdma-monitor-ring3", 68 "host2rxdma-monitor-ring2", 69 "host2rxdma-monitor-ring1", 70 "reo2ost-exception", 71 "wbm2host-rx-release", 72 "reo2host-status", 73 "reo2host-destination-ring4", 74 "reo2host-destination-ring3", 75 "reo2host-destination-ring2", 76 "reo2host-destination-ring1", 77 "rxdma2host-monitor-destination-mac3", 78 
"rxdma2host-monitor-destination-mac2", 79 "rxdma2host-monitor-destination-mac1", 80 "ppdu-end-interrupts-mac3", 81 "ppdu-end-interrupts-mac2", 82 "ppdu-end-interrupts-mac1", 83 "rxdma2host-monitor-status-ring-mac3", 84 "rxdma2host-monitor-status-ring-mac2", 85 "rxdma2host-monitor-status-ring-mac1", 86 "host2rxdma-host-buf-ring-mac3", 87 "host2rxdma-host-buf-ring-mac2", 88 "host2rxdma-host-buf-ring-mac1", 89 "rxdma2host-destination-ring-mac3", 90 "rxdma2host-destination-ring-mac2", 91 "rxdma2host-destination-ring-mac1", 92 "host2tcl-input-ring4", 93 "host2tcl-input-ring3", 94 "host2tcl-input-ring2", 95 "host2tcl-input-ring1", 96 "wbm2host-tx-completions-ring3", 97 "wbm2host-tx-completions-ring2", 98 "wbm2host-tx-completions-ring1", 99 "tcl2host-status-ring", 100 }; 101 102 /* enum ext_irq_num - irq numbers that can be used by external modules 103 * like datapath 104 */ 105 enum ext_irq_num { 106 host2wbm_desc_feed = 16, 107 host2reo_re_injection, 108 host2reo_command, 109 host2rxdma_monitor_ring3, 110 host2rxdma_monitor_ring2, 111 host2rxdma_monitor_ring1, 112 reo2host_exception, 113 wbm2host_rx_release, 114 reo2host_status, 115 reo2host_destination_ring4, 116 reo2host_destination_ring3, 117 reo2host_destination_ring2, 118 reo2host_destination_ring1, 119 rxdma2host_monitor_destination_mac3, 120 rxdma2host_monitor_destination_mac2, 121 rxdma2host_monitor_destination_mac1, 122 ppdu_end_interrupts_mac3, 123 ppdu_end_interrupts_mac2, 124 ppdu_end_interrupts_mac1, 125 rxdma2host_monitor_status_ring_mac3, 126 rxdma2host_monitor_status_ring_mac2, 127 rxdma2host_monitor_status_ring_mac1, 128 host2rxdma_host_buf_ring_mac3, 129 host2rxdma_host_buf_ring_mac2, 130 host2rxdma_host_buf_ring_mac1, 131 rxdma2host_destination_ring_mac3, 132 rxdma2host_destination_ring_mac2, 133 rxdma2host_destination_ring_mac1, 134 host2tcl_input_ring4, 135 host2tcl_input_ring3, 136 host2tcl_input_ring2, 137 host2tcl_input_ring1, 138 wbm2host_tx_completions_ring3, 139 wbm2host_tx_completions_ring2, 
140 wbm2host_tx_completions_ring1, 141 tcl2host_status_ring, 142 }; 143 144 static int 145 ath11k_ahb_get_msi_irq_wcn6750(struct ath11k_base *ab, unsigned int vector) 146 { 147 return ab->pci.msi.irqs[vector]; 148 } 149 150 static inline u32 151 ath11k_ahb_get_window_start_wcn6750(struct ath11k_base *ab, u32 offset) 152 { 153 u32 window_start = 0; 154 155 /* If offset lies within DP register range, use 1st window */ 156 if ((offset ^ HAL_SEQ_WCSS_UMAC_OFFSET) < ATH11K_PCI_WINDOW_RANGE_MASK) 157 window_start = ATH11K_PCI_WINDOW_START; 158 /* If offset lies within CE register range, use 2nd window */ 159 else if ((offset ^ HAL_SEQ_WCSS_UMAC_CE0_SRC_REG(ab)) < 160 ATH11K_PCI_WINDOW_RANGE_MASK) 161 window_start = 2 * ATH11K_PCI_WINDOW_START; 162 163 return window_start; 164 } 165 166 static void 167 ath11k_ahb_window_write32_wcn6750(struct ath11k_base *ab, u32 offset, u32 value) 168 { 169 u32 window_start; 170 171 /* WCN6750 uses static window based register access*/ 172 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); 173 174 iowrite32(value, ab->mem + window_start + 175 (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); 176 } 177 178 static u32 ath11k_ahb_window_read32_wcn6750(struct ath11k_base *ab, u32 offset) 179 { 180 u32 window_start; 181 u32 val; 182 183 /* WCN6750 uses static window based register access */ 184 window_start = ath11k_ahb_get_window_start_wcn6750(ab, offset); 185 186 val = ioread32(ab->mem + window_start + 187 (offset & ATH11K_PCI_WINDOW_RANGE_MASK)); 188 return val; 189 } 190 191 static const struct ath11k_pci_ops ath11k_ahb_pci_ops_wcn6750 = { 192 .wakeup = NULL, 193 .release = NULL, 194 .get_msi_irq = ath11k_ahb_get_msi_irq_wcn6750, 195 .window_write32 = ath11k_ahb_window_write32_wcn6750, 196 .window_read32 = ath11k_ahb_window_read32_wcn6750, 197 }; 198 199 static inline u32 ath11k_ahb_read32(struct ath11k_base *ab, u32 offset) 200 { 201 return ioread32(ab->mem + offset); 202 } 203 204 static inline void ath11k_ahb_write32(struct 
ath11k_base *ab, u32 offset, u32 value) 205 { 206 iowrite32(value, ab->mem + offset); 207 } 208 209 static void ath11k_ahb_kill_tasklets(struct ath11k_base *ab) 210 { 211 int i; 212 213 for (i = 0; i < ab->hw_params.ce_count; i++) { 214 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 215 216 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 217 continue; 218 219 tasklet_kill(&ce_pipe->intr_tq); 220 } 221 } 222 223 static void ath11k_ahb_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp) 224 { 225 int i; 226 227 for (i = 0; i < irq_grp->num_irq; i++) 228 disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 229 } 230 231 static void __ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) 232 { 233 int i; 234 235 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 236 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 237 238 ath11k_ahb_ext_grp_disable(irq_grp); 239 240 if (irq_grp->napi_enabled) { 241 napi_synchronize(&irq_grp->napi); 242 napi_disable(&irq_grp->napi); 243 irq_grp->napi_enabled = false; 244 } 245 } 246 } 247 248 static void ath11k_ahb_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp) 249 { 250 int i; 251 252 for (i = 0; i < irq_grp->num_irq; i++) 253 enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]); 254 } 255 256 static void ath11k_ahb_setbit32(struct ath11k_base *ab, u8 bit, u32 offset) 257 { 258 u32 val; 259 260 val = ath11k_ahb_read32(ab, offset); 261 ath11k_ahb_write32(ab, offset, val | BIT(bit)); 262 } 263 264 static void ath11k_ahb_clearbit32(struct ath11k_base *ab, u8 bit, u32 offset) 265 { 266 u32 val; 267 268 val = ath11k_ahb_read32(ab, offset); 269 ath11k_ahb_write32(ab, offset, val & ~BIT(bit)); 270 } 271 272 static void ath11k_ahb_ce_irq_enable(struct ath11k_base *ab, u16 ce_id) 273 { 274 const struct ce_attr *ce_attr; 275 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; 276 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; 277 278 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); 279 
ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); 280 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); 281 282 ce_attr = &ab->hw_params.host_ce_config[ce_id]; 283 if (ce_attr->src_nentries) 284 ath11k_ahb_setbit32(ab, ce_id, ie1_reg_addr); 285 286 if (ce_attr->dest_nentries) { 287 ath11k_ahb_setbit32(ab, ce_id, ie2_reg_addr); 288 ath11k_ahb_setbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, 289 ie3_reg_addr); 290 } 291 } 292 293 static void ath11k_ahb_ce_irq_disable(struct ath11k_base *ab, u16 ce_id) 294 { 295 const struct ce_attr *ce_attr; 296 const struct ce_ie_addr *ce_ie_addr = ab->hw_params.ce_ie_addr; 297 u32 ie1_reg_addr, ie2_reg_addr, ie3_reg_addr; 298 299 ie1_reg_addr = ce_ie_addr->ie1_reg_addr + ATH11K_CE_OFFSET(ab); 300 ie2_reg_addr = ce_ie_addr->ie2_reg_addr + ATH11K_CE_OFFSET(ab); 301 ie3_reg_addr = ce_ie_addr->ie3_reg_addr + ATH11K_CE_OFFSET(ab); 302 303 ce_attr = &ab->hw_params.host_ce_config[ce_id]; 304 if (ce_attr->src_nentries) 305 ath11k_ahb_clearbit32(ab, ce_id, ie1_reg_addr); 306 307 if (ce_attr->dest_nentries) { 308 ath11k_ahb_clearbit32(ab, ce_id, ie2_reg_addr); 309 ath11k_ahb_clearbit32(ab, ce_id + CE_HOST_IE_3_SHIFT, 310 ie3_reg_addr); 311 } 312 } 313 314 static void ath11k_ahb_sync_ce_irqs(struct ath11k_base *ab) 315 { 316 int i; 317 int irq_idx; 318 319 for (i = 0; i < ab->hw_params.ce_count; i++) { 320 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 321 continue; 322 323 irq_idx = ATH11K_IRQ_CE0_OFFSET + i; 324 synchronize_irq(ab->irq_num[irq_idx]); 325 } 326 } 327 328 static void ath11k_ahb_sync_ext_irqs(struct ath11k_base *ab) 329 { 330 int i, j; 331 int irq_idx; 332 333 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 334 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 335 336 for (j = 0; j < irq_grp->num_irq; j++) { 337 irq_idx = irq_grp->irqs[j]; 338 synchronize_irq(ab->irq_num[irq_idx]); 339 } 340 } 341 } 342 343 static void ath11k_ahb_ce_irqs_enable(struct ath11k_base *ab) 344 { 345 int i; 
346 347 for (i = 0; i < ab->hw_params.ce_count; i++) { 348 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 349 continue; 350 ath11k_ahb_ce_irq_enable(ab, i); 351 } 352 } 353 354 static void ath11k_ahb_ce_irqs_disable(struct ath11k_base *ab) 355 { 356 int i; 357 358 for (i = 0; i < ab->hw_params.ce_count; i++) { 359 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 360 continue; 361 ath11k_ahb_ce_irq_disable(ab, i); 362 } 363 } 364 365 static int ath11k_ahb_start(struct ath11k_base *ab) 366 { 367 ath11k_ahb_ce_irqs_enable(ab); 368 ath11k_ce_rx_post_buf(ab); 369 370 return 0; 371 } 372 373 static void ath11k_ahb_ext_irq_enable(struct ath11k_base *ab) 374 { 375 int i; 376 377 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 378 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 379 380 if (!irq_grp->napi_enabled) { 381 napi_enable(&irq_grp->napi); 382 irq_grp->napi_enabled = true; 383 } 384 ath11k_ahb_ext_grp_enable(irq_grp); 385 } 386 } 387 388 static void ath11k_ahb_ext_irq_disable(struct ath11k_base *ab) 389 { 390 __ath11k_ahb_ext_irq_disable(ab); 391 ath11k_ahb_sync_ext_irqs(ab); 392 } 393 394 static void ath11k_ahb_stop(struct ath11k_base *ab) 395 { 396 if (!test_bit(ATH11K_FLAG_CRASH_FLUSH, &ab->dev_flags)) 397 ath11k_ahb_ce_irqs_disable(ab); 398 ath11k_ahb_sync_ce_irqs(ab); 399 ath11k_ahb_kill_tasklets(ab); 400 del_timer_sync(&ab->rx_replenish_retry); 401 ath11k_ce_cleanup_pipes(ab); 402 } 403 404 static int ath11k_ahb_power_up(struct ath11k_base *ab) 405 { 406 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 407 int ret; 408 409 ret = rproc_boot(ab_ahb->tgt_rproc); 410 if (ret) 411 ath11k_err(ab, "failed to boot the remote processor Q6\n"); 412 413 return ret; 414 } 415 416 static void ath11k_ahb_power_down(struct ath11k_base *ab, bool is_suspend) 417 { 418 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 419 420 rproc_shutdown(ab_ahb->tgt_rproc); 421 } 422 423 static void ath11k_ahb_init_qmi_ce_config(struct ath11k_base *ab) 424 { 425 
struct ath11k_qmi_ce_cfg *cfg = &ab->qmi.ce_cfg; 426 427 cfg->tgt_ce_len = ab->hw_params.target_ce_count; 428 cfg->tgt_ce = ab->hw_params.target_ce_config; 429 cfg->svc_to_ce_map_len = ab->hw_params.svc_to_ce_map_len; 430 cfg->svc_to_ce_map = ab->hw_params.svc_to_ce_map; 431 ab->qmi.service_ins_id = ab->hw_params.qmi_service_ins_id; 432 } 433 434 static void ath11k_ahb_free_ext_irq(struct ath11k_base *ab) 435 { 436 int i, j; 437 438 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 439 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 440 441 for (j = 0; j < irq_grp->num_irq; j++) 442 free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp); 443 444 netif_napi_del(&irq_grp->napi); 445 free_netdev(irq_grp->napi_ndev); 446 } 447 } 448 449 static void ath11k_ahb_free_irq(struct ath11k_base *ab) 450 { 451 int irq_idx; 452 int i; 453 454 if (ab->hw_params.hybrid_bus_type) 455 return ath11k_pcic_free_irq(ab); 456 457 for (i = 0; i < ab->hw_params.ce_count; i++) { 458 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 459 continue; 460 irq_idx = ATH11K_IRQ_CE0_OFFSET + i; 461 free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]); 462 } 463 464 ath11k_ahb_free_ext_irq(ab); 465 } 466 467 static void ath11k_ahb_ce_tasklet(struct tasklet_struct *t) 468 { 469 struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq); 470 471 ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num); 472 473 ath11k_ahb_ce_irq_enable(ce_pipe->ab, ce_pipe->pipe_num); 474 } 475 476 static irqreturn_t ath11k_ahb_ce_interrupt_handler(int irq, void *arg) 477 { 478 struct ath11k_ce_pipe *ce_pipe = arg; 479 480 /* last interrupt received for this CE */ 481 ce_pipe->timestamp = jiffies; 482 483 ath11k_ahb_ce_irq_disable(ce_pipe->ab, ce_pipe->pipe_num); 484 485 tasklet_schedule(&ce_pipe->intr_tq); 486 487 return IRQ_HANDLED; 488 } 489 490 static int ath11k_ahb_ext_grp_napi_poll(struct napi_struct *napi, int budget) 491 { 492 struct ath11k_ext_irq_grp *irq_grp = container_of(napi, 493 
struct ath11k_ext_irq_grp, 494 napi); 495 struct ath11k_base *ab = irq_grp->ab; 496 int work_done; 497 498 work_done = ath11k_dp_service_srng(ab, irq_grp, budget); 499 if (work_done < budget) { 500 napi_complete_done(napi, work_done); 501 ath11k_ahb_ext_grp_enable(irq_grp); 502 } 503 504 if (work_done > budget) 505 work_done = budget; 506 507 return work_done; 508 } 509 510 static irqreturn_t ath11k_ahb_ext_interrupt_handler(int irq, void *arg) 511 { 512 struct ath11k_ext_irq_grp *irq_grp = arg; 513 514 /* last interrupt received for this group */ 515 irq_grp->timestamp = jiffies; 516 517 ath11k_ahb_ext_grp_disable(irq_grp); 518 519 napi_schedule(&irq_grp->napi); 520 521 return IRQ_HANDLED; 522 } 523 524 static int ath11k_ahb_config_ext_irq(struct ath11k_base *ab) 525 { 526 struct ath11k_hw_params *hw = &ab->hw_params; 527 int i, j; 528 int irq; 529 int ret; 530 531 for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) { 532 struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i]; 533 u32 num_irq = 0; 534 535 irq_grp->ab = ab; 536 irq_grp->grp_id = i; 537 538 irq_grp->napi_ndev = alloc_netdev_dummy(0); 539 if (!irq_grp->napi_ndev) 540 return -ENOMEM; 541 542 netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi, 543 ath11k_ahb_ext_grp_napi_poll); 544 545 for (j = 0; j < ATH11K_EXT_IRQ_NUM_MAX; j++) { 546 if (ab->hw_params.ring_mask->tx[i] & BIT(j)) { 547 irq_grp->irqs[num_irq++] = 548 wbm2host_tx_completions_ring1 - j; 549 } 550 551 if (ab->hw_params.ring_mask->rx[i] & BIT(j)) { 552 irq_grp->irqs[num_irq++] = 553 reo2host_destination_ring1 - j; 554 } 555 556 if (ab->hw_params.ring_mask->rx_err[i] & BIT(j)) 557 irq_grp->irqs[num_irq++] = reo2host_exception; 558 559 if (ab->hw_params.ring_mask->rx_wbm_rel[i] & BIT(j)) 560 irq_grp->irqs[num_irq++] = wbm2host_rx_release; 561 562 if (ab->hw_params.ring_mask->reo_status[i] & BIT(j)) 563 irq_grp->irqs[num_irq++] = reo2host_status; 564 565 if (j < ab->hw_params.max_radios) { 566 if (ab->hw_params.ring_mask->rxdma2host[i] & BIT(j)) 
{ 567 irq_grp->irqs[num_irq++] = 568 rxdma2host_destination_ring_mac1 - 569 ath11k_hw_get_mac_from_pdev_id(hw, j); 570 } 571 572 if (ab->hw_params.ring_mask->host2rxdma[i] & BIT(j)) { 573 irq_grp->irqs[num_irq++] = 574 host2rxdma_host_buf_ring_mac1 - 575 ath11k_hw_get_mac_from_pdev_id(hw, j); 576 } 577 578 if (ab->hw_params.ring_mask->rx_mon_status[i] & BIT(j)) { 579 irq_grp->irqs[num_irq++] = 580 ppdu_end_interrupts_mac1 - 581 ath11k_hw_get_mac_from_pdev_id(hw, j); 582 irq_grp->irqs[num_irq++] = 583 rxdma2host_monitor_status_ring_mac1 - 584 ath11k_hw_get_mac_from_pdev_id(hw, j); 585 } 586 } 587 } 588 irq_grp->num_irq = num_irq; 589 590 for (j = 0; j < irq_grp->num_irq; j++) { 591 int irq_idx = irq_grp->irqs[j]; 592 593 irq = platform_get_irq_byname(ab->pdev, 594 irq_name[irq_idx]); 595 ab->irq_num[irq_idx] = irq; 596 irq_set_status_flags(irq, IRQ_NOAUTOEN | IRQ_DISABLE_UNLAZY); 597 ret = request_irq(irq, ath11k_ahb_ext_interrupt_handler, 598 IRQF_TRIGGER_RISING, 599 irq_name[irq_idx], irq_grp); 600 if (ret) { 601 ath11k_err(ab, "failed request_irq for %d\n", 602 irq); 603 } 604 } 605 } 606 607 return 0; 608 } 609 610 static int ath11k_ahb_config_irq(struct ath11k_base *ab) 611 { 612 int irq, irq_idx, i; 613 int ret; 614 615 if (ab->hw_params.hybrid_bus_type) 616 return ath11k_pcic_config_irq(ab); 617 618 /* Configure CE irqs */ 619 for (i = 0; i < ab->hw_params.ce_count; i++) { 620 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 621 622 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 623 continue; 624 625 irq_idx = ATH11K_IRQ_CE0_OFFSET + i; 626 627 tasklet_setup(&ce_pipe->intr_tq, ath11k_ahb_ce_tasklet); 628 irq = platform_get_irq_byname(ab->pdev, irq_name[irq_idx]); 629 ret = request_irq(irq, ath11k_ahb_ce_interrupt_handler, 630 IRQF_TRIGGER_RISING, irq_name[irq_idx], 631 ce_pipe); 632 if (ret) 633 return ret; 634 635 ab->irq_num[irq_idx] = irq; 636 } 637 638 /* Configure external interrupts */ 639 ret = ath11k_ahb_config_ext_irq(ab); 640 641 return 
ret; 642 } 643 644 static int ath11k_ahb_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, 645 u8 *ul_pipe, u8 *dl_pipe) 646 { 647 const struct service_to_pipe *entry; 648 bool ul_set = false, dl_set = false; 649 int i; 650 651 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { 652 entry = &ab->hw_params.svc_to_ce_map[i]; 653 654 if (__le32_to_cpu(entry->service_id) != service_id) 655 continue; 656 657 switch (__le32_to_cpu(entry->pipedir)) { 658 case PIPEDIR_NONE: 659 break; 660 case PIPEDIR_IN: 661 WARN_ON(dl_set); 662 *dl_pipe = __le32_to_cpu(entry->pipenum); 663 dl_set = true; 664 break; 665 case PIPEDIR_OUT: 666 WARN_ON(ul_set); 667 *ul_pipe = __le32_to_cpu(entry->pipenum); 668 ul_set = true; 669 break; 670 case PIPEDIR_INOUT: 671 WARN_ON(dl_set); 672 WARN_ON(ul_set); 673 *dl_pipe = __le32_to_cpu(entry->pipenum); 674 *ul_pipe = __le32_to_cpu(entry->pipenum); 675 dl_set = true; 676 ul_set = true; 677 break; 678 } 679 } 680 681 if (WARN_ON(!ul_set || !dl_set)) 682 return -ENOENT; 683 684 return 0; 685 } 686 687 static int ath11k_ahb_hif_suspend(struct ath11k_base *ab) 688 { 689 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 690 u32 wake_irq; 691 u32 value = 0; 692 int ret; 693 694 if (!device_may_wakeup(ab->dev)) 695 return -EPERM; 696 697 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; 698 699 ret = enable_irq_wake(wake_irq); 700 if (ret) { 701 ath11k_err(ab, "failed to enable wakeup irq :%d\n", ret); 702 return ret; 703 } 704 705 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, 706 ATH11K_AHB_SMP2P_SMEM_SEQ_NO); 707 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_ENTER, 708 ATH11K_AHB_SMP2P_SMEM_MSG); 709 710 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, 711 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); 712 if (ret) { 713 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret); 714 return ret; 715 } 716 717 ath11k_dbg(ab, ATH11K_DBG_AHB, "device suspended\n"); 718 719 return ret; 720 
} 721 722 static int ath11k_ahb_hif_resume(struct ath11k_base *ab) 723 { 724 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 725 u32 wake_irq; 726 u32 value = 0; 727 int ret; 728 729 if (!device_may_wakeup(ab->dev)) 730 return -EPERM; 731 732 wake_irq = ab->irq_num[ATH11K_PCI_IRQ_CE0_OFFSET + ATH11K_PCI_CE_WAKE_IRQ]; 733 734 ret = disable_irq_wake(wake_irq); 735 if (ret) { 736 ath11k_err(ab, "failed to disable wakeup irq: %d\n", ret); 737 return ret; 738 } 739 740 reinit_completion(&ab->wow.wakeup_completed); 741 742 value = u32_encode_bits(ab_ahb->smp2p_info.seq_no++, 743 ATH11K_AHB_SMP2P_SMEM_SEQ_NO); 744 value |= u32_encode_bits(ATH11K_AHB_POWER_SAVE_EXIT, 745 ATH11K_AHB_SMP2P_SMEM_MSG); 746 747 ret = qcom_smem_state_update_bits(ab_ahb->smp2p_info.smem_state, 748 ATH11K_AHB_SMP2P_SMEM_VALUE_MASK, value); 749 if (ret) { 750 ath11k_err(ab, "failed to send smp2p power save enter cmd :%d\n", ret); 751 return ret; 752 } 753 754 ret = wait_for_completion_timeout(&ab->wow.wakeup_completed, 3 * HZ); 755 if (ret == 0) { 756 ath11k_warn(ab, "timed out while waiting for wow wakeup completion\n"); 757 return -ETIMEDOUT; 758 } 759 760 ath11k_dbg(ab, ATH11K_DBG_AHB, "device resumed\n"); 761 762 return 0; 763 } 764 765 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_ipq8074 = { 766 .start = ath11k_ahb_start, 767 .stop = ath11k_ahb_stop, 768 .read32 = ath11k_ahb_read32, 769 .write32 = ath11k_ahb_write32, 770 .read = NULL, 771 .irq_enable = ath11k_ahb_ext_irq_enable, 772 .irq_disable = ath11k_ahb_ext_irq_disable, 773 .map_service_to_pipe = ath11k_ahb_map_service_to_pipe, 774 .power_down = ath11k_ahb_power_down, 775 .power_up = ath11k_ahb_power_up, 776 }; 777 778 static const struct ath11k_hif_ops ath11k_ahb_hif_ops_wcn6750 = { 779 .start = ath11k_pcic_start, 780 .stop = ath11k_pcic_stop, 781 .read32 = ath11k_pcic_read32, 782 .write32 = ath11k_pcic_write32, 783 .read = NULL, 784 .irq_enable = ath11k_pcic_ext_irq_enable, 785 .irq_disable = ath11k_pcic_ext_irq_disable, 786 
.get_msi_address = ath11k_pcic_get_msi_address, 787 .get_user_msi_vector = ath11k_pcic_get_user_msi_assignment, 788 .map_service_to_pipe = ath11k_pcic_map_service_to_pipe, 789 .power_down = ath11k_ahb_power_down, 790 .power_up = ath11k_ahb_power_up, 791 .suspend = ath11k_ahb_hif_suspend, 792 .resume = ath11k_ahb_hif_resume, 793 .ce_irq_enable = ath11k_pci_enable_ce_irqs_except_wake_irq, 794 .ce_irq_disable = ath11k_pci_disable_ce_irqs_except_wake_irq, 795 }; 796 797 static int ath11k_core_get_rproc(struct ath11k_base *ab) 798 { 799 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 800 struct device *dev = ab->dev; 801 struct rproc *prproc; 802 phandle rproc_phandle; 803 804 if (of_property_read_u32(dev->of_node, "qcom,rproc", &rproc_phandle)) { 805 ath11k_err(ab, "failed to get q6_rproc handle\n"); 806 return -ENOENT; 807 } 808 809 prproc = rproc_get_by_phandle(rproc_phandle); 810 if (!prproc) { 811 ath11k_dbg(ab, ATH11K_DBG_AHB, "failed to get rproc, deferring\n"); 812 return -EPROBE_DEFER; 813 } 814 ab_ahb->tgt_rproc = prproc; 815 816 return 0; 817 } 818 819 static int ath11k_ahb_setup_msi_resources(struct ath11k_base *ab) 820 { 821 struct platform_device *pdev = ab->pdev; 822 phys_addr_t msi_addr_pa; 823 dma_addr_t msi_addr_iova; 824 struct resource *res; 825 int int_prop; 826 int ret; 827 int i; 828 829 ret = ath11k_pcic_init_msi_config(ab); 830 if (ret) { 831 ath11k_err(ab, "failed to init msi config: %d\n", ret); 832 return ret; 833 } 834 835 res = platform_get_resource(pdev, IORESOURCE_MEM, 0); 836 if (!res) { 837 ath11k_err(ab, "failed to fetch msi_addr\n"); 838 return -ENOENT; 839 } 840 841 msi_addr_pa = res->start; 842 msi_addr_iova = dma_map_resource(ab->dev, msi_addr_pa, PAGE_SIZE, 843 DMA_FROM_DEVICE, 0); 844 if (dma_mapping_error(ab->dev, msi_addr_iova)) 845 return -ENOMEM; 846 847 ab->pci.msi.addr_lo = lower_32_bits(msi_addr_iova); 848 ab->pci.msi.addr_hi = upper_32_bits(msi_addr_iova); 849 850 ret = of_property_read_u32_index(ab->dev->of_node, 
"interrupts", 1, &int_prop); 851 if (ret) 852 return ret; 853 854 ab->pci.msi.ep_base_data = int_prop + 32; 855 856 for (i = 0; i < ab->pci.msi.config->total_vectors; i++) { 857 ret = platform_get_irq(pdev, i); 858 if (ret < 0) 859 return ret; 860 861 ab->pci.msi.irqs[i] = ret; 862 } 863 864 set_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags); 865 866 return 0; 867 } 868 869 static int ath11k_ahb_setup_smp2p_handle(struct ath11k_base *ab) 870 { 871 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 872 873 if (!ab->hw_params.smp2p_wow_exit) 874 return 0; 875 876 ab_ahb->smp2p_info.smem_state = qcom_smem_state_get(ab->dev, "wlan-smp2p-out", 877 &ab_ahb->smp2p_info.smem_bit); 878 if (IS_ERR(ab_ahb->smp2p_info.smem_state)) { 879 ath11k_err(ab, "failed to fetch smem state: %ld\n", 880 PTR_ERR(ab_ahb->smp2p_info.smem_state)); 881 return PTR_ERR(ab_ahb->smp2p_info.smem_state); 882 } 883 884 return 0; 885 } 886 887 static void ath11k_ahb_release_smp2p_handle(struct ath11k_base *ab) 888 { 889 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 890 891 if (!ab->hw_params.smp2p_wow_exit) 892 return; 893 894 qcom_smem_state_put(ab_ahb->smp2p_info.smem_state); 895 } 896 897 static int ath11k_ahb_setup_resources(struct ath11k_base *ab) 898 { 899 struct platform_device *pdev = ab->pdev; 900 struct resource *mem_res; 901 void __iomem *mem; 902 903 if (ab->hw_params.hybrid_bus_type) 904 return ath11k_ahb_setup_msi_resources(ab); 905 906 mem = devm_platform_get_and_ioremap_resource(pdev, 0, &mem_res); 907 if (IS_ERR(mem)) { 908 dev_err(&pdev->dev, "ioremap error\n"); 909 return PTR_ERR(mem); 910 } 911 912 ab->mem = mem; 913 ab->mem_len = resource_size(mem_res); 914 915 return 0; 916 } 917 918 static int ath11k_ahb_setup_msa_resources(struct ath11k_base *ab) 919 { 920 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 921 struct device *dev = ab->dev; 922 struct device_node *node; 923 struct resource r; 924 int ret; 925 926 node = of_parse_phandle(dev->of_node, "memory-region", 0); 927 
if (!node) 928 return -ENOENT; 929 930 ret = of_address_to_resource(node, 0, &r); 931 of_node_put(node); 932 if (ret) { 933 dev_err(dev, "failed to resolve msa fixed region\n"); 934 return ret; 935 } 936 937 ab_ahb->fw.msa_paddr = r.start; 938 ab_ahb->fw.msa_size = resource_size(&r); 939 940 node = of_parse_phandle(dev->of_node, "memory-region", 1); 941 if (!node) 942 return -ENOENT; 943 944 ret = of_address_to_resource(node, 0, &r); 945 of_node_put(node); 946 if (ret) { 947 dev_err(dev, "failed to resolve ce fixed region\n"); 948 return ret; 949 } 950 951 ab_ahb->fw.ce_paddr = r.start; 952 ab_ahb->fw.ce_size = resource_size(&r); 953 954 return 0; 955 } 956 957 static int ath11k_ahb_fw_resources_init(struct ath11k_base *ab) 958 { 959 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 960 struct device *host_dev = ab->dev; 961 struct platform_device_info info = {0}; 962 struct iommu_domain *iommu_dom; 963 struct platform_device *pdev; 964 struct device_node *node; 965 int ret; 966 967 /* Chipsets not requiring MSA need not initialize 968 * MSA resources, return success in such cases. 
969 */ 970 if (!ab->hw_params.fixed_fw_mem) 971 return 0; 972 973 ret = ath11k_ahb_setup_msa_resources(ab); 974 if (ret) { 975 ath11k_err(ab, "failed to setup msa resources\n"); 976 return ret; 977 } 978 979 node = of_get_child_by_name(host_dev->of_node, "wifi-firmware"); 980 if (!node) { 981 ab_ahb->fw.use_tz = true; 982 return 0; 983 } 984 985 info.fwnode = &node->fwnode; 986 info.parent = host_dev; 987 info.name = node->name; 988 info.dma_mask = DMA_BIT_MASK(32); 989 990 pdev = platform_device_register_full(&info); 991 if (IS_ERR(pdev)) { 992 of_node_put(node); 993 return PTR_ERR(pdev); 994 } 995 996 ret = of_dma_configure(&pdev->dev, node, true); 997 if (ret) { 998 ath11k_err(ab, "dma configure fail: %d\n", ret); 999 goto err_unregister; 1000 } 1001 1002 ab_ahb->fw.dev = &pdev->dev; 1003 1004 iommu_dom = iommu_domain_alloc(&platform_bus_type); 1005 if (!iommu_dom) { 1006 ath11k_err(ab, "failed to allocate iommu domain\n"); 1007 ret = -ENOMEM; 1008 goto err_unregister; 1009 } 1010 1011 ret = iommu_attach_device(iommu_dom, ab_ahb->fw.dev); 1012 if (ret) { 1013 ath11k_err(ab, "could not attach device: %d\n", ret); 1014 goto err_iommu_free; 1015 } 1016 1017 ret = iommu_map(iommu_dom, ab_ahb->fw.msa_paddr, 1018 ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size, 1019 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); 1020 if (ret) { 1021 ath11k_err(ab, "failed to map firmware region: %d\n", ret); 1022 goto err_iommu_detach; 1023 } 1024 1025 ret = iommu_map(iommu_dom, ab_ahb->fw.ce_paddr, 1026 ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size, 1027 IOMMU_READ | IOMMU_WRITE, GFP_KERNEL); 1028 if (ret) { 1029 ath11k_err(ab, "failed to map firmware CE region: %d\n", ret); 1030 goto err_iommu_unmap; 1031 } 1032 1033 ab_ahb->fw.use_tz = false; 1034 ab_ahb->fw.iommu_domain = iommu_dom; 1035 of_node_put(node); 1036 1037 return 0; 1038 1039 err_iommu_unmap: 1040 iommu_unmap(iommu_dom, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); 1041 1042 err_iommu_detach: 1043 iommu_detach_device(iommu_dom, 
ab_ahb->fw.dev); 1044 1045 err_iommu_free: 1046 iommu_domain_free(iommu_dom); 1047 1048 err_unregister: 1049 platform_device_unregister(pdev); 1050 of_node_put(node); 1051 1052 return ret; 1053 } 1054 1055 static int ath11k_ahb_fw_resource_deinit(struct ath11k_base *ab) 1056 { 1057 struct ath11k_ahb *ab_ahb = ath11k_ahb_priv(ab); 1058 struct iommu_domain *iommu; 1059 size_t unmapped_size; 1060 1061 /* Chipsets not requiring MSA would have not initialized 1062 * MSA resources, return success in such cases. 1063 */ 1064 if (!ab->hw_params.fixed_fw_mem) 1065 return 0; 1066 1067 if (ab_ahb->fw.use_tz) 1068 return 0; 1069 1070 iommu = ab_ahb->fw.iommu_domain; 1071 1072 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.msa_paddr, ab_ahb->fw.msa_size); 1073 if (unmapped_size != ab_ahb->fw.msa_size) 1074 ath11k_err(ab, "failed to unmap firmware: %zu\n", 1075 unmapped_size); 1076 1077 unmapped_size = iommu_unmap(iommu, ab_ahb->fw.ce_paddr, ab_ahb->fw.ce_size); 1078 if (unmapped_size != ab_ahb->fw.ce_size) 1079 ath11k_err(ab, "failed to unmap firmware CE memory: %zu\n", 1080 unmapped_size); 1081 1082 iommu_detach_device(iommu, ab_ahb->fw.dev); 1083 iommu_domain_free(iommu); 1084 1085 platform_device_unregister(to_platform_device(ab_ahb->fw.dev)); 1086 1087 return 0; 1088 } 1089 1090 static int ath11k_ahb_probe(struct platform_device *pdev) 1091 { 1092 struct ath11k_base *ab; 1093 const struct ath11k_hif_ops *hif_ops; 1094 const struct ath11k_pci_ops *pci_ops; 1095 enum ath11k_hw_rev hw_rev; 1096 int ret; 1097 1098 hw_rev = (uintptr_t)device_get_match_data(&pdev->dev); 1099 1100 switch (hw_rev) { 1101 case ATH11K_HW_IPQ8074: 1102 case ATH11K_HW_IPQ6018_HW10: 1103 case ATH11K_HW_IPQ5018_HW10: 1104 hif_ops = &ath11k_ahb_hif_ops_ipq8074; 1105 pci_ops = NULL; 1106 break; 1107 case ATH11K_HW_WCN6750_HW10: 1108 hif_ops = &ath11k_ahb_hif_ops_wcn6750; 1109 pci_ops = &ath11k_ahb_pci_ops_wcn6750; 1110 break; 1111 default: 1112 dev_err(&pdev->dev, "unsupported device type %d\n", hw_rev); 
1113 return -EOPNOTSUPP; 1114 } 1115 1116 ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)); 1117 if (ret) { 1118 dev_err(&pdev->dev, "failed to set 32-bit consistent dma\n"); 1119 return ret; 1120 } 1121 1122 ab = ath11k_core_alloc(&pdev->dev, sizeof(struct ath11k_ahb), 1123 ATH11K_BUS_AHB); 1124 if (!ab) { 1125 dev_err(&pdev->dev, "failed to allocate ath11k base\n"); 1126 return -ENOMEM; 1127 } 1128 1129 ab->hif.ops = hif_ops; 1130 ab->pdev = pdev; 1131 ab->hw_rev = hw_rev; 1132 ab->fw_mode = ATH11K_FIRMWARE_MODE_NORMAL; 1133 platform_set_drvdata(pdev, ab); 1134 1135 ret = ath11k_pcic_register_pci_ops(ab, pci_ops); 1136 if (ret) { 1137 ath11k_err(ab, "failed to register PCI ops: %d\n", ret); 1138 goto err_core_free; 1139 } 1140 1141 ret = ath11k_core_pre_init(ab); 1142 if (ret) 1143 goto err_core_free; 1144 1145 ret = ath11k_ahb_setup_resources(ab); 1146 if (ret) 1147 goto err_core_free; 1148 1149 ab->mem_ce = ab->mem; 1150 1151 if (ab->hw_params.ce_remap) { 1152 const struct ce_remap *ce_remap = ab->hw_params.ce_remap; 1153 /* ce register space is moved out of wcss unlike ipq8074 or ipq6018 1154 * and the space is not contiguous, hence remapping the CE registers 1155 * to a new space for accessing them. 
1156 */ 1157 ab->mem_ce = ioremap(ce_remap->base, ce_remap->size); 1158 if (!ab->mem_ce) { 1159 dev_err(&pdev->dev, "ce ioremap error\n"); 1160 ret = -ENOMEM; 1161 goto err_core_free; 1162 } 1163 } 1164 1165 ret = ath11k_ahb_fw_resources_init(ab); 1166 if (ret) 1167 goto err_core_free; 1168 1169 ret = ath11k_ahb_setup_smp2p_handle(ab); 1170 if (ret) 1171 goto err_fw_deinit; 1172 1173 ret = ath11k_hal_srng_init(ab); 1174 if (ret) 1175 goto err_release_smp2p_handle; 1176 1177 ret = ath11k_ce_alloc_pipes(ab); 1178 if (ret) { 1179 ath11k_err(ab, "failed to allocate ce pipes: %d\n", ret); 1180 goto err_hal_srng_deinit; 1181 } 1182 1183 ath11k_ahb_init_qmi_ce_config(ab); 1184 1185 ret = ath11k_core_get_rproc(ab); 1186 if (ret) { 1187 ath11k_err(ab, "failed to get rproc: %d\n", ret); 1188 goto err_ce_free; 1189 } 1190 1191 ret = ath11k_core_init(ab); 1192 if (ret) { 1193 ath11k_err(ab, "failed to init core: %d\n", ret); 1194 goto err_ce_free; 1195 } 1196 1197 ret = ath11k_ahb_config_irq(ab); 1198 if (ret) { 1199 ath11k_err(ab, "failed to configure irq: %d\n", ret); 1200 goto err_ce_free; 1201 } 1202 1203 ath11k_qmi_fwreset_from_cold_boot(ab); 1204 1205 return 0; 1206 1207 err_ce_free: 1208 ath11k_ce_free_pipes(ab); 1209 1210 err_hal_srng_deinit: 1211 ath11k_hal_srng_deinit(ab); 1212 1213 err_release_smp2p_handle: 1214 ath11k_ahb_release_smp2p_handle(ab); 1215 1216 err_fw_deinit: 1217 ath11k_ahb_fw_resource_deinit(ab); 1218 1219 err_core_free: 1220 ath11k_core_free(ab); 1221 platform_set_drvdata(pdev, NULL); 1222 1223 return ret; 1224 } 1225 1226 static void ath11k_ahb_remove_prepare(struct ath11k_base *ab) 1227 { 1228 unsigned long left; 1229 1230 if (test_bit(ATH11K_FLAG_RECOVERY, &ab->dev_flags)) { 1231 left = wait_for_completion_timeout(&ab->driver_recovery, 1232 ATH11K_AHB_RECOVERY_TIMEOUT); 1233 if (!left) 1234 ath11k_warn(ab, "failed to receive recovery response completion\n"); 1235 } 1236 1237 set_bit(ATH11K_FLAG_UNREGISTERING, &ab->dev_flags); 1238 
cancel_work_sync(&ab->restart_work);
	cancel_work_sync(&ab->qmi.event_work);
}

/* Release everything probe acquired, in reverse order of acquisition:
 * IRQs, SRNG, smp2p handle, firmware MSA/CE resources, CE pipes, the
 * optional remapped CE register window, and finally the core itself.
 */
static void ath11k_ahb_free_resources(struct ath11k_base *ab)
{
	struct platform_device *pdev = ab->pdev;

	ath11k_ahb_free_irq(ab);
	ath11k_hal_srng_deinit(ab);
	ath11k_ahb_release_smp2p_handle(ab);
	ath11k_ahb_fw_resource_deinit(ab);
	ath11k_ce_free_pipes(ab);

	/* Only unmap when probe created a separate CE mapping; otherwise
	 * mem_ce aliases the main register space.
	 */
	if (ab->hw_params.ce_remap)
		iounmap(ab->mem_ce);

	ath11k_core_free(ab);
	platform_set_drvdata(pdev, NULL);
}

/* Driver unbind (rmmod) path. */
static void ath11k_ahb_remove(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	if (test_bit(ATH11K_FLAG_QMI_FAIL, &ab->dev_flags)) {
		/* QMI never came up: the core was not fully initialized,
		 * so skip remove_prepare/core_deinit and tear down only
		 * what was actually brought up.
		 */
		ath11k_ahb_power_down(ab, false);
		ath11k_debugfs_soc_destroy(ab);
		ath11k_qmi_deinit_service(ab);
		goto qmi_fail;
	}

	ath11k_ahb_remove_prepare(ab);
	ath11k_core_deinit(ab);

qmi_fail:
	ath11k_ahb_free_resources(ab);
}

/* System reboot/shutdown path.
 * NOTE(review): assumes drvdata is non-NULL here — the platform core
 * only invokes shutdown() for devices that were successfully bound;
 * confirm no path reaches this with drvdata cleared.
 */
static void ath11k_ahb_shutdown(struct platform_device *pdev)
{
	struct ath11k_base *ab = platform_get_drvdata(pdev);

	/* platform shutdown() & remove() are mutually exclusive.
	 * remove() is invoked during rmmod & shutdown() during
	 * system reboot/shutdown.
	 */
	ath11k_ahb_remove_prepare(ab);

	/* If the core never registered with mac80211 there is nothing
	 * to deinit; go straight to freeing resources.
	 */
	if (!(test_bit(ATH11K_FLAG_REGISTERED, &ab->dev_flags)))
		goto free_resources;

	ath11k_core_deinit(ab);

free_resources:
	ath11k_ahb_free_resources(ab);
}

static struct platform_driver ath11k_ahb_driver = {
	.driver = {
		.name = "ath11k",
		.of_match_table = ath11k_ahb_of_match,
	},
	.probe = ath11k_ahb_probe,
	.remove_new = ath11k_ahb_remove,
	.shutdown = ath11k_ahb_shutdown,
};

module_platform_driver(ath11k_ahb_driver);

MODULE_DESCRIPTION("Driver support for Qualcomm Technologies 802.11ax WLAN AHB devices");
MODULE_LICENSE("Dual BSD/GPL");