1 // SPDX-License-Identifier: BSD-3-Clause-Clear 2 /* 3 * Copyright (c) 2019-2021 The Linux Foundation. All rights reserved. 4 * Copyright (c) 2021-2024 Qualcomm Innovation Center, Inc. All rights reserved. 5 */ 6 7 #include "core.h" 8 #include "pcic.h" 9 #include "debug.h" 10 11 static const char *irq_name[ATH11K_IRQ_NUM_MAX] = { 12 "bhi", 13 "mhi-er0", 14 "mhi-er1", 15 "ce0", 16 "ce1", 17 "ce2", 18 "ce3", 19 "ce4", 20 "ce5", 21 "ce6", 22 "ce7", 23 "ce8", 24 "ce9", 25 "ce10", 26 "ce11", 27 "host2wbm-desc-feed", 28 "host2reo-re-injection", 29 "host2reo-command", 30 "host2rxdma-monitor-ring3", 31 "host2rxdma-monitor-ring2", 32 "host2rxdma-monitor-ring1", 33 "reo2ost-exception", 34 "wbm2host-rx-release", 35 "reo2host-status", 36 "reo2host-destination-ring4", 37 "reo2host-destination-ring3", 38 "reo2host-destination-ring2", 39 "reo2host-destination-ring1", 40 "rxdma2host-monitor-destination-mac3", 41 "rxdma2host-monitor-destination-mac2", 42 "rxdma2host-monitor-destination-mac1", 43 "ppdu-end-interrupts-mac3", 44 "ppdu-end-interrupts-mac2", 45 "ppdu-end-interrupts-mac1", 46 "rxdma2host-monitor-status-ring-mac3", 47 "rxdma2host-monitor-status-ring-mac2", 48 "rxdma2host-monitor-status-ring-mac1", 49 "host2rxdma-host-buf-ring-mac3", 50 "host2rxdma-host-buf-ring-mac2", 51 "host2rxdma-host-buf-ring-mac1", 52 "rxdma2host-destination-ring-mac3", 53 "rxdma2host-destination-ring-mac2", 54 "rxdma2host-destination-ring-mac1", 55 "host2tcl-input-ring4", 56 "host2tcl-input-ring3", 57 "host2tcl-input-ring2", 58 "host2tcl-input-ring1", 59 "wbm2host-tx-completions-ring3", 60 "wbm2host-tx-completions-ring2", 61 "wbm2host-tx-completions-ring1", 62 "tcl2host-status-ring", 63 }; 64 65 static const struct ath11k_msi_config ath11k_msi_config[] = { 66 { 67 .total_vectors = 32, 68 .total_users = 4, 69 .users = (struct ath11k_msi_user[]) { 70 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 71 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 72 { .name = "WAKE", .num_vectors = 1, 
.base_vector = 13 }, 73 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 74 }, 75 .hw_rev = ATH11K_HW_QCA6390_HW20, 76 }, 77 { 78 .total_vectors = 16, 79 .total_users = 3, 80 .users = (struct ath11k_msi_user[]) { 81 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 82 { .name = "CE", .num_vectors = 5, .base_vector = 3 }, 83 { .name = "DP", .num_vectors = 8, .base_vector = 8 }, 84 }, 85 .hw_rev = ATH11K_HW_QCN9074_HW10, 86 }, 87 { 88 .total_vectors = 32, 89 .total_users = 4, 90 .users = (struct ath11k_msi_user[]) { 91 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 92 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 93 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 94 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 95 }, 96 .hw_rev = ATH11K_HW_WCN6855_HW20, 97 }, 98 { 99 .total_vectors = 32, 100 .total_users = 4, 101 .users = (struct ath11k_msi_user[]) { 102 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 103 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 104 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 105 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 106 }, 107 .hw_rev = ATH11K_HW_WCN6855_HW21, 108 }, 109 { 110 .total_vectors = 28, 111 .total_users = 2, 112 .users = (struct ath11k_msi_user[]) { 113 { .name = "CE", .num_vectors = 10, .base_vector = 0 }, 114 { .name = "DP", .num_vectors = 18, .base_vector = 10 }, 115 }, 116 .hw_rev = ATH11K_HW_WCN6750_HW10, 117 }, 118 { 119 .total_vectors = 32, 120 .total_users = 4, 121 .users = (struct ath11k_msi_user[]) { 122 { .name = "MHI", .num_vectors = 3, .base_vector = 0 }, 123 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 124 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 125 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 126 }, 127 .hw_rev = ATH11K_HW_QCA2066_HW21, 128 }, 129 { 130 .total_vectors = 32, 131 .total_users = 4, 132 .users = (struct ath11k_msi_user[]) { 133 { .name = "MHI", .num_vectors = 3, .base_vector 
= 0 }, 134 { .name = "CE", .num_vectors = 10, .base_vector = 3 }, 135 { .name = "WAKE", .num_vectors = 1, .base_vector = 13 }, 136 { .name = "DP", .num_vectors = 18, .base_vector = 14 }, 137 }, 138 .hw_rev = ATH11K_HW_QCA6698AQ_HW21, 139 }, 140 }; 141 142 int ath11k_pcic_init_msi_config(struct ath11k_base *ab) 143 { 144 const struct ath11k_msi_config *msi_config; 145 int i; 146 147 for (i = 0; i < ARRAY_SIZE(ath11k_msi_config); i++) { 148 msi_config = &ath11k_msi_config[i]; 149 150 if (msi_config->hw_rev == ab->hw_rev) 151 break; 152 } 153 154 if (i == ARRAY_SIZE(ath11k_msi_config)) { 155 ath11k_err(ab, "failed to fetch msi config, unsupported hw version: 0x%x\n", 156 ab->hw_rev); 157 return -EINVAL; 158 } 159 160 ab->pci.msi.config = msi_config; 161 return 0; 162 } 163 EXPORT_SYMBOL(ath11k_pcic_init_msi_config); 164 165 static void __ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) 166 { 167 if (offset < ATH11K_PCI_WINDOW_START) 168 iowrite32(value, ab->mem + offset); 169 else 170 ab->pci.ops->window_write32(ab, offset, value); 171 } 172 173 void ath11k_pcic_write32(struct ath11k_base *ab, u32 offset, u32 value) 174 { 175 int ret = 0; 176 bool wakeup_required; 177 178 /* for offset beyond BAR + 4K - 32, may 179 * need to wakeup the device to access. 
180 */ 181 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && 182 offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; 183 if (wakeup_required && ab->pci.ops->wakeup) 184 ret = ab->pci.ops->wakeup(ab); 185 186 __ath11k_pcic_write32(ab, offset, value); 187 188 if (wakeup_required && !ret && ab->pci.ops->release) 189 ab->pci.ops->release(ab); 190 } 191 EXPORT_SYMBOL(ath11k_pcic_write32); 192 193 static u32 __ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) 194 { 195 u32 val; 196 197 if (offset < ATH11K_PCI_WINDOW_START) 198 val = ioread32(ab->mem + offset); 199 else 200 val = ab->pci.ops->window_read32(ab, offset); 201 202 return val; 203 } 204 205 u32 ath11k_pcic_read32(struct ath11k_base *ab, u32 offset) 206 { 207 int ret = 0; 208 u32 val; 209 bool wakeup_required; 210 211 /* for offset beyond BAR + 4K - 32, may 212 * need to wakeup the device to access. 213 */ 214 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && 215 offset >= ATH11K_PCI_ACCESS_ALWAYS_OFF; 216 if (wakeup_required && ab->pci.ops->wakeup) 217 ret = ab->pci.ops->wakeup(ab); 218 219 val = __ath11k_pcic_read32(ab, offset); 220 221 if (wakeup_required && !ret && ab->pci.ops->release) 222 ab->pci.ops->release(ab); 223 224 return val; 225 } 226 EXPORT_SYMBOL(ath11k_pcic_read32); 227 228 int ath11k_pcic_read(struct ath11k_base *ab, void *buf, u32 start, u32 end) 229 { 230 int ret = 0; 231 bool wakeup_required; 232 u32 *data = buf; 233 u32 i; 234 235 /* for offset beyond BAR + 4K - 32, may 236 * need to wakeup the device to access. 
237 */ 238 wakeup_required = test_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags) && 239 end >= ATH11K_PCI_ACCESS_ALWAYS_OFF; 240 if (wakeup_required && ab->pci.ops->wakeup) { 241 ret = ab->pci.ops->wakeup(ab); 242 if (ret) { 243 ath11k_warn(ab, 244 "wakeup failed, data may be invalid: %d", 245 ret); 246 /* Even though wakeup() failed, continue processing rather 247 * than returning because some parts of the data may still 248 * be valid and useful in some cases, e.g. could give us 249 * some clues on firmware crash. 250 * Mislead due to invalid data could be avoided because we 251 * are aware of the wakeup failure. 252 */ 253 } 254 } 255 256 for (i = start; i < end + 1; i += 4) 257 *data++ = __ath11k_pcic_read32(ab, i); 258 259 if (wakeup_required && ab->pci.ops->release) 260 ab->pci.ops->release(ab); 261 262 return 0; 263 } 264 EXPORT_SYMBOL(ath11k_pcic_read); 265 266 void ath11k_pcic_get_msi_address(struct ath11k_base *ab, u32 *msi_addr_lo, 267 u32 *msi_addr_hi) 268 { 269 *msi_addr_lo = ab->pci.msi.addr_lo; 270 *msi_addr_hi = ab->pci.msi.addr_hi; 271 } 272 EXPORT_SYMBOL(ath11k_pcic_get_msi_address); 273 274 int ath11k_pcic_get_user_msi_assignment(struct ath11k_base *ab, char *user_name, 275 int *num_vectors, u32 *user_base_data, 276 u32 *base_vector) 277 { 278 const struct ath11k_msi_config *msi_config = ab->pci.msi.config; 279 int idx; 280 281 for (idx = 0; idx < msi_config->total_users; idx++) { 282 if (strcmp(user_name, msi_config->users[idx].name) == 0) { 283 *num_vectors = msi_config->users[idx].num_vectors; 284 *base_vector = msi_config->users[idx].base_vector; 285 *user_base_data = *base_vector + ab->pci.msi.ep_base_data; 286 287 ath11k_dbg(ab, ATH11K_DBG_PCI, 288 "msi assignment %s num_vectors %d user_base_data %u base_vector %u\n", 289 user_name, *num_vectors, *user_base_data, 290 *base_vector); 291 292 return 0; 293 } 294 } 295 296 ath11k_err(ab, "Failed to find MSI assignment for %s!\n", user_name); 297 298 return -EINVAL; 299 } 300 
EXPORT_SYMBOL(ath11k_pcic_get_user_msi_assignment);

/* Translate a CE id into its MSI vector index.  CEs flagged with
 * CE_ATTR_DIS_INTR do not consume a vector, so the index only counts
 * interrupt-capable CEs below @ce_id.
 */
void ath11k_pcic_get_ce_msi_idx(struct ath11k_base *ab, u32 ce_id, u32 *msi_idx)
{
	u32 i, msi_data_idx;

	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		if (ce_id == i)
			break;

		msi_data_idx++;
	}
	*msi_idx = msi_data_idx;
}
EXPORT_SYMBOL(ath11k_pcic_get_ce_msi_idx);

/* Free every DP (ext) group's IRQs, unregister its NAPI context and
 * release the dummy netdev allocated in ath11k_pcic_ext_irq_config().
 */
static void ath11k_pcic_free_ext_irq(struct ath11k_base *ab)
{
	int i, j;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++)
			free_irq(ab->irq_num[irq_grp->irqs[j]], irq_grp);

		netif_napi_del(&irq_grp->napi);
		free_netdev(irq_grp->napi_ndev);
	}
}

/* Free all CE IRQs (skipping interrupt-less CEs) and then all DP group
 * IRQs.  Inverse of ath11k_pcic_config_irq().
 */
void ath11k_pcic_free_irq(struct ath11k_base *ab)
{
	int i, irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		free_irq(ab->irq_num[irq_idx], &ab->ce.ce_pipe[i]);
	}

	ath11k_pcic_free_ext_irq(ab);
}
EXPORT_SYMBOL(ath11k_pcic_free_irq);

/* Unmask the interrupt line of one CE. */
static void ath11k_pcic_ce_irq_enable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	enable_irq(ab->irq_num[irq_idx]);
}

/* Mask the interrupt line of one CE (non-syncing; see
 * ath11k_pcic_sync_ce_irqs() for waiting out in-flight handlers).
 */
static void ath11k_pcic_ce_irq_disable(struct ath11k_base *ab, u16 ce_id)
{
	u32 irq_idx;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_id;
	disable_irq_nosync(ab->irq_num[irq_idx]);
}

/* Mark CE interrupts globally disabled, then mask each per-CE line. */
static void ath11k_pcic_ce_irqs_disable(struct ath11k_base *ab)
{
	int i;

	/* Clear the flag first so handlers that still fire bail out early
	 * in ath11k_pcic_ce_interrupt_handler().
	 */
	clear_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pcic_ce_irq_disable(ab, i);
	}
}

/* Wait until no CE interrupt handler is still running. */
static void ath11k_pcic_sync_ce_irqs(struct ath11k_base *ab)
{
	int i;
	int irq_idx;

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;
		synchronize_irq(ab->irq_num[irq_idx]);
	}
}

/* Bottom half for a CE interrupt: service the pipe, then re-enable the
 * line that the hard-irq handler disabled.
 */
static void ath11k_pcic_ce_tasklet(struct tasklet_struct *t)
{
	struct ath11k_ce_pipe *ce_pipe = from_tasklet(ce_pipe, t, intr_tq);
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	ath11k_ce_per_engine_service(ce_pipe->ab, ce_pipe->pipe_num);

	enable_irq(ce_pipe->ab->irq_num[irq_idx]);
}

/* Hard-irq handler for a CE: mask the line and defer the work to
 * ath11k_pcic_ce_tasklet().
 */
static irqreturn_t ath11k_pcic_ce_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ce_pipe *ce_pipe = arg;
	struct ath11k_base *ab = ce_pipe->ab;
	int irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + ce_pipe->pipe_num;

	if (!test_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	/* last interrupt received for this CE */
	ce_pipe->timestamp = jiffies;

	disable_irq_nosync(ab->irq_num[irq_idx]);

	tasklet_schedule(&ce_pipe->intr_tq);

	return IRQ_HANDLED;
}

/* Mask every IRQ line belonging to one DP group. */
static void ath11k_pcic_ext_grp_disable(struct ath11k_ext_irq_grp *irq_grp)
{
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* In case of one MSI vector, we handle irq enable/disable
	 * in a uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

/* Disable all DP group IRQs and stop their NAPI instances. */
static void __ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	int i;

	/* Clear the flag first so late handlers bail out early in
	 * ath11k_pcic_ext_interrupt_handler().
	 */
	clear_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		ath11k_pcic_ext_grp_disable(irq_grp);

		if (irq_grp->napi_enabled) {
			napi_synchronize(&irq_grp->napi);
			napi_disable(&irq_grp->napi);
			irq_grp->napi_enabled = false;
		}
	}
}

/* Unmask every IRQ line belonging to one DP group. */
static void ath11k_pcic_ext_grp_enable(struct ath11k_ext_irq_grp *irq_grp)
{
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	/* In case of one MSI vector, we handle irq enable/disable in a
	 * uniform way since we only have one irq
	 */
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		return;

	for (i = 0; i < irq_grp->num_irq; i++)
		enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
}

/* Enable NAPI and IRQs for every DP group, then mark ext interrupts as
 * globally enabled.
 */
void ath11k_pcic_ext_irq_enable(struct ath11k_base *ab)
{
	int i;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		if (!irq_grp->napi_enabled) {
			napi_enable(&irq_grp->napi);
			irq_grp->napi_enabled = true;
		}
		ath11k_pcic_ext_grp_enable(irq_grp);
	}

	set_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_enable);

/* Wait until no DP group interrupt handler is still running. */
static void ath11k_pcic_sync_ext_irqs(struct ath11k_base *ab)
{
	int i, j, irq_idx;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		struct ath11k_ext_irq_grp *irq_grp = &ab->ext_irq_grp[i];

		for (j = 0; j < irq_grp->num_irq; j++) {
			irq_idx = irq_grp->irqs[j];
			synchronize_irq(ab->irq_num[irq_idx]);
		}
	}
}

/* Disable all DP group IRQs and wait out any in-flight handlers. */
void ath11k_pcic_ext_irq_disable(struct ath11k_base *ab)
{
	__ath11k_pcic_ext_irq_disable(ab);
	ath11k_pcic_sync_ext_irqs(ab);
}
EXPORT_SYMBOL(ath11k_pcic_ext_irq_disable);

/* NAPI poll callback for a DP group: service the SRNGs and, once under
 * budget, complete NAPI and re-enable the group's IRQ lines that the
 * hard-irq handler disabled.
 */
static int ath11k_pcic_ext_grp_napi_poll(struct napi_struct *napi, int budget)
{
	struct ath11k_ext_irq_grp *irq_grp = container_of(napi,
						struct ath11k_ext_irq_grp,
						napi);
	struct ath11k_base *ab = irq_grp->ab;
	int work_done;
	int i;

	work_done = ath11k_dp_service_srng(ab, irq_grp, budget);
	if (work_done < budget) {
		napi_complete_done(napi, work_done);
		for (i = 0; i < irq_grp->num_irq; i++)
			enable_irq(irq_grp->ab->irq_num[irq_grp->irqs[i]]);
	}

	/* NAPI contract: never report more work than the given budget. */
	if (work_done > budget)
		work_done = budget;

	return work_done;
}

/* Hard-irq handler for a DP group: mask the group's lines and hand off
 * to NAPI.
 */
static irqreturn_t ath11k_pcic_ext_interrupt_handler(int irq, void *arg)
{
	struct ath11k_ext_irq_grp *irq_grp = arg;
	struct ath11k_base *ab = irq_grp->ab;
	int i;

	if (!test_bit(ATH11K_FLAG_EXT_IRQ_ENABLED, &ab->dev_flags))
		return IRQ_HANDLED;

	ath11k_dbg(irq_grp->ab, ATH11K_DBG_PCI, "ext irq %d\n", irq);

	/* last interrupt received for this group */
	irq_grp->timestamp = jiffies;

	for (i = 0; i < irq_grp->num_irq; i++)
		disable_irq_nosync(irq_grp->ab->irq_num[irq_grp->irqs[i]]);

	napi_schedule(&irq_grp->napi);

	return IRQ_HANDLED;
}

/* Resolve an MSI vector number to a Linux IRQ number via the bus ops. */
static int
ath11k_pcic_get_msi_irq(struct ath11k_base *ab, unsigned int vector)
{
	return ab->pci.ops->get_msi_irq(ab, vector);
}

/* Set up the DP (ext) interrupt groups: allocate a dummy netdev + NAPI
 * context per group, map each active group onto one of the "DP" MSI
 * vectors (wrapping with i % num_vectors) and request its IRQ.  On
 * failure, frees the dummy netdevs allocated so far.
 */
static int ath11k_pcic_ext_irq_config(struct ath11k_base *ab)
{
	int i, j, n, ret, num_vectors = 0;
	u32 user_base_data = 0, base_vector = 0;
	struct ath11k_ext_irq_grp *irq_grp;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "DP", &num_vectors,
						  &user_base_data,
						  &base_vector);
	if (ret < 0)
		return ret;

	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	for (i = 0; i < ATH11K_EXT_IRQ_GRP_NUM_MAX; i++) {
		irq_grp = &ab->ext_irq_grp[i];
		u32 num_irq = 0;

		irq_grp->ab = ab;
		irq_grp->grp_id = i;
		irq_grp->napi_ndev = alloc_netdev_dummy(0);
		if (!irq_grp->napi_ndev) {
			ret = -ENOMEM;
			goto fail_allocate;
		}

		netif_napi_add(irq_grp->napi_ndev, &irq_grp->napi,
			       ath11k_pcic_ext_grp_napi_poll);

		/* A group only gets an IRQ if at least one ring mask routes
		 * work to it for this group index.
		 */
		if (ab->hw_params.ring_mask->tx[i] ||
		    ab->hw_params.ring_mask->rx[i] ||
		    ab->hw_params.ring_mask->rx_err[i] ||
		    ab->hw_params.ring_mask->rx_wbm_rel[i] ||
		    ab->hw_params.ring_mask->reo_status[i] ||
		    ab->hw_params.ring_mask->rxdma2host[i] ||
		    ab->hw_params.ring_mask->host2rxdma[i] ||
		    ab->hw_params.ring_mask->rx_mon_status[i]) {
			num_irq = 1;
		}

		irq_grp->num_irq = num_irq;
		irq_grp->irqs[0] = ATH11K_PCI_IRQ_DP_OFFSET + i;

		for (j = 0; j < irq_grp->num_irq; j++) {
			int irq_idx = irq_grp->irqs[j];
			int vector = (i % num_vectors) + base_vector;
			int irq = ath11k_pcic_get_msi_irq(ab, vector);

			if (irq < 0) {
				ret = irq;
				goto fail_irq;
			}

			ab->irq_num[irq_idx] = irq;

			ath11k_dbg(ab, ATH11K_DBG_PCI,
				   "irq %d group %d\n", irq, i);

			irq_set_status_flags(irq, IRQ_DISABLE_UNLAZY);
			ret = request_irq(irq, ath11k_pcic_ext_interrupt_handler,
					  irq_flags, "DP_EXT_IRQ", irq_grp);
			if (ret) {
				ath11k_err(ab, "failed request irq %d: %d\n",
					   vector, ret);
				/* NOTE(review): this path frees the dummy
				 * netdevs for groups 0..i but does not free
				 * IRQs already requested for earlier groups —
				 * confirm whether that is intentional.
				 */
				for (n = 0; n <= i; n++) {
					irq_grp = &ab->ext_irq_grp[n];
					free_netdev(irq_grp->napi_ndev);
				}
				return ret;
			}
		}
		ath11k_pcic_ext_grp_disable(irq_grp);
	}

	return 0;
fail_irq:
	/* i ->napi_ndev was properly allocated. Free it also */
	i += 1;
fail_allocate:
	for (n = 0; n < i; n++) {
		irq_grp = &ab->ext_irq_grp[n];
		free_netdev(irq_grp->napi_ndev);
	}
	return ret;
}

/* Request all CE interrupts (tasklet-driven) and then configure the DP
 * group interrupts.  Each CE IRQ starts out disabled; it is enabled
 * later via ath11k_pcic_ce_irqs_enable().
 */
int ath11k_pcic_config_irq(struct ath11k_base *ab)
{
	struct ath11k_ce_pipe *ce_pipe;
	u32 msi_data_start;
	u32 msi_data_count, msi_data_idx;
	u32 msi_irq_start;
	unsigned int msi_data;
	int irq, i, ret, irq_idx;
	unsigned long irq_flags;

	ret = ath11k_pcic_get_user_msi_assignment(ab, "CE", &msi_data_count,
						  &msi_data_start, &msi_irq_start);
	if (ret)
		return ret;

	irq_flags = IRQF_SHARED;
	if (!test_bit(ATH11K_FLAG_MULTI_MSI_VECTORS, &ab->dev_flags))
		irq_flags |= IRQF_NOBALANCING;

	/* Configure CE irqs */
	for (i = 0, msi_data_idx = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;

		/* Wrap CEs onto the available "CE" MSI vectors. */
		msi_data = (msi_data_idx % msi_data_count) + msi_irq_start;
		irq = ath11k_pcic_get_msi_irq(ab, msi_data);
		if (irq < 0)
			return irq;

		ce_pipe = &ab->ce.ce_pipe[i];

		irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i;

		tasklet_setup(&ce_pipe->intr_tq, ath11k_pcic_ce_tasklet);

		ret = request_irq(irq, ath11k_pcic_ce_interrupt_handler,
				  irq_flags, irq_name[irq_idx], ce_pipe);
		if (ret) {
			ath11k_err(ab, "failed to request irq %d: %d\n",
				   irq_idx, ret);
			return ret;
		}

		ab->irq_num[irq_idx] = irq;
		msi_data_idx++;

		ath11k_pcic_ce_irq_disable(ab, i);
	}

	ret = ath11k_pcic_ext_irq_config(ab);
	if (ret)
		return ret;

	return 0;
}
EXPORT_SYMBOL(ath11k_pcic_config_irq);

/* Mark CE interrupts globally enabled, then unmask each per-CE line. */
void ath11k_pcic_ce_irqs_enable(struct ath11k_base *ab)
{
	int i;

	set_bit(ATH11K_FLAG_CE_IRQ_ENABLED, &ab->dev_flags);

	for (i = 0; i < ab->hw_params.ce_count; i++) {
		if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR)
			continue;
		ath11k_pcic_ce_irq_enable(ab, i);
	}
}
EXPORT_SYMBOL(ath11k_pcic_ce_irqs_enable); 730 731 static void ath11k_pcic_kill_tasklets(struct ath11k_base *ab) 732 { 733 int i; 734 735 for (i = 0; i < ab->hw_params.ce_count; i++) { 736 struct ath11k_ce_pipe *ce_pipe = &ab->ce.ce_pipe[i]; 737 738 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR) 739 continue; 740 741 tasklet_kill(&ce_pipe->intr_tq); 742 } 743 } 744 745 void ath11k_pcic_ce_irq_disable_sync(struct ath11k_base *ab) 746 { 747 ath11k_pcic_ce_irqs_disable(ab); 748 ath11k_pcic_sync_ce_irqs(ab); 749 ath11k_pcic_kill_tasklets(ab); 750 } 751 EXPORT_SYMBOL(ath11k_pcic_ce_irq_disable_sync); 752 753 void ath11k_pcic_stop(struct ath11k_base *ab) 754 { 755 ath11k_pcic_ce_irq_disable_sync(ab); 756 ath11k_ce_cleanup_pipes(ab); 757 } 758 EXPORT_SYMBOL(ath11k_pcic_stop); 759 760 int ath11k_pcic_start(struct ath11k_base *ab) 761 { 762 set_bit(ATH11K_FLAG_DEVICE_INIT_DONE, &ab->dev_flags); 763 764 ath11k_pcic_ce_irqs_enable(ab); 765 ath11k_ce_rx_post_buf(ab); 766 767 return 0; 768 } 769 EXPORT_SYMBOL(ath11k_pcic_start); 770 771 int ath11k_pcic_map_service_to_pipe(struct ath11k_base *ab, u16 service_id, 772 u8 *ul_pipe, u8 *dl_pipe) 773 { 774 const struct service_to_pipe *entry; 775 bool ul_set = false, dl_set = false; 776 int i; 777 778 for (i = 0; i < ab->hw_params.svc_to_ce_map_len; i++) { 779 entry = &ab->hw_params.svc_to_ce_map[i]; 780 781 if (__le32_to_cpu(entry->service_id) != service_id) 782 continue; 783 784 switch (__le32_to_cpu(entry->pipedir)) { 785 case PIPEDIR_NONE: 786 break; 787 case PIPEDIR_IN: 788 WARN_ON(dl_set); 789 *dl_pipe = __le32_to_cpu(entry->pipenum); 790 dl_set = true; 791 break; 792 case PIPEDIR_OUT: 793 WARN_ON(ul_set); 794 *ul_pipe = __le32_to_cpu(entry->pipenum); 795 ul_set = true; 796 break; 797 case PIPEDIR_INOUT: 798 WARN_ON(dl_set); 799 WARN_ON(ul_set); 800 *dl_pipe = __le32_to_cpu(entry->pipenum); 801 *ul_pipe = __le32_to_cpu(entry->pipenum); 802 dl_set = true; 803 ul_set = true; 804 break; 805 } 806 } 807 808 if 
(WARN_ON(!ul_set || !dl_set)) 809 return -ENOENT; 810 811 return 0; 812 } 813 EXPORT_SYMBOL(ath11k_pcic_map_service_to_pipe); 814 815 int ath11k_pcic_register_pci_ops(struct ath11k_base *ab, 816 const struct ath11k_pci_ops *pci_ops) 817 { 818 if (!pci_ops) 819 return 0; 820 821 /* Return error if mandatory pci_ops callbacks are missing */ 822 if (!pci_ops->get_msi_irq || !pci_ops->window_write32 || 823 !pci_ops->window_read32) 824 return -EINVAL; 825 826 ab->pci.ops = pci_ops; 827 return 0; 828 } 829 EXPORT_SYMBOL(ath11k_pcic_register_pci_ops); 830 831 void ath11k_pci_enable_ce_irqs_except_wake_irq(struct ath11k_base *ab) 832 { 833 int i; 834 835 for (i = 0; i < ab->hw_params.ce_count; i++) { 836 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR || 837 i == ATH11K_PCI_CE_WAKE_IRQ) 838 continue; 839 ath11k_pcic_ce_irq_enable(ab, i); 840 } 841 } 842 EXPORT_SYMBOL(ath11k_pci_enable_ce_irqs_except_wake_irq); 843 844 void ath11k_pci_disable_ce_irqs_except_wake_irq(struct ath11k_base *ab) 845 { 846 int i; 847 int irq_idx; 848 struct ath11k_ce_pipe *ce_pipe; 849 850 for (i = 0; i < ab->hw_params.ce_count; i++) { 851 ce_pipe = &ab->ce.ce_pipe[i]; 852 irq_idx = ATH11K_PCI_IRQ_CE0_OFFSET + i; 853 854 if (ath11k_ce_get_attr_flags(ab, i) & CE_ATTR_DIS_INTR || 855 i == ATH11K_PCI_CE_WAKE_IRQ) 856 continue; 857 858 disable_irq_nosync(ab->irq_num[irq_idx]); 859 synchronize_irq(ab->irq_num[irq_idx]); 860 tasklet_kill(&ce_pipe->intr_tq); 861 } 862 } 863 EXPORT_SYMBOL(ath11k_pci_disable_ce_irqs_except_wake_irq); 864