// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of devices supported by hardware marked as
 * 'sifive,plic-1.0.0' is 1024, of which device 0 is defined as non-existent
 * by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0
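/*
 * Putting the layout above together, an illustrative address computation
 * (not used by the code, which derives the same values at runtime): the
 * enable bit for interrupt source 42 in context 3 lives at
 *
 *	regs + CONTEXT_ENABLE_BASE + 3 * CONTEXT_ENABLE_SIZE + (42 / 32) * 4
 *	     = regs + 0x2184, bit (42 % 32) = bit 10
 *
 * and its priority register at
 * regs + PRIORITY_BASE + 42 * PRIORITY_PER_ID = regs + 0xa8.
 */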
#define PLIC_QUIRK_EDGE_INTERRUPT	0

struct plic_priv {
	struct fwnode_handle *fwnode;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
	u32 gsi_base;
	int acpi_plic_id;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};
static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
	plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}
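/*
 * Background for the disabled-irq branch below: the PLIC silently ignores a
 * completion write for a source that is not currently enabled for the
 * target. So when an EOI arrives for an interrupt that was disabled while
 * in flight, the handler briefly re-enables the source, completes it, and
 * disables it again; otherwise the source would stay claimed forever.
 */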
static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Invalidate the original routing entry */
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Set the new routing entry if the irq is enabled */
	if (!irqd_irq_disabled(d))
		plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}
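/*
 * Note on the suspend/resume scheme below: because this driver only ever
 * writes 0 or 1 to a priority register (mask/unmask), one bit per source in
 * priv->prio_save is enough to capture the pre-suspend priorities, and
 * resume can reconstruct each register as either 1 or 0.
 */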
static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	/* irq ID 0 is reserved */
	for (i = 1; i < priv->nr_irqs; i++) {
		__assign_bit(i, priv->prio_save,
			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
	}

	for_each_present_cpu(cpu) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	/* irq ID 0 is reserved */
	for (i = 1; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_present_cpu(cpu) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	/* For DT, gsi_base is always zero. */
	if (fwspec->param[0] >= priv->gsi_base)
		fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
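/*
 * Worked example of the threshold gating used below: a hart only takes an
 * interrupt whose priority is strictly greater than its context threshold.
 * With all source priorities hardwired to 1 by this driver, a threshold of
 * PLIC_ENABLE_THRESHOLD (0) lets every enabled source through, while
 * PLIC_DISABLE_THRESHOLD (0x7) gates them all off.
 */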
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("%pfwP: cpu%d: parent irq not available\n",
			handler->priv->fwnode, cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
	{ "RSCV0001", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif

static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
					   u32 *nr_irqs, u32 *nr_contexts,
					   u32 *gsi_base, u32 *id)
{
	int rc;

	if (!is_of_node(fwnode)) {
		rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
		if (rc) {
			pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
			return rc;
		}

		*nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
		if (WARN_ON(!*nr_contexts)) {
			pr_err("%pfwP: no PLIC context available\n", fwnode);
			return -EINVAL;
		}

		return 0;
	}

	rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		pr_err("%pfwP: no PLIC context available\n", fwnode);
		return -EINVAL;
	}

	*gsi_base = 0;
	*id = 0;

	return 0;
}
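/*
 * For reference, a hypothetical DT node of the shape the function above
 * parses (values illustrative; 11 and 9 are the M-mode and S-mode external
 * interrupt numbers in the riscv,cpu-intc binding):
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		riscv,ndev = <53>;
 *		interrupts-extended = <&cpu0_intc 11>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 *
 * Here nr_irqs = 53 and nr_contexts = 3, one per interrupts-extended entry.
 */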
static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
				     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(id, context);
		if (hartid == INVALID_HARTID)
			return -EINVAL;

		*parent_cpu = riscv_hartid_to_cpuid(hartid);
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
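/*
 * Context numbering is implementation-defined. As an illustration (the
 * SiFive FU540 layout, assumed here for concreteness): context 0 is hart 0's
 * M-mode external interrupt, and contexts 1/2, 3/4, ... are the M-mode and
 * S-mode pairs of harts 1, 2, and so on. plic_probe() below compares each
 * context's parent_hwirq against RV_IRQ_EXT, which resolves to the M-mode or
 * S-mode external interrupt number depending on the kernel's privilege
 * level, and keeps only the matching contexts.
 */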
static int plic_probe(struct fwnode_handle *fwnode)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	void __iomem *regs;
	int id, context_id;
	u32 gsi_base;

	if (is_of_node(fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;

		regs = of_iomap(to_of_node(fwnode), 0);
		if (!regs)
			return -ENOMEM;
	} else {
		regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);
	}

	error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
	if (error)
		goto fail_free_regs;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto fail_free_regs;
	}

	priv->fwnode = fwnode;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;
	priv->regs = regs;
	priv->gsi_base = gsi_base;
	priv->acpi_plic_id = id;

	priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save) {
		error = -ENOMEM;
		goto fail_free_priv;
	}

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
						  priv->acpi_plic_id);
		if (error) {
			pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
			continue;
		}

		if (is_of_node(fwnode)) {
			context_id = i;
		} else {
			context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
			if (context_id == INVALID_CONTEXT) {
				pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
				continue;
			}
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
				     context_id * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
				       context_id * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
					       sizeof(*handler->enable_save), GFP_KERNEL);
		if (!handler->enable_save) {
			error = -ENOMEM;
			goto fail_cleanup_contexts;
		}
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
						   &plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain)) {
		error = -ENOMEM;
		goto fail_cleanup_contexts;
	}

	/*
	 * We can have multiple PLIC instances, so set up the global state
	 * and register the syscore operations only once, after the context
	 * handlers of all online CPUs have been initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find the parent domain and register the chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
#endif

	pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
		fwnode, nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		kfree(handler->enable_save);
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	bitmap_free(priv->prio_save);
fail_free_priv:
	kfree(priv);
fail_free_regs:
	iounmap(regs);
	return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
	return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
	.driver = {
		.name			= "riscv-plic",
		.of_match_table		= plic_match,
		.suppress_bind_attrs	= true,
		.acpi_match_table	= ACPI_PTR(plic_acpi_match),
	},
	.probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);
static int __init plic_early_probe(struct device_node *node,
				   struct device_node *parent)
{
	return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);