// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it.  Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0

struct plic_priv {
	struct fwnode_handle *fwnode;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
	u32 gsi_base;
	int acpi_plic_id;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);
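
/*
 * Worked example (illustrative only, not used by the code): with the layout
 * above, the enable bit for hwirq 42 in context 3 lives at byte offset
 * CONTEXT_ENABLE_BASE + 3 * CONTEXT_ENABLE_SIZE + (42 / 32) * 4 =
 * 0x2000 + 0x180 + 0x4 = 0x2184, bit (42 % 32) = 10.  This is exactly the
 * word/bit arithmetic __plic_toggle() performs below.
 */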
static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
	plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Invalidate the original routing entry */
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Set the new routing entry if the irq is enabled */
	if (!irqd_irq_disabled(d))
		plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);

	return IRQ_SET_MASK_OK_DONE;
}
#endif
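
/*
 * Two irq_chip flavours: on parts with the PLIC_QUIRK_EDGE_INTERRUPT quirk,
 * edge-triggered sources are claimed up front via .irq_ack (handle_edge_irq)
 * so that a new edge arriving while the handler runs is not lost, while
 * level-triggered sources complete the interrupt after handling via .irq_eoi
 * (handle_fasteoi_irq).  plic_irq_set_type() switches a source between the
 * two chips.
 */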
static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}
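
/*
 * Only two pieces of PLIC state are saved across suspend: the per-source
 * priorities, which this driver only ever programs to 0 or 1 and can thus
 * keep as single bits in the prio_save bitmap, and each present context's
 * interrupt-enable words in enable_save.
 */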
static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		__assign_bit(i, priv->prio_save,
			     readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID));
	}

	for_each_present_cpu(cpu) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_present_cpu(cpu) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	/* For DT, gsi_base is always zero. */
	if (fwspec->param[0] >= priv->gsi_base)
		fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register.  This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);

		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}
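
/*
 * Since every source priority is hardwired to 1, any non-zero threshold
 * blocks all interrupts for a context; the driver uses PLIC_DISABLE_THRESHOLD
 * (0x7) for that.  PLIC_ENABLE_THRESHOLD (0) lets every enabled source
 * through.
 */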
static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("%pfwP: cpu%d: parent irq not available\n",
			handler->priv->fwnode, cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
	{ "RSCV0001", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif

static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
					   u32 *nr_irqs, u32 *nr_contexts,
					   u32 *gsi_base, u32 *id)
{
	int rc;

	if (!is_of_node(fwnode)) {
		rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
		if (rc) {
			pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
			return rc;
		}

		*nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
		if (WARN_ON(!*nr_contexts)) {
			pr_err("%pfwP: no PLIC context available\n", fwnode);
			return -EINVAL;
		}

		return 0;
	}

	rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		pr_err("%pfwP: no PLIC context available\n", fwnode);
		return -EINVAL;
	}

	*gsi_base = 0;
	*id = 0;

	return 0;
}

static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
				     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(id, context);
		if (hartid == INVALID_HARTID)
			return -EINVAL;

		*parent_cpu = riscv_hartid_to_cpuid(hartid);
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
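
/*
 * A PLIC context pairs one hart with one privilege level; the DT
 * "interrupts-extended" property (or the ACPI namespace) identifies the
 * parent of each context.  plic_probe() below only wires up contexts whose
 * parent hwirq is RV_IRQ_EXT, i.e. the external interrupt line of the
 * privilege mode the kernel runs in.
 */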
static int plic_probe(struct fwnode_handle *fwnode)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	void __iomem *regs;
	int id, context_id;
	u32 gsi_base;

	if (is_of_node(fwnode)) {
		const struct of_device_id *match;

		match = of_match_node(plic_match, to_of_node(fwnode));
		if (match)
			plic_quirks = (unsigned long)match->data;

		regs = of_iomap(to_of_node(fwnode), 0);
		if (!regs)
			return -ENOMEM;
	} else {
		regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);
	}

	error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
	if (error)
		goto fail_free_regs;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto fail_free_regs;
	}

	priv->fwnode = fwnode;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;
	priv->regs = regs;
	priv->gsi_base = gsi_base;
	priv->acpi_plic_id = id;

	priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save) {
		error = -ENOMEM;
		goto fail_free_priv;
	}

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
						  priv->acpi_plic_id);
		if (error) {
			pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
			continue;
		}

		if (is_of_node(fwnode)) {
			context_id = i;
		} else {
			context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
			if (context_id == INVALID_CONTEXT) {
				pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
				continue;
			}
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
				     context_id * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
				       context_id * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
					       sizeof(*handler->enable_save),
					       GFP_KERNEL);
		if (!handler->enable_save) {
			error = -ENOMEM;
			goto fail_cleanup_contexts;
		}
done:
		/* Start with all sources disabled and priorities set to 1. */
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}
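
	/*
	 * The domain spans nr_irqs + 1 hwirqs because PLIC source IDs start
	 * at 1: ID 0 means "no interrupt" (a claim read of 0 terminates the
	 * loop in plic_handle_irq()), so it is never mapped.
	 */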
	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
						   &plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain)) {
		error = -ENOMEM;
		goto fail_cleanup_contexts;
	}

	/*
	 * We can have multiple PLIC instances so setup global state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
#endif

	pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
		fwnode, nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		kfree(handler->enable_save);
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	bitmap_free(priv->prio_save);
fail_free_priv:
	kfree(priv);
fail_free_regs:
	iounmap(regs);
	return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
	return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
	.driver = {
		.name			= "riscv-plic",
		.of_match_table		= plic_match,
		.suppress_bind_attrs	= true,
		.acpi_match_table	= ACPI_PTR(plic_acpi_match),
	},
	.probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);

/*
 * The Allwinner D1 PLIC is still probed early via IRQCHIP_DECLARE();
 * all other PLICs are probed as regular platform devices above.
 */
static int __init plic_early_probe(struct device_node *node,
				   struct device_node *parent)
{
	return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);