// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0
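
/*
 * Worked offset example (values chosen purely for illustration): for
 * hwirq 40 in context 2, the enable bit lives at
 * CONTEXT_ENABLE_BASE + 2 * CONTEXT_ENABLE_SIZE + (40 / 32) * 4 = 0x2104,
 * bit (40 % 32) = 8, and the source's priority register sits at
 * PRIORITY_BASE + 40 * PRIORITY_PER_ID = 0xa0.
 */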

struct plic_priv {
	struct fwnode_handle *fwnode;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
	u32 gsi_base;
	int acpi_plic_id;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};
static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
	plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif
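
/*
 * In the irq_chip callbacks above, enable/disable flip the per-context
 * enable bits while mask/unmask write the per-source priority register:
 * a source at priority 0 can never exceed the hart threshold, so writing
 * 0 masks it without touching any enable vector.
 */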

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};
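
/*
 * Save-area layout: enable_save holds one u32 per 32 sources per handler,
 * mirroring the hardware enable vector, while prio_save is a single bitmap
 * because this driver only ever programs a source priority of 0 or 1.
 */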

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	/* For DT, gsi_base is always zero. */
	if (fwspec->param[0] >= priv->gsi_base)
		fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
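
/*
 * Since every source is hardwired to priority 1, PLIC_ENABLE_THRESHOLD (0)
 * lets all enabled sources through, while PLIC_DISABLE_THRESHOLD (7) is at
 * least as large as any priority this driver programs and therefore blocks
 * the context entirely.
 */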

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("%pfwP: cpu%d: parent irq not available\n",
			handler->priv->fwnode, cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
	{ "RSCV0001", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif
static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
					   u32 *nr_irqs, u32 *nr_contexts,
					   u32 *gsi_base, u32 *id)
{
	int rc;

	if (!is_of_node(fwnode)) {
		rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
		if (rc) {
			pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
			return rc;
		}

		*nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
		if (WARN_ON(!*nr_contexts)) {
			pr_err("%pfwP: no PLIC context available\n", fwnode);
			return -EINVAL;
		}

		return 0;
	}

	rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		pr_err("%pfwP: no PLIC context available\n", fwnode);
		return -EINVAL;
	}

	*gsi_base = 0;
	*id = 0;

	return 0;
}

static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
				     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(id, context);
		if (hartid == INVALID_HARTID)
			return -EINVAL;

		*parent_cpu = riscv_hartid_to_cpuid(hartid);
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
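
/*
 * On DT systems, contexts are enumerated from the PLIC node's parent
 * interrupt specifiers. An illustrative fragment (values are assumed for
 * the example, not taken from any particular board):
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>;
 *		...
 *	};
 *
 * Context 0 targets cpu0's M-mode external interrupt (11) and context 1
 * its S-mode external interrupt (9, i.e. RV_IRQ_EXT), so only context 1
 * is kept when the kernel runs in S-mode.
 */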

static int plic_probe(struct fwnode_handle *fwnode)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	void __iomem *regs;
	int id, context_id;
	u32 gsi_base;

	if (is_of_node(fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;

		regs = of_iomap(to_of_node(fwnode), 0);
		if (!regs)
			return -ENOMEM;
	} else {
		regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);
	}

	error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts,
						&gsi_base, &id);
	if (error)
		goto fail_free_regs;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto fail_free_regs;
	}

	priv->fwnode = fwnode;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;
	priv->regs = regs;
	priv->gsi_base = gsi_base;
	priv->acpi_plic_id = id;

	priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save) {
		error = -ENOMEM;
		goto fail_free_priv;
	}

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
						  priv->acpi_plic_id);
		if (error) {
			pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
			continue;
		}

		if (is_of_node(fwnode)) {
			context_id = i;
		} else {
			context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
			if (context_id == INVALID_CONTEXT) {
				pr_warn("%pfwP: invalid context id for context%d\n",
					fwnode, i);
				continue;
			}
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("%pfwP: handler already present for context %d.\n",
				fwnode, i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
					context_id * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
					context_id * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32),
					       sizeof(*handler->enable_save),
					       GFP_KERNEL);
		if (!handler->enable_save) {
			error = -ENOMEM;
			goto fail_cleanup_contexts;
		}
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}
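
	/*
	 * hwirq 0 is defined as non-existent by the spec, so the linear
	 * domain below is sized nr_irqs + 1 and valid sources run from 1
	 * to nr_irqs.
	 */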
	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1,
						   &plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain)) {
		error = -ENOMEM;
		goto fail_cleanup_contexts;
	}

	/*
	 * We can have multiple PLIC instances, so set up global state
	 * and register syscore operations only once after the context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(),
							  DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq,
							plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
#endif

	pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
		fwnode, nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
					      priv->acpi_plic_id))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		kfree(handler->enable_save);
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	bitmap_free(priv->prio_save);
fail_free_priv:
	kfree(priv);
fail_free_regs:
	iounmap(regs);
	return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
	return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
	.driver = {
		.name		= "riscv-plic",
		.of_match_table	= plic_match,
		.suppress_bind_attrs = true,
		.acpi_match_table = ACPI_PTR(plic_acpi_match),
	},
	.probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);

static int __init plic_early_probe(struct device_node *node,
				   struct device_node *parent)
{
	return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);
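
/*
 * Note that IRQCHIP_DECLARE() probes the D1 PLIC during early irqchip
 * initialization, before platform devices are available; the remaining
 * compatibles bind through the platform driver above.
 */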