// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#define pr_fmt(fmt) "riscv-plic: " fmt
#include <linux/acpi.h>
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0', is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

#define PENDING_BASE			0x1000

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there's only two: a source priority threshold over which the hart will
 * take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0
#define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM	1

struct plic_priv {
	struct fwnode_handle *fwnode;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	/* device interrupts + 1 to compensate for the reserved hwirq 0 */
	unsigned int __private total_irqs;
	unsigned int irq_groups;
	unsigned long *prio_save;
	u32 gsi_base;
	int acpi_plic_id;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};

/* Macro to deal with the insanity of hardware interrupt 0 being reserved */
#define for_each_device_irq(iter, priv) \
	for (unsigned int iter = 1; iter < ACCESS_PRIVATE(priv, total_irqs); iter++)

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	u32 __iomem *base = handler->enable_base;
	u32 hwirq_mask = 1 << (hwirq % 32);
	int group = hwirq / 32;
	u32 value;

	value = readl(base + group);

	if (enable)
		value |= hwirq_mask;
	else
		value &= ~hwirq_mask;

	handler->enable_save[group] = value;
	writel(value, base + group);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
	plic_irq_unmask(d);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	/* Invalidate the original routing entry */
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	/* Setting the new routing entry if irq is enabled */
	if (!irqd_irq_disabled(d))
		plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
= "SiFive PLIC", 215 .irq_enable = plic_irq_enable, 216 .irq_disable = plic_irq_disable, 217 .irq_ack = plic_irq_eoi, 218 .irq_mask = plic_irq_mask, 219 .irq_unmask = plic_irq_unmask, 220 #ifdef CONFIG_SMP 221 .irq_set_affinity = plic_set_affinity, 222 #endif 223 .irq_set_type = plic_irq_set_type, 224 .flags = IRQCHIP_SKIP_SET_WAKE | 225 IRQCHIP_AFFINITY_PRE_STARTUP, 226 }; 227 228 static struct irq_chip plic_chip = { 229 .name = "SiFive PLIC", 230 .irq_enable = plic_irq_enable, 231 .irq_disable = plic_irq_disable, 232 .irq_mask = plic_irq_mask, 233 .irq_unmask = plic_irq_unmask, 234 .irq_eoi = plic_irq_eoi, 235 #ifdef CONFIG_SMP 236 .irq_set_affinity = plic_set_affinity, 237 #endif 238 .irq_set_type = plic_irq_set_type, 239 .flags = IRQCHIP_SKIP_SET_WAKE | 240 IRQCHIP_AFFINITY_PRE_STARTUP, 241 }; 242 243 static int plic_irq_set_type(struct irq_data *d, unsigned int type) 244 { 245 struct plic_priv *priv = irq_data_get_irq_chip_data(d); 246 247 if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) 248 return IRQ_SET_MASK_OK_NOCOPY; 249 250 switch (type) { 251 case IRQ_TYPE_EDGE_RISING: 252 irq_set_chip_handler_name_locked(d, &plic_edge_chip, 253 handle_edge_irq, NULL); 254 break; 255 case IRQ_TYPE_LEVEL_HIGH: 256 irq_set_chip_handler_name_locked(d, &plic_chip, 257 handle_fasteoi_irq, NULL); 258 break; 259 default: 260 return -EINVAL; 261 } 262 263 return IRQ_SET_MASK_OK; 264 } 265 266 static int plic_irq_suspend(void *data) 267 { 268 struct plic_priv *priv = this_cpu_ptr(&plic_handlers)->priv; 269 270 for_each_device_irq(irq, priv) { 271 __assign_bit(irq, priv->prio_save, 272 readl(priv->regs + PRIORITY_BASE + irq * PRIORITY_PER_ID)); 273 } 274 275 return 0; 276 } 277 278 static void plic_irq_resume(void *data) 279 { 280 struct plic_priv *priv = this_cpu_ptr(&plic_handlers)->priv; 281 unsigned int index, cpu; 282 unsigned long flags; 283 u32 __iomem *reg; 284 285 for_each_device_irq(irq, priv) { 286 index = BIT_WORD(irq); 287 writel((priv->prio_save[index] & BIT_MASK(irq)) ? 1 : 0, 288 priv->regs + PRIORITY_BASE + irq * PRIORITY_PER_ID); 289 } 290 291 for_each_present_cpu(cpu) { 292 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); 293 294 if (!handler->present) 295 continue; 296 297 raw_spin_lock_irqsave(&handler->enable_lock, flags); 298 for (unsigned int i = 0; i < priv->irq_groups; i++) { 299 reg = handler->enable_base + i * sizeof(u32); 300 writel(handler->enable_save[i], reg); 301 } 302 raw_spin_unlock_irqrestore(&handler->enable_lock, flags); 303 } 304 } 305 306 static const struct syscore_ops plic_irq_syscore_ops = { 307 .suspend = plic_irq_suspend, 308 .resume = plic_irq_resume, 309 }; 310 311 static struct syscore plic_irq_syscore = { 312 .ops = &plic_irq_syscore_ops, 313 }; 314 315 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, 316 irq_hw_number_t hwirq) 317 { 318 struct plic_priv *priv = d->host_data; 319 320 irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, 321 handle_fasteoi_irq, NULL, NULL); 322 irq_set_noprobe(irq); 323 irq_set_affinity(irq, &priv->lmask); 324 return 0; 325 } 326 327 static int plic_irq_domain_translate(struct irq_domain *d, 328 struct irq_fwspec *fwspec, 329 unsigned long *hwirq, 330 unsigned int *type) 331 { 332 struct plic_priv *priv = d->host_data; 333 334 /* For DT, gsi_base is always zero. 
	if (fwspec->param[0] >= priv->gsi_base)
		fwspec->param[0] = fwspec->param[0] - priv->gsi_base;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *handler)
{
	u32 __iomem *pending = handler->priv->regs + PENDING_BASE;
	u32 __iomem *enable = handler->enable_base;
	u32 pending_irqs = 0;
	int i, j;

	/* Look for first pending interrupt */
	for (i = 0; i < nr_irq_groups; i++) {
		/* Any pending interrupts would be annihilated, so skip checking them */
		if (!handler->enable_save[i])
			continue;

		pending_irqs = handler->enable_save[i] & readl_relaxed(pending + i);
		if (pending_irqs)
			break;
	}

	if (!pending_irqs)
		return 0;

	/* Isolate lowest set bit */
	pending_irqs &= -pending_irqs;

	/* Disable all interrupts but the first pending one */
	for (j = 0; j < nr_irq_groups; j++) {
		u32 new_mask = j == i ? pending_irqs : 0;

		if (new_mask != handler->enable_save[j])
			writel_relaxed(new_mask, enable + j);
	}
	return pending_irqs;
}

static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim)
{
	int nr_irq_groups = handler->priv->irq_groups;
	u32 __iomem *enable = handler->enable_base;
	irq_hw_number_t hwirq = 0;
	u32 iso_mask;
	int i;

	guard(raw_spinlock)(&handler->enable_lock);

	/* Existing enable state is already cached in enable_save */
	iso_mask = cp100_isolate_pending_irq(nr_irq_groups, handler);
	if (!iso_mask)
		return 0;

	/*
	 * Interrupts delivered to hardware still become pending, but only
	 * interrupts that are both pending and enabled can be claimed.
	 * Clearing the enable bit for all interrupts but the first pending
	 * one avoids a hardware bug that occurs during read from the claim
	 * register with more than one eligible interrupt.
	 */
	hwirq = readl(claim);

	/* Restore previous state */
	for (i = 0; i < nr_irq_groups; i++) {
		u32 written = i == hwirq / 32 ? iso_mask : 0;
		u32 stored = handler->enable_save[i];

		if (stored != written)
			writel_relaxed(stored, enable + i);
	}
	return hwirq;
}

static void plic_handle_irq_cp100(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = cp100_get_hwirq(handler, claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq);

		if (unlikely(err)) {
			pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n",
					    handler->priv->fwnode, hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		pr_warn("%pfwP: cpu%d: parent irq not available\n",
			handler->priv->fwnode, cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "ultrarisc,cp100-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) },
	{}
};

#ifdef CONFIG_ACPI

static const struct acpi_device_id plic_acpi_match[] = {
	{ "RSCV0001", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, plic_acpi_match);

#endif

static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode,
					   u32 *nr_irqs, u32 *nr_contexts,
					   u32 *gsi_base, u32 *id)
{
	int rc;

	if (!is_of_node(fwnode)) {
		rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL);
		if (rc) {
			pr_err("%pfwP: failed to find GSI mapping\n", fwnode);
			return rc;
		}

		*nr_contexts = acpi_rintc_get_plic_nr_contexts(*id);
		if (WARN_ON(!*nr_contexts)) {
			pr_err("%pfwP: no PLIC context available\n", fwnode);
			return -EINVAL;
		}

		return 0;
	}

	rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		pr_err("%pfwP: riscv,ndev property not available\n", fwnode);
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		pr_err("%pfwP: no PLIC context available\n", fwnode);
		return -EINVAL;
	}

	*gsi_base = 0;
	*id = 0;

	return 0;
}

static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context,
				     u32 *parent_hwirq, int *parent_cpu, u32 id)
{
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	if (!is_of_node(fwnode)) {
		hartid = acpi_rintc_ext_parent_to_hartid(id, context);
		if (hartid == INVALID_HARTID)
			return -EINVAL;

		*parent_cpu = riscv_hartid_to_cpuid(hartid);
		*parent_hwirq = RV_IRQ_EXT;
		return 0;
	}

	rc = of_irq_parse_one(to_of_node(fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}

static int plic_probe(struct fwnode_handle *fwnode)
{
	int error = 0, nr_contexts, nr_handlers = 0, cpu, i;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, parent_hwirq;
	struct plic_priv *priv;
	void __iomem *regs;
	int id, context_id;
	u32 gsi_base;

	if (is_of_node(fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;

		regs = of_iomap(to_of_node(fwnode), 0);
		if (!regs)
			return -ENOMEM;
	} else {
		regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0);
		if (IS_ERR(regs))
			return PTR_ERR(regs);
	}

	error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id);
	if (error)
		goto fail_free_regs;

	priv = kzalloc(sizeof(*priv), GFP_KERNEL);
	if (!priv) {
		error = -ENOMEM;
		goto fail_free_regs;
	}

	priv->fwnode = fwnode;
	priv->plic_quirks = plic_quirks;
	/*
	 * The firmware provides the number of device interrupts. As
	 * hardware interrupt 0 is reserved, the number of total interrupts
	 * is nr_irqs + 1.
	 */
	nr_irqs++;
	ACCESS_PRIVATE(priv, total_irqs) = nr_irqs;
	/* Precalculate the number of register groups */
	priv->irq_groups = DIV_ROUND_UP(nr_irqs, 32);

	priv->regs = regs;
	priv->gsi_base = gsi_base;
	priv->acpi_plic_id = id;

	priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL);
	if (!priv->prio_save) {
		error = -ENOMEM;
		goto fail_free_priv;
	}

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu,
						  priv->acpi_plic_id);
		if (error) {
			pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i);
			continue;
		}

		if (is_of_node(fwnode)) {
			context_id = i;
		} else {
			context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i);
			if (context_id == INVALID_CONTEXT) {
				pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i);
				continue;
			}
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				u32 __iomem *enable_base = priv->regs + CONTEXT_ENABLE_BASE +
							   i * CONTEXT_ENABLE_SIZE;

				for (int j = 0; j < priv->irq_groups; j++)
					writel(0, enable_base + j);
			}
			continue;
		}

		if (cpu < 0) {
			pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
				     context_id * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
				       context_id * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = kcalloc(priv->irq_groups, sizeof(*handler->enable_save),
					       GFP_KERNEL);
		if (!handler->enable_save) {
			error = -ENOMEM;
			goto fail_cleanup_contexts;
		}
done:
		for_each_device_irq(hwirq, priv) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE + hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs, &plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain)) {
		error = -ENOMEM;
		goto fail_cleanup_contexts;
	}

	/*
	 * We can have multiple PLIC instances so setup global state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			void (*handler_fn)(struct irq_desc *) = plic_handle_irq;

			if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks))
				handler_fn = plic_handle_irq_cp100;

			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, handler_fn);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore(&plic_irq_syscore);
			plic_global_setup_done = true;
		}
	}

#ifdef CONFIG_ACPI
	if (!acpi_disabled)
		acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev));
#endif

	pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n",
		fwnode, nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		kfree(handler->enable_save);
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	bitmap_free(priv->prio_save);
fail_free_priv:
	kfree(priv);
fail_free_regs:
	iounmap(regs);
	return error;
}

static int plic_platform_probe(struct platform_device *pdev)
{
	return plic_probe(pdev->dev.fwnode);
}

static struct platform_driver plic_driver = {
	.driver = {
		.name			= "riscv-plic",
		.of_match_table		= plic_match,
		.suppress_bind_attrs	= true,
		.acpi_match_table	= ACPI_PTR(plic_acpi_match),
	},
	.probe = plic_platform_probe,
};
builtin_platform_driver(plic_driver);

static int __init plic_early_probe(struct device_node *node,
				   struct device_node *parent)
{
	return plic_probe(&node->fwnode);
}

IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe);