1 // SPDX-License-Identifier: GPL-2.0 2 /* 3 * Copyright (C) 2017 SiFive 4 * Copyright (C) 2018 Christoph Hellwig 5 */ 6 #define pr_fmt(fmt) "riscv-plic: " fmt 7 #include <linux/acpi.h> 8 #include <linux/cpu.h> 9 #include <linux/interrupt.h> 10 #include <linux/io.h> 11 #include <linux/irq.h> 12 #include <linux/irqchip.h> 13 #include <linux/irqchip/chained_irq.h> 14 #include <linux/irqdomain.h> 15 #include <linux/module.h> 16 #include <linux/of.h> 17 #include <linux/of_address.h> 18 #include <linux/of_irq.h> 19 #include <linux/platform_device.h> 20 #include <linux/spinlock.h> 21 #include <linux/syscore_ops.h> 22 #include <asm/smp.h> 23 24 /* 25 * This driver implements a version of the RISC-V PLIC with the actual layout 26 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual: 27 * 28 * https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf 29 * 30 * The largest number supported by devices marked as 'sifive,plic-1.0.0', is 31 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged 32 * Spec. 33 */ 34 35 #define MAX_DEVICES 1024 36 #define MAX_CONTEXTS 15872 37 38 /* 39 * Each interrupt source has a priority register associated with it. 40 * We always hardwire it to one in Linux. 41 */ 42 #define PRIORITY_BASE 0 43 #define PRIORITY_PER_ID 4 44 45 /* 46 * Each hart context has a vector of interrupt enable bits associated with it. 47 * There's one bit for each interrupt source. 48 */ 49 #define CONTEXT_ENABLE_BASE 0x2000 50 #define CONTEXT_ENABLE_SIZE 0x80 51 52 #define PENDING_BASE 0x1000 53 54 /* 55 * Each hart context has a set of control registers associated with it. Right 56 * now there's only two: a source priority threshold over which the hart will 57 * take an interrupt, and a register to claim interrupts. 58 */ 59 #define CONTEXT_BASE 0x200000 60 #define CONTEXT_SIZE 0x1000 61 #define CONTEXT_THRESHOLD 0x00 62 #define CONTEXT_CLAIM 0x04 63 64 #define PLIC_DISABLE_THRESHOLD 0x7 65 #define PLIC_ENABLE_THRESHOLD 0 66 67 #define PLIC_QUIRK_EDGE_INTERRUPT 0 68 #define PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM 1 69 70 struct plic_priv { 71 struct fwnode_handle *fwnode; 72 struct cpumask lmask; 73 struct irq_domain *irqdomain; 74 void __iomem *regs; 75 unsigned long plic_quirks; 76 unsigned int nr_irqs; 77 unsigned long *prio_save; 78 u32 gsi_base; 79 int acpi_plic_id; 80 }; 81 82 struct plic_handler { 83 bool present; 84 void __iomem *hart_base; 85 /* 86 * Protect mask operations on the registers given that we can't 87 * assume atomic memory operations work on them. 88 */ 89 raw_spinlock_t enable_lock; 90 void __iomem *enable_base; 91 u32 *enable_save; 92 struct plic_priv *priv; 93 }; 94 static int plic_parent_irq __ro_after_init; 95 static bool plic_global_setup_done __ro_after_init; 96 static DEFINE_PER_CPU(struct plic_handler, plic_handlers); 97 98 static int plic_irq_set_type(struct irq_data *d, unsigned int type); 99 100 static void __plic_toggle(struct plic_handler *handler, int hwirq, int enable) 101 { 102 u32 __iomem *base = handler->enable_base; 103 u32 hwirq_mask = 1 << (hwirq % 32); 104 int group = hwirq / 32; 105 u32 value; 106 107 value = readl(base + group); 108 109 if (enable) 110 value |= hwirq_mask; 111 else 112 value &= ~hwirq_mask; 113 114 handler->enable_save[group] = value; 115 writel(value, base + group); 116 } 117 118 static void plic_toggle(struct plic_handler *handler, int hwirq, int enable) 119 { 120 unsigned long flags; 121 122 raw_spin_lock_irqsave(&handler->enable_lock, flags); 123 __plic_toggle(handler, hwirq, enable); 124 raw_spin_unlock_irqrestore(&handler->enable_lock, flags); 125 } 126 127 static inline void plic_irq_toggle(const struct cpumask *mask, 128 struct irq_data *d, int enable) 129 { 130 int cpu; 131 132 for_each_cpu(cpu, mask) { 133 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); 134 135 plic_toggle(handler, d->hwirq, enable); 136 } 137 } 138 139 static void plic_irq_unmask(struct irq_data *d) 140 { 141 struct plic_priv *priv = irq_data_get_irq_chip_data(d); 142 143 writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); 144 } 145 146 static void plic_irq_mask(struct irq_data *d) 147 { 148 struct plic_priv *priv = irq_data_get_irq_chip_data(d); 149 150 writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID); 151 } 152 153 static void plic_irq_enable(struct irq_data *d) 154 { 155 plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); 156 plic_irq_unmask(d); 157 } 158 159 static void plic_irq_disable(struct irq_data *d) 160 { 161 plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); 162 } 163 164 static void plic_irq_eoi(struct irq_data *d) 165 { 166 struct plic_handler *handler = this_cpu_ptr(&plic_handlers); 167 168 if (unlikely(irqd_irq_disabled(d))) { 169 plic_toggle(handler, d->hwirq, 1); 170 writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); 171 plic_toggle(handler, d->hwirq, 0); 172 } else { 173 writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM); 174 } 175 } 176 177 #ifdef CONFIG_SMP 178 static int plic_set_affinity(struct irq_data *d, 179 const struct cpumask *mask_val, bool force) 180 { 181 unsigned int cpu; 182 struct plic_priv *priv = irq_data_get_irq_chip_data(d); 183 184 if (force) 185 cpu = cpumask_first_and(&priv->lmask, mask_val); 186 else 187 cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask); 188 189 if (cpu >= nr_cpu_ids) 190 return -EINVAL; 191 192 /* Invalidate the original routing entry */ 193 plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0); 194 195 irq_data_update_effective_affinity(d, cpumask_of(cpu)); 196 197 /* Setting the new routing entry if irq is enabled */ 198 if (!irqd_irq_disabled(d)) 199 plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1); 200 201 return IRQ_SET_MASK_OK_DONE; 202 } 203 #endif 204 205 static struct irq_chip plic_edge_chip = { 206 .name = "SiFive PLIC", 207 .irq_enable = plic_irq_enable, 208 .irq_disable = plic_irq_disable, 209 .irq_ack = plic_irq_eoi, 210 .irq_mask = plic_irq_mask, 211 .irq_unmask = plic_irq_unmask, 212 #ifdef CONFIG_SMP 213 .irq_set_affinity = plic_set_affinity, 214 #endif 215 .irq_set_type = plic_irq_set_type, 216 .flags = IRQCHIP_SKIP_SET_WAKE | 217 IRQCHIP_AFFINITY_PRE_STARTUP, 218 }; 219 220 static struct irq_chip plic_chip = { 221 .name = "SiFive PLIC", 222 .irq_enable = plic_irq_enable, 223 .irq_disable = plic_irq_disable, 224 .irq_mask = plic_irq_mask, 225 .irq_unmask = plic_irq_unmask, 226 .irq_eoi = plic_irq_eoi, 227 #ifdef CONFIG_SMP 228 .irq_set_affinity = plic_set_affinity, 229 #endif 230 .irq_set_type = plic_irq_set_type, 231 .flags = IRQCHIP_SKIP_SET_WAKE | 232 IRQCHIP_AFFINITY_PRE_STARTUP, 233 }; 234 235 static int plic_irq_set_type(struct irq_data *d, unsigned int type) 236 { 237 struct plic_priv *priv = irq_data_get_irq_chip_data(d); 238 239 if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) 240 return IRQ_SET_MASK_OK_NOCOPY; 241 242 switch (type) { 243 case IRQ_TYPE_EDGE_RISING: 244 irq_set_chip_handler_name_locked(d, &plic_edge_chip, 245 handle_edge_irq, NULL); 246 break; 247 case IRQ_TYPE_LEVEL_HIGH: 248 irq_set_chip_handler_name_locked(d, &plic_chip, 249 handle_fasteoi_irq, NULL); 250 break; 251 default: 252 return -EINVAL; 253 } 254 255 return IRQ_SET_MASK_OK; 256 } 257 258 static int plic_irq_suspend(void) 259 { 260 struct plic_priv *priv; 261 262 priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; 263 264 /* irq ID 0 is reserved */ 265 for (unsigned int i = 1; i < priv->nr_irqs; i++) { 266 __assign_bit(i, priv->prio_save, 267 readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID)); 268 } 269 270 return 0; 271 } 272 273 static void plic_irq_resume(void) 274 { 275 unsigned int i, index, cpu; 276 unsigned long flags; 277 u32 __iomem *reg; 278 struct plic_priv *priv; 279 280 priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv; 281 282 /* irq ID 0 is reserved */ 283 for (i = 1; i < priv->nr_irqs; i++) { 284 index = BIT_WORD(i); 285 writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0, 286 priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID); 287 } 288 289 for_each_present_cpu(cpu) { 290 struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu); 291 292 if (!handler->present) 293 continue; 294 295 raw_spin_lock_irqsave(&handler->enable_lock, flags); 296 for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) { 297 reg = handler->enable_base + i * sizeof(u32); 298 writel(handler->enable_save[i], reg); 299 } 300 raw_spin_unlock_irqrestore(&handler->enable_lock, flags); 301 } 302 } 303 304 static struct syscore_ops plic_irq_syscore_ops = { 305 .suspend = plic_irq_suspend, 306 .resume = plic_irq_resume, 307 }; 308 309 static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq, 310 irq_hw_number_t hwirq) 311 { 312 struct plic_priv *priv = d->host_data; 313 314 irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data, 315 handle_fasteoi_irq, NULL, NULL); 316 irq_set_noprobe(irq); 317 irq_set_affinity(irq, &priv->lmask); 318 return 0; 319 } 320 321 static int plic_irq_domain_translate(struct irq_domain *d, 322 struct irq_fwspec *fwspec, 323 unsigned long *hwirq, 324 unsigned int *type) 325 { 326 struct plic_priv *priv = d->host_data; 327 328 /* For DT, gsi_base is always zero. */ 329 if (fwspec->param[0] >= priv->gsi_base) 330 fwspec->param[0] = fwspec->param[0] - priv->gsi_base; 331 332 if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks)) 333 return irq_domain_translate_twocell(d, fwspec, hwirq, type); 334 335 return irq_domain_translate_onecell(d, fwspec, hwirq, type); 336 } 337 338 static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 339 unsigned int nr_irqs, void *arg) 340 { 341 int i, ret; 342 irq_hw_number_t hwirq; 343 unsigned int type; 344 struct irq_fwspec *fwspec = arg; 345 346 ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type); 347 if (ret) 348 return ret; 349 350 for (i = 0; i < nr_irqs; i++) { 351 ret = plic_irqdomain_map(domain, virq + i, hwirq + i); 352 if (ret) 353 return ret; 354 } 355 356 return 0; 357 } 358 359 static const struct irq_domain_ops plic_irqdomain_ops = { 360 .translate = plic_irq_domain_translate, 361 .alloc = plic_irq_domain_alloc, 362 .free = irq_domain_free_irqs_top, 363 }; 364 365 /* 366 * Handling an interrupt is a two-step process: first you claim the interrupt 367 * by reading the claim register, then you complete the interrupt by writing 368 * that source ID back to the same claim register. This automatically enables 369 * and disables the interrupt, so there's nothing else to do. 370 */ 371 static void plic_handle_irq(struct irq_desc *desc) 372 { 373 struct plic_handler *handler = this_cpu_ptr(&plic_handlers); 374 struct irq_chip *chip = irq_desc_get_chip(desc); 375 void __iomem *claim = handler->hart_base + CONTEXT_CLAIM; 376 irq_hw_number_t hwirq; 377 378 WARN_ON_ONCE(!handler->present); 379 380 chained_irq_enter(chip, desc); 381 382 while ((hwirq = readl(claim))) { 383 int err = generic_handle_domain_irq(handler->priv->irqdomain, 384 hwirq); 385 if (unlikely(err)) { 386 pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", 387 handler->priv->fwnode, hwirq); 388 } 389 } 390 391 chained_irq_exit(chip, desc); 392 } 393 394 static u32 cp100_isolate_pending_irq(int nr_irq_groups, struct plic_handler *handler) 395 { 396 u32 __iomem *pending = handler->priv->regs + PENDING_BASE; 397 u32 __iomem *enable = handler->enable_base; 398 u32 pending_irqs = 0; 399 int i, j; 400 401 /* Look for first pending interrupt */ 402 for (i = 0; i < nr_irq_groups; i++) { 403 /* Any pending interrupts would be annihilated, so skip checking them */ 404 if (!handler->enable_save[i]) 405 continue; 406 407 pending_irqs = handler->enable_save[i] & readl_relaxed(pending + i); 408 if (pending_irqs) 409 break; 410 } 411 412 if (!pending_irqs) 413 return 0; 414 415 /* Isolate lowest set bit */ 416 pending_irqs &= -pending_irqs; 417 418 /* Disable all interrupts but the first pending one */ 419 for (j = 0; j < nr_irq_groups; j++) { 420 u32 new_mask = j == i ? pending_irqs : 0; 421 422 if (new_mask != handler->enable_save[j]) 423 writel_relaxed(new_mask, enable + j); 424 } 425 return pending_irqs; 426 } 427 428 static irq_hw_number_t cp100_get_hwirq(struct plic_handler *handler, void __iomem *claim) 429 { 430 int nr_irq_groups = DIV_ROUND_UP(handler->priv->nr_irqs, 32); 431 u32 __iomem *enable = handler->enable_base; 432 irq_hw_number_t hwirq = 0; 433 u32 iso_mask; 434 int i; 435 436 guard(raw_spinlock)(&handler->enable_lock); 437 438 /* Existing enable state is already cached in enable_save */ 439 iso_mask = cp100_isolate_pending_irq(nr_irq_groups, handler); 440 if (!iso_mask) 441 return 0; 442 443 /* 444 * Interrupts delievered to hardware still become pending, but only 445 * interrupts that are both pending and enabled can be claimed. 446 * Clearing the enable bit for all interrupts but the first pending 447 * one avoids a hardware bug that occurs during read from the claim 448 * register with more than one eligible interrupt. 449 */ 450 hwirq = readl(claim); 451 452 /* Restore previous state */ 453 for (i = 0; i < nr_irq_groups; i++) { 454 u32 written = i == hwirq / 32 ? iso_mask : 0; 455 u32 stored = handler->enable_save[i]; 456 457 if (stored != written) 458 writel_relaxed(stored, enable + i); 459 } 460 return hwirq; 461 } 462 463 static void plic_handle_irq_cp100(struct irq_desc *desc) 464 { 465 struct plic_handler *handler = this_cpu_ptr(&plic_handlers); 466 struct irq_chip *chip = irq_desc_get_chip(desc); 467 void __iomem *claim = handler->hart_base + CONTEXT_CLAIM; 468 irq_hw_number_t hwirq; 469 470 WARN_ON_ONCE(!handler->present); 471 472 chained_irq_enter(chip, desc); 473 474 while ((hwirq = cp100_get_hwirq(handler, claim))) { 475 int err = generic_handle_domain_irq(handler->priv->irqdomain, hwirq); 476 477 if (unlikely(err)) { 478 pr_warn_ratelimited("%pfwP: can't find mapping for hwirq %lu\n", 479 handler->priv->fwnode, hwirq); 480 } 481 } 482 483 chained_irq_exit(chip, desc); 484 } 485 486 static void plic_set_threshold(struct plic_handler *handler, u32 threshold) 487 { 488 /* priority must be > threshold to trigger an interrupt */ 489 writel(threshold, handler->hart_base + CONTEXT_THRESHOLD); 490 } 491 492 static int plic_dying_cpu(unsigned int cpu) 493 { 494 if (plic_parent_irq) 495 disable_percpu_irq(plic_parent_irq); 496 497 return 0; 498 } 499 500 static int plic_starting_cpu(unsigned int cpu) 501 { 502 struct plic_handler *handler = this_cpu_ptr(&plic_handlers); 503 504 if (plic_parent_irq) 505 enable_percpu_irq(plic_parent_irq, 506 irq_get_trigger_type(plic_parent_irq)); 507 else 508 pr_warn("%pfwP: cpu%d: parent irq not available\n", 509 handler->priv->fwnode, cpu); 510 plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD); 511 512 return 0; 513 } 514 515 static const struct of_device_id plic_match[] = { 516 { .compatible = "sifive,plic-1.0.0" }, 517 { .compatible = "riscv,plic0" }, 518 { .compatible = "andestech,nceplic100", 519 .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, 520 { .compatible = "thead,c900-plic", 521 .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) }, 522 { .compatible = "ultrarisc,cp100-plic", 523 .data = (const void *)BIT(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM) }, 524 {} 525 }; 526 527 #ifdef CONFIG_ACPI 528 529 static const struct acpi_device_id plic_acpi_match[] = { 530 { "RSCV0001", 0 }, 531 {} 532 }; 533 MODULE_DEVICE_TABLE(acpi, plic_acpi_match); 534 535 #endif 536 static int plic_parse_nr_irqs_and_contexts(struct fwnode_handle *fwnode, 537 u32 *nr_irqs, u32 *nr_contexts, 538 u32 *gsi_base, u32 *id) 539 { 540 int rc; 541 542 if (!is_of_node(fwnode)) { 543 rc = riscv_acpi_get_gsi_info(fwnode, gsi_base, id, nr_irqs, NULL); 544 if (rc) { 545 pr_err("%pfwP: failed to find GSI mapping\n", fwnode); 546 return rc; 547 } 548 549 *nr_contexts = acpi_rintc_get_plic_nr_contexts(*id); 550 if (WARN_ON(!*nr_contexts)) { 551 pr_err("%pfwP: no PLIC context available\n", fwnode); 552 return -EINVAL; 553 } 554 555 return 0; 556 } 557 558 rc = of_property_read_u32(to_of_node(fwnode), "riscv,ndev", nr_irqs); 559 if (rc) { 560 pr_err("%pfwP: riscv,ndev property not available\n", fwnode); 561 return rc; 562 } 563 564 *nr_contexts = of_irq_count(to_of_node(fwnode)); 565 if (WARN_ON(!(*nr_contexts))) { 566 pr_err("%pfwP: no PLIC context available\n", fwnode); 567 return -EINVAL; 568 } 569 570 *gsi_base = 0; 571 *id = 0; 572 573 return 0; 574 } 575 576 static int plic_parse_context_parent(struct fwnode_handle *fwnode, u32 context, 577 u32 *parent_hwirq, int *parent_cpu, u32 id) 578 { 579 struct of_phandle_args parent; 580 unsigned long hartid; 581 int rc; 582 583 if (!is_of_node(fwnode)) { 584 hartid = acpi_rintc_ext_parent_to_hartid(id, context); 585 if (hartid == INVALID_HARTID) 586 return -EINVAL; 587 588 *parent_cpu = riscv_hartid_to_cpuid(hartid); 589 *parent_hwirq = RV_IRQ_EXT; 590 return 0; 591 } 592 593 rc = of_irq_parse_one(to_of_node(fwnode), context, &parent); 594 if (rc) 595 return rc; 596 597 rc = riscv_of_parent_hartid(parent.np, &hartid); 598 if (rc) 599 return rc; 600 601 *parent_hwirq = parent.args[0]; 602 *parent_cpu = riscv_hartid_to_cpuid(hartid); 603 return 0; 604 } 605 606 static int plic_probe(struct fwnode_handle *fwnode) 607 { 608 int error = 0, nr_contexts, nr_handlers = 0, cpu, i; 609 unsigned long plic_quirks = 0; 610 struct plic_handler *handler; 611 u32 nr_irqs, parent_hwirq; 612 struct plic_priv *priv; 613 irq_hw_number_t hwirq; 614 void __iomem *regs; 615 int id, context_id; 616 u32 gsi_base; 617 618 if (is_of_node(fwnode)) { 619 const struct of_device_id *id; 620 621 id = of_match_node(plic_match, to_of_node(fwnode)); 622 if (id) 623 plic_quirks = (unsigned long)id->data; 624 625 regs = of_iomap(to_of_node(fwnode), 0); 626 if (!regs) 627 return -ENOMEM; 628 } else { 629 regs = devm_platform_ioremap_resource(to_platform_device(fwnode->dev), 0); 630 if (IS_ERR(regs)) 631 return PTR_ERR(regs); 632 } 633 634 error = plic_parse_nr_irqs_and_contexts(fwnode, &nr_irqs, &nr_contexts, &gsi_base, &id); 635 if (error) 636 goto fail_free_regs; 637 638 priv = kzalloc(sizeof(*priv), GFP_KERNEL); 639 if (!priv) { 640 error = -ENOMEM; 641 goto fail_free_regs; 642 } 643 644 priv->fwnode = fwnode; 645 priv->plic_quirks = plic_quirks; 646 priv->nr_irqs = nr_irqs; 647 priv->regs = regs; 648 priv->gsi_base = gsi_base; 649 priv->acpi_plic_id = id; 650 651 priv->prio_save = bitmap_zalloc(nr_irqs, GFP_KERNEL); 652 if (!priv->prio_save) { 653 error = -ENOMEM; 654 goto fail_free_priv; 655 } 656 657 for (i = 0; i < nr_contexts; i++) { 658 error = plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, 659 priv->acpi_plic_id); 660 if (error) { 661 pr_warn("%pfwP: hwirq for context%d not found\n", fwnode, i); 662 continue; 663 } 664 665 if (is_of_node(fwnode)) { 666 context_id = i; 667 } else { 668 context_id = acpi_rintc_get_plic_context(priv->acpi_plic_id, i); 669 if (context_id == INVALID_CONTEXT) { 670 pr_warn("%pfwP: invalid context id for context%d\n", fwnode, i); 671 continue; 672 } 673 } 674 675 /* 676 * Skip contexts other than external interrupts for our 677 * privilege level. 678 */ 679 if (parent_hwirq != RV_IRQ_EXT) { 680 /* Disable S-mode enable bits if running in M-mode. */ 681 if (IS_ENABLED(CONFIG_RISCV_M_MODE)) { 682 u32 __iomem *enable_base = priv->regs + CONTEXT_ENABLE_BASE + 683 i * CONTEXT_ENABLE_SIZE; 684 685 for (int j = 0; j <= nr_irqs / 32; j++) 686 writel(0, enable_base + j); 687 } 688 continue; 689 } 690 691 if (cpu < 0) { 692 pr_warn("%pfwP: Invalid cpuid for context %d\n", fwnode, i); 693 continue; 694 } 695 696 /* 697 * When running in M-mode we need to ignore the S-mode handler. 698 * Here we assume it always comes later, but that might be a 699 * little fragile. 700 */ 701 handler = per_cpu_ptr(&plic_handlers, cpu); 702 if (handler->present) { 703 pr_warn("%pfwP: handler already present for context %d.\n", fwnode, i); 704 plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD); 705 goto done; 706 } 707 708 cpumask_set_cpu(cpu, &priv->lmask); 709 handler->present = true; 710 handler->hart_base = priv->regs + CONTEXT_BASE + 711 context_id * CONTEXT_SIZE; 712 raw_spin_lock_init(&handler->enable_lock); 713 handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE + 714 context_id * CONTEXT_ENABLE_SIZE; 715 handler->priv = priv; 716 717 handler->enable_save = kcalloc(DIV_ROUND_UP(nr_irqs, 32), 718 sizeof(*handler->enable_save), GFP_KERNEL); 719 if (!handler->enable_save) { 720 error = -ENOMEM; 721 goto fail_cleanup_contexts; 722 } 723 done: 724 for (hwirq = 1; hwirq <= nr_irqs; hwirq++) { 725 plic_toggle(handler, hwirq, 0); 726 writel(1, priv->regs + PRIORITY_BASE + 727 hwirq * PRIORITY_PER_ID); 728 } 729 nr_handlers++; 730 } 731 732 priv->irqdomain = irq_domain_create_linear(fwnode, nr_irqs + 1, 733 &plic_irqdomain_ops, priv); 734 if (WARN_ON(!priv->irqdomain)) { 735 error = -ENOMEM; 736 goto fail_cleanup_contexts; 737 } 738 739 /* 740 * We can have multiple PLIC instances so setup global state 741 * and register syscore operations only once after context 742 * handlers of all online CPUs are initialized. 743 */ 744 if (!plic_global_setup_done) { 745 struct irq_domain *domain; 746 bool global_setup = true; 747 748 for_each_online_cpu(cpu) { 749 handler = per_cpu_ptr(&plic_handlers, cpu); 750 if (!handler->present) { 751 global_setup = false; 752 break; 753 } 754 } 755 756 if (global_setup) { 757 void (*handler_fn)(struct irq_desc *) = plic_handle_irq; 758 759 if (test_bit(PLIC_QUIRK_CP100_CLAIM_REGISTER_ERRATUM, &handler->priv->plic_quirks)) 760 handler_fn = plic_handle_irq_cp100; 761 762 /* Find parent domain and register chained handler */ 763 domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY); 764 if (domain) 765 plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT); 766 if (plic_parent_irq) 767 irq_set_chained_handler(plic_parent_irq, handler_fn); 768 769 cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING, 770 "irqchip/sifive/plic:starting", 771 plic_starting_cpu, plic_dying_cpu); 772 register_syscore_ops(&plic_irq_syscore_ops); 773 plic_global_setup_done = true; 774 } 775 } 776 777 #ifdef CONFIG_ACPI 778 if (!acpi_disabled) 779 acpi_dev_clear_dependencies(ACPI_COMPANION(fwnode->dev)); 780 #endif 781 782 pr_info("%pfwP: mapped %d interrupts with %d handlers for %d contexts.\n", 783 fwnode, nr_irqs, nr_handlers, nr_contexts); 784 return 0; 785 786 fail_cleanup_contexts: 787 for (i = 0; i < nr_contexts; i++) { 788 if (plic_parse_context_parent(fwnode, i, &parent_hwirq, &cpu, priv->acpi_plic_id)) 789 continue; 790 if (parent_hwirq != RV_IRQ_EXT || cpu < 0) 791 continue; 792 793 handler = per_cpu_ptr(&plic_handlers, cpu); 794 handler->present = false; 795 handler->hart_base = NULL; 796 handler->enable_base = NULL; 797 kfree(handler->enable_save); 798 handler->enable_save = NULL; 799 handler->priv = NULL; 800 } 801 bitmap_free(priv->prio_save); 802 fail_free_priv: 803 kfree(priv); 804 fail_free_regs: 805 iounmap(regs); 806 return error; 807 } 808 809 static int plic_platform_probe(struct platform_device *pdev) 810 { 811 return plic_probe(pdev->dev.fwnode); 812 } 813 814 static struct platform_driver plic_driver = { 815 .driver = { 816 .name = "riscv-plic", 817 .of_match_table = plic_match, 818 .suppress_bind_attrs = true, 819 .acpi_match_table = ACPI_PTR(plic_acpi_match), 820 }, 821 .probe = plic_platform_probe, 822 }; 823 builtin_platform_driver(plic_driver); 824 825 static int __init plic_early_probe(struct device_node *node, 826 struct device_node *parent) 827 { 828 return plic_probe(&node->fwnode); 829 } 830 831 IRQCHIP_DECLARE(riscv, "allwinner,sun20i-d1-plic", plic_early_probe); 832