// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number of interrupts supported by devices marked as
 * 'sifive,plic-1.0.0' is 1024, of which interrupt source 0 is defined as
 * non-existent by the RISC-V Privileged Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0
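
/*
 * Worked example (illustrative only; the numbers are not from any specific
 * SoC): with the layout above, hwirq 13 as seen from context 2 uses
 *
 *	priority register: regs + PRIORITY_BASE + 13 * PRIORITY_PER_ID
 *			   = regs + 0x34
 *	enable bit:	   regs + CONTEXT_ENABLE_BASE + 2 * CONTEXT_ENABLE_SIZE
 *			   + (13 / 32) * 4 = regs + 0x2100, bit (13 % 32) = 13
 *	claim/complete:	   regs + CONTEXT_BASE + 2 * CONTEXT_SIZE + CONTEXT_CLAIM
 *			   = regs + 0x202004
 */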

struct plic_priv {
	struct device *dev;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};
static int plic_parent_irq __ro_after_init;
static bool plic_cpuhp_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}
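
/*
 * Example (illustrative): enabling hwirq 13 performs a locked
 * read-modify-write on enable word 13 / 32 = 0 of the context:
 *
 *	reg = enable_base + 0 * sizeof(u32);
 *	writel(readl(reg) | BIT(13 % 32), reg);
 */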

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		/*
		 * The PLIC silently ignores a completion for a source that
		 * is not enabled for this context, so briefly enable the
		 * source around the claim-register write.
		 */
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct cpumask amask;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	cpumask_and(&amask, &priv->lmask, mask_val);

	if (force)
		cpu = cpumask_first(&amask);
	else
		cpu = cpumask_any_and(&amask, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}
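
/*
 * Illustrative devicetree fragment (a sketch, not taken from a real board):
 * on PLIC variants with PLIC_QUIRK_EDGE_INTERRUPT the interrupt specifier
 * carries a trigger type in a second cell, e.g.
 *
 *	interrupts = <42 IRQ_TYPE_EDGE_RISING>;
 *
 * while the plain 'sifive,plic-1.0.0' binding uses a single cell; see
 * plic_irq_domain_translate() below.
 */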

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
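
/*
 * Illustrative claim/complete sequence for a single interrupt, as performed
 * by plic_handle_irq() and plic_irq_eoi() (sketch only):
 *
 *	hwirq = readl(claim);	// claim: highest-priority pending source, or 0
 *	...dispatch hwirq via the irqdomain...
 *	writel(hwirq, claim);	// complete: lets the source fire again
 */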
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			dev_warn_ratelimited(handler->priv->dev,
					     "can't find mapping for hwirq %lu\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
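
/*
 * Example (illustrative): source priorities are hardwired to 1 by this
 * driver, so a threshold of PLIC_ENABLE_THRESHOLD (0) lets every enabled
 * source through (1 > 0), while PLIC_DISABLE_THRESHOLD (7) gates them all
 * off (1 <= 7).
 */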

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
					   u32 *nr_irqs, u32 *nr_contexts)
{
	struct device *dev = &pdev->dev;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported; extend this function
	 * when adding ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		dev_err(dev, "riscv,ndev property not available\n");
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(dev->fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		dev_err(dev, "no PLIC context available\n");
		return -EINVAL;
	}

	return 0;
}

static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
				     u32 *parent_hwirq, int *parent_cpu)
{
	struct device *dev = &pdev->dev;
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported; extend this function
	 * when adding ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
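
/*
 * Illustrative PLIC node (a sketch; addresses and counts are made up, not
 * from a real board). Each interrupts-extended entry is one context; the
 * parse helpers above map entry i to the hart behind &cpuN_intc and to
 * parent hwirq 11 (M-mode external) or 9 (S-mode external):
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		riscv,ndev = <53>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 */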

static int plic_probe(struct platform_device *pdev)
{
	int error = 0, nr_handlers = 0, cpu, i;
	struct device *dev = &pdev->dev;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, nr_contexts, parent_hwirq;
	struct irq_domain *domain;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;
	bool cpuhp_setup;

	if (is_of_node(dev->fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(dev->fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;
	}

	error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
	if (error)
		return error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;

	/* devm_platform_ioremap_resource() returns an ERR_PTR, never NULL */
	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (WARN_ON(IS_ERR(priv->regs)))
		return PTR_ERR(priv->regs);

	priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
		if (error) {
			dev_warn(dev, "hwirq for context%d not found\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			dev_warn(dev, "Invalid cpuid for context %d\n", i);
			continue;
		}

		/* Find parent domain and register chained handler */
		domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
		if (!plic_parent_irq && domain) {
			plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			dev_warn(dev, "handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
					i * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
						    sizeof(*handler->enable_save),
						    GFP_KERNEL);
		if (!handler->enable_save)
			goto fail_cleanup_contexts;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto fail_cleanup_contexts;

	/*
	 * We can have multiple PLIC instances so setup cpuhp state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_cpuhp_setup_done) {
		cpuhp_setup = true;
		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				cpuhp_setup = false;
				break;
			}
		}
		if (cpuhp_setup) {
			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_cpuhp_setup_done = true;
		}
	}

	dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
		 nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	return -ENOMEM;
}

static struct platform_driver plic_driver = {
	.driver = {
		.name		= "riscv-plic",
		.of_match_table	= plic_match,
	},
	.probe = plic_probe,
};
builtin_platform_driver(plic_driver);