// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 SiFive
 * Copyright (C) 2018 Christoph Hellwig
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/syscore_ops.h>
#include <asm/smp.h>

/*
 * This driver implements a version of the RISC-V PLIC with the actual layout
 * specified in chapter 8 of the SiFive U5 Coreplex Series Manual:
 *
 *     https://static.dev.sifive.com/U54-MC-RVCoreIP.pdf
 *
 * The largest number supported by devices marked as 'sifive,plic-1.0.0' is
 * 1024, of which device 0 is defined as non-existent by the RISC-V Privileged
 * Spec.
 */

#define MAX_DEVICES			1024
#define MAX_CONTEXTS			15872

/*
 * Each interrupt source has a priority register associated with it.
 * We always hardwire it to one in Linux.
 */
#define PRIORITY_BASE			0
#define PRIORITY_PER_ID			4

/*
 * Each hart context has a vector of interrupt enable bits associated with it.
 * There's one bit for each interrupt source.
 */
#define CONTEXT_ENABLE_BASE		0x2000
#define CONTEXT_ENABLE_SIZE		0x80

/*
 * Each hart context has a set of control registers associated with it. Right
 * now there are only two: a source priority threshold over which the hart
 * will take an interrupt, and a register to claim interrupts.
 */
#define CONTEXT_BASE			0x200000
#define CONTEXT_SIZE			0x1000
#define CONTEXT_THRESHOLD		0x00
#define CONTEXT_CLAIM			0x04

#define PLIC_DISABLE_THRESHOLD		0x7
#define PLIC_ENABLE_THRESHOLD		0

#define PLIC_QUIRK_EDGE_INTERRUPT	0
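
/*
 * Illustrative worked example (editorial, not from the original source):
 * for interrupt source 42 seen by hart context 5, the offsets derived
 * from the defines above are
 *
 *	priority:  PRIORITY_BASE + 42 * PRIORITY_PER_ID		= 0xa8
 *	enable:    CONTEXT_ENABLE_BASE + 5 * CONTEXT_ENABLE_SIZE
 *		   + (42 / 32) * 4				= 0x2284
 *		   (bit 42 % 32 = 10 within that word)
 *	threshold: CONTEXT_BASE + 5 * CONTEXT_SIZE + CONTEXT_THRESHOLD
 *							= 0x205000
 *	claim:     CONTEXT_BASE + 5 * CONTEXT_SIZE + CONTEXT_CLAIM
 *							= 0x205004
 */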

struct plic_priv {
	struct device *dev;
	struct cpumask lmask;
	struct irq_domain *irqdomain;
	void __iomem *regs;
	unsigned long plic_quirks;
	unsigned int nr_irqs;
	unsigned long *prio_save;
};

struct plic_handler {
	bool			present;
	void __iomem		*hart_base;
	/*
	 * Protect mask operations on the registers given that we can't
	 * assume atomic memory operations work on them.
	 */
	raw_spinlock_t		enable_lock;
	void __iomem		*enable_base;
	u32			*enable_save;
	struct plic_priv	*priv;
};

static int plic_parent_irq __ro_after_init;
static bool plic_global_setup_done __ro_after_init;
static DEFINE_PER_CPU(struct plic_handler, plic_handlers);

static int plic_irq_set_type(struct irq_data *d, unsigned int type);

static void __plic_toggle(void __iomem *enable_base, int hwirq, int enable)
{
	u32 __iomem *reg = enable_base + (hwirq / 32) * sizeof(u32);
	u32 hwirq_mask = 1 << (hwirq % 32);

	if (enable)
		writel(readl(reg) | hwirq_mask, reg);
	else
		writel(readl(reg) & ~hwirq_mask, reg);
}

static void plic_toggle(struct plic_handler *handler, int hwirq, int enable)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&handler->enable_lock, flags);
	__plic_toggle(handler->enable_base, hwirq, enable);
	raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
}

static inline void plic_irq_toggle(const struct cpumask *mask,
				   struct irq_data *d, int enable)
{
	int cpu;

	for_each_cpu(cpu, mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		plic_toggle(handler, d->hwirq, enable);
	}
}

static void plic_irq_enable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 1);
}

static void plic_irq_disable(struct irq_data *d)
{
	plic_irq_toggle(irq_data_get_effective_affinity_mask(d), d, 0);
}
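
/*
 * Editorial note: the PLIC has no dedicated per-source mask bit, so
 * mask/unmask below is emulated through the priority register. Priority 0
 * is defined by the PLIC spec as "never interrupt"; unmasking restores the
 * priority of 1 that this driver hardwires for all sources.
 */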

static void plic_irq_unmask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(1, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_mask(struct irq_data *d)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	writel(0, priv->regs + PRIORITY_BASE + d->hwirq * PRIORITY_PER_ID);
}

static void plic_irq_eoi(struct irq_data *d)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (unlikely(irqd_irq_disabled(d))) {
		/*
		 * The PLIC silently ignores a completion write for a source
		 * that is not currently enabled on this context, so briefly
		 * re-enable the source around the write to CONTEXT_CLAIM.
		 */
		plic_toggle(handler, d->hwirq, 1);
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
		plic_toggle(handler, d->hwirq, 0);
	} else {
		writel(d->hwirq, handler->hart_base + CONTEXT_CLAIM);
	}
}

#ifdef CONFIG_SMP
static int plic_set_affinity(struct irq_data *d,
			     const struct cpumask *mask_val, bool force)
{
	unsigned int cpu;
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (force)
		cpu = cpumask_first_and(&priv->lmask, mask_val);
	else
		cpu = cpumask_first_and_and(&priv->lmask, mask_val, cpu_online_mask);

	if (cpu >= nr_cpu_ids)
		return -EINVAL;

	plic_irq_disable(d);

	irq_data_update_effective_affinity(d, cpumask_of(cpu));

	if (!irqd_irq_disabled(d))
		plic_irq_enable(d);

	return IRQ_SET_MASK_OK_DONE;
}
#endif
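
/*
 * Editorial note: two irq_chip flavours follow. Level-triggered sources use
 * the fasteoi flow, completing the claim in .irq_eoi after the handler has
 * run. Edge-triggered sources on quirky PLICs (see
 * PLIC_QUIRK_EDGE_INTERRUPT) instead complete the claim up front via
 * .irq_ack, so a new edge arriving while the handler runs is latched rather
 * than lost.
 */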

static struct irq_chip plic_edge_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_ack	= plic_irq_eoi,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static struct irq_chip plic_chip = {
	.name		= "SiFive PLIC",
	.irq_enable	= plic_irq_enable,
	.irq_disable	= plic_irq_disable,
	.irq_mask	= plic_irq_mask,
	.irq_unmask	= plic_irq_unmask,
	.irq_eoi	= plic_irq_eoi,
#ifdef CONFIG_SMP
	.irq_set_affinity = plic_set_affinity,
#endif
	.irq_set_type	= plic_irq_set_type,
	.flags		= IRQCHIP_SKIP_SET_WAKE |
			  IRQCHIP_AFFINITY_PRE_STARTUP,
};

static int plic_irq_set_type(struct irq_data *d, unsigned int type)
{
	struct plic_priv *priv = irq_data_get_irq_chip_data(d);

	if (!test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return IRQ_SET_MASK_OK_NOCOPY;

	switch (type) {
	case IRQ_TYPE_EDGE_RISING:
		irq_set_chip_handler_name_locked(d, &plic_edge_chip,
						 handle_edge_irq, NULL);
		break;
	case IRQ_TYPE_LEVEL_HIGH:
		irq_set_chip_handler_name_locked(d, &plic_chip,
						 handle_fasteoi_irq, NULL);
		break;
	default:
		return -EINVAL;
	}

	return IRQ_SET_MASK_OK;
}

static int plic_irq_suspend(void)
{
	unsigned int i, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++)
		if (readl(priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID))
			__set_bit(i, priv->prio_save);
		else
			__clear_bit(i, priv->prio_save);

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			handler->enable_save[i] = readl(reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}

	return 0;
}

static void plic_irq_resume(void)
{
	unsigned int i, index, cpu;
	unsigned long flags;
	u32 __iomem *reg;
	struct plic_priv *priv;

	priv = per_cpu_ptr(&plic_handlers, smp_processor_id())->priv;

	for (i = 0; i < priv->nr_irqs; i++) {
		index = BIT_WORD(i);
		writel((priv->prio_save[index] & BIT_MASK(i)) ? 1 : 0,
		       priv->regs + PRIORITY_BASE + i * PRIORITY_PER_ID);
	}

	for_each_cpu(cpu, cpu_present_mask) {
		struct plic_handler *handler = per_cpu_ptr(&plic_handlers, cpu);

		if (!handler->present)
			continue;

		raw_spin_lock_irqsave(&handler->enable_lock, flags);
		for (i = 0; i < DIV_ROUND_UP(priv->nr_irqs, 32); i++) {
			reg = handler->enable_base + i * sizeof(u32);
			writel(handler->enable_save[i], reg);
		}
		raw_spin_unlock_irqrestore(&handler->enable_lock, flags);
	}
}

static struct syscore_ops plic_irq_syscore_ops = {
	.suspend	= plic_irq_suspend,
	.resume		= plic_irq_resume,
};
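
/*
 * Editorial note: edge-capable PLIC variants use two interrupt cells in the
 * device tree (hwirq number plus trigger type), so
 * plic_irq_domain_translate() below selects twocell translation when
 * PLIC_QUIRK_EDGE_INTERRUPT is set; plain PLICs use a single cell carrying
 * only the hwirq.
 */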

static int plic_irqdomain_map(struct irq_domain *d, unsigned int irq,
			      irq_hw_number_t hwirq)
{
	struct plic_priv *priv = d->host_data;

	irq_domain_set_info(d, irq, hwirq, &plic_chip, d->host_data,
			    handle_fasteoi_irq, NULL, NULL);
	irq_set_noprobe(irq);
	irq_set_affinity(irq, &priv->lmask);
	return 0;
}

static int plic_irq_domain_translate(struct irq_domain *d,
				     struct irq_fwspec *fwspec,
				     unsigned long *hwirq,
				     unsigned int *type)
{
	struct plic_priv *priv = d->host_data;

	if (test_bit(PLIC_QUIRK_EDGE_INTERRUPT, &priv->plic_quirks))
		return irq_domain_translate_twocell(d, fwspec, hwirq, type);

	return irq_domain_translate_onecell(d, fwspec, hwirq, type);
}

static int plic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
				 unsigned int nr_irqs, void *arg)
{
	int i, ret;
	irq_hw_number_t hwirq;
	unsigned int type;
	struct irq_fwspec *fwspec = arg;

	ret = plic_irq_domain_translate(domain, fwspec, &hwirq, &type);
	if (ret)
		return ret;

	for (i = 0; i < nr_irqs; i++) {
		ret = plic_irqdomain_map(domain, virq + i, hwirq + i);
		if (ret)
			return ret;
	}

	return 0;
}

static const struct irq_domain_ops plic_irqdomain_ops = {
	.translate	= plic_irq_domain_translate,
	.alloc		= plic_irq_domain_alloc,
	.free		= irq_domain_free_irqs_top,
};

/*
 * Handling an interrupt is a two-step process: first you claim the interrupt
 * by reading the claim register, then you complete the interrupt by writing
 * that source ID back to the same claim register. This automatically enables
 * and disables the interrupt, so there's nothing else to do.
 */
static void plic_handle_irq(struct irq_desc *desc)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);
	struct irq_chip *chip = irq_desc_get_chip(desc);
	void __iomem *claim = handler->hart_base + CONTEXT_CLAIM;
	irq_hw_number_t hwirq;

	WARN_ON_ONCE(!handler->present);

	chained_irq_enter(chip, desc);

	while ((hwirq = readl(claim))) {
		int err = generic_handle_domain_irq(handler->priv->irqdomain,
						    hwirq);
		if (unlikely(err)) {
			dev_warn_ratelimited(handler->priv->dev,
					     "can't find mapping for hwirq %lu\n", hwirq);
		}
	}

	chained_irq_exit(chip, desc);
}

static void plic_set_threshold(struct plic_handler *handler, u32 threshold)
{
	/* priority must be > threshold to trigger an interrupt */
	writel(threshold, handler->hart_base + CONTEXT_THRESHOLD);
}
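
/*
 * Editorial note: the threshold doubles as a per-hart "global" mask.
 * PLIC_ENABLE_THRESHOLD (0) lets every priority-1 source through, while
 * PLIC_DISABLE_THRESHOLD (7) blocks them all, e.g. for a context whose CPU
 * already got a handler from an earlier context.
 */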

static int plic_dying_cpu(unsigned int cpu)
{
	if (plic_parent_irq)
		disable_percpu_irq(plic_parent_irq);

	return 0;
}

static int plic_starting_cpu(unsigned int cpu)
{
	struct plic_handler *handler = this_cpu_ptr(&plic_handlers);

	if (plic_parent_irq)
		enable_percpu_irq(plic_parent_irq,
				  irq_get_trigger_type(plic_parent_irq));
	else
		dev_warn(handler->priv->dev, "cpu%d: parent irq not available\n", cpu);
	plic_set_threshold(handler, PLIC_ENABLE_THRESHOLD);

	return 0;
}

static const struct of_device_id plic_match[] = {
	{ .compatible = "sifive,plic-1.0.0" },
	{ .compatible = "riscv,plic0" },
	{ .compatible = "andestech,nceplic100",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{ .compatible = "thead,c900-plic",
	  .data = (const void *)BIT(PLIC_QUIRK_EDGE_INTERRUPT) },
	{}
};

static int plic_parse_nr_irqs_and_contexts(struct platform_device *pdev,
					   u32 *nr_irqs, u32 *nr_contexts)
{
	struct device *dev = &pdev->dev;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported; extend this
	 * function when adding ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_property_read_u32(to_of_node(dev->fwnode), "riscv,ndev", nr_irqs);
	if (rc) {
		dev_err(dev, "riscv,ndev property not available\n");
		return rc;
	}

	*nr_contexts = of_irq_count(to_of_node(dev->fwnode));
	if (WARN_ON(!(*nr_contexts))) {
		dev_err(dev, "no PLIC context available\n");
		return -EINVAL;
	}

	return 0;
}

static int plic_parse_context_parent(struct platform_device *pdev, u32 context,
				     u32 *parent_hwirq, int *parent_cpu)
{
	struct device *dev = &pdev->dev;
	struct of_phandle_args parent;
	unsigned long hartid;
	int rc;

	/*
	 * Currently, only an OF fwnode is supported; extend this
	 * function when adding ACPI support.
	 */
	if (!is_of_node(dev->fwnode))
		return -EINVAL;

	rc = of_irq_parse_one(to_of_node(dev->fwnode), context, &parent);
	if (rc)
		return rc;

	rc = riscv_of_parent_hartid(parent.np, &hartid);
	if (rc)
		return rc;

	*parent_hwirq = parent.args[0];
	*parent_cpu = riscv_hartid_to_cpuid(hartid);
	return 0;
}
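
/*
 * Illustrative DT fragment (editorial, not from this driver's sources): a
 * PLIC node for two harts, where each hart contributes an M-mode external
 * context (cause 11) followed by an S-mode external context (cause 9, i.e.
 * RV_IRQ_EXT):
 *
 *	plic: interrupt-controller@c000000 {
 *		compatible = "sifive,plic-1.0.0";
 *		reg = <0xc000000 0x4000000>;
 *		interrupt-controller;
 *		#interrupt-cells = <1>;
 *		riscv,ndev = <31>;
 *		interrupts-extended = <&cpu0_intc 11>, <&cpu0_intc 9>,
 *				      <&cpu1_intc 11>, <&cpu1_intc 9>;
 *	};
 *
 * of_irq_count() then reports four contexts, and plic_parse_context_parent()
 * yields each context's parent hwirq (11 or 9) plus the CPU of the hart
 * that owns it.
 */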

static int plic_probe(struct platform_device *pdev)
{
	int error = 0, nr_handlers = 0, cpu, i;
	struct device *dev = &pdev->dev;
	unsigned long plic_quirks = 0;
	struct plic_handler *handler;
	u32 nr_irqs, nr_contexts, parent_hwirq;
	struct plic_priv *priv;
	irq_hw_number_t hwirq;

	if (is_of_node(dev->fwnode)) {
		const struct of_device_id *id;

		id = of_match_node(plic_match, to_of_node(dev->fwnode));
		if (id)
			plic_quirks = (unsigned long)id->data;
	}

	error = plic_parse_nr_irqs_and_contexts(pdev, &nr_irqs, &nr_contexts);
	if (error)
		return error;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	priv->dev = dev;
	priv->plic_quirks = plic_quirks;
	priv->nr_irqs = nr_irqs;

	/* devm_platform_ioremap_resource() returns an ERR_PTR, never NULL */
	priv->regs = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(priv->regs))
		return PTR_ERR(priv->regs);

	priv->prio_save = devm_bitmap_zalloc(dev, nr_irqs, GFP_KERNEL);
	if (!priv->prio_save)
		return -ENOMEM;

	for (i = 0; i < nr_contexts; i++) {
		error = plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu);
		if (error) {
			dev_warn(dev, "hwirq for context%d not found\n", i);
			continue;
		}

		/*
		 * Skip contexts other than external interrupts for our
		 * privilege level.
		 */
		if (parent_hwirq != RV_IRQ_EXT) {
			/* Disable S-mode enable bits if running in M-mode. */
			if (IS_ENABLED(CONFIG_RISCV_M_MODE)) {
				void __iomem *enable_base = priv->regs +
					CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;

				for (hwirq = 1; hwirq <= nr_irqs; hwirq++)
					__plic_toggle(enable_base, hwirq, 0);
			}
			continue;
		}

		if (cpu < 0) {
			dev_warn(dev, "Invalid cpuid for context %d\n", i);
			continue;
		}

		/*
		 * When running in M-mode we need to ignore the S-mode handler.
		 * Here we assume it always comes later, but that might be a
		 * little fragile.
		 */
		handler = per_cpu_ptr(&plic_handlers, cpu);
		if (handler->present) {
			dev_warn(dev, "handler already present for context %d.\n", i);
			plic_set_threshold(handler, PLIC_DISABLE_THRESHOLD);
			goto done;
		}

		cpumask_set_cpu(cpu, &priv->lmask);
		handler->present = true;
		handler->hart_base = priv->regs + CONTEXT_BASE +
					i * CONTEXT_SIZE;
		raw_spin_lock_init(&handler->enable_lock);
		handler->enable_base = priv->regs + CONTEXT_ENABLE_BASE +
					i * CONTEXT_ENABLE_SIZE;
		handler->priv = priv;

		handler->enable_save = devm_kcalloc(dev, DIV_ROUND_UP(nr_irqs, 32),
						    sizeof(*handler->enable_save),
						    GFP_KERNEL);
		if (!handler->enable_save)
			goto fail_cleanup_contexts;
done:
		for (hwirq = 1; hwirq <= nr_irqs; hwirq++) {
			plic_toggle(handler, hwirq, 0);
			writel(1, priv->regs + PRIORITY_BASE +
				  hwirq * PRIORITY_PER_ID);
		}
		nr_handlers++;
	}

	priv->irqdomain = irq_domain_add_linear(to_of_node(dev->fwnode), nr_irqs + 1,
						&plic_irqdomain_ops, priv);
	if (WARN_ON(!priv->irqdomain))
		goto fail_cleanup_contexts;

	/*
	 * We can have multiple PLIC instances so setup global state
	 * and register syscore operations only once after context
	 * handlers of all online CPUs are initialized.
	 */
	if (!plic_global_setup_done) {
		struct irq_domain *domain;
		bool global_setup = true;

		for_each_online_cpu(cpu) {
			handler = per_cpu_ptr(&plic_handlers, cpu);
			if (!handler->present) {
				global_setup = false;
				break;
			}
		}

		if (global_setup) {
			/* Find parent domain and register chained handler */
			domain = irq_find_matching_fwnode(riscv_get_intc_hwnode(), DOMAIN_BUS_ANY);
			if (domain)
				plic_parent_irq = irq_create_mapping(domain, RV_IRQ_EXT);
			if (plic_parent_irq)
				irq_set_chained_handler(plic_parent_irq, plic_handle_irq);

			cpuhp_setup_state(CPUHP_AP_IRQ_SIFIVE_PLIC_STARTING,
					  "irqchip/sifive/plic:starting",
					  plic_starting_cpu, plic_dying_cpu);
			register_syscore_ops(&plic_irq_syscore_ops);
			plic_global_setup_done = true;
		}
	}

	dev_info(dev, "mapped %d interrupts with %d handlers for %d contexts.\n",
		 nr_irqs, nr_handlers, nr_contexts);
	return 0;

fail_cleanup_contexts:
	for (i = 0; i < nr_contexts; i++) {
		if (plic_parse_context_parent(pdev, i, &parent_hwirq, &cpu))
			continue;
		if (parent_hwirq != RV_IRQ_EXT || cpu < 0)
			continue;

		handler = per_cpu_ptr(&plic_handlers, cpu);
		handler->present = false;
		handler->hart_base = NULL;
		handler->enable_base = NULL;
		handler->enable_save = NULL;
		handler->priv = NULL;
	}
	return -ENOMEM;
}

static struct platform_driver plic_driver = {
	.driver = {
		.name		= "riscv-plic",
		.of_match_table	= plic_match,
	},
	.probe = plic_probe,
};
builtin_platform_driver(plic_driver);