1 // SPDX-License-Identifier: GPL-2.0-or-later 2 /* 3 * Copyright 2016,2017 IBM Corporation. 4 */ 5 6 #define pr_fmt(fmt) "xive: " fmt 7 8 #include <linux/types.h> 9 #include <linux/threads.h> 10 #include <linux/kernel.h> 11 #include <linux/irq.h> 12 #include <linux/debugfs.h> 13 #include <linux/smp.h> 14 #include <linux/interrupt.h> 15 #include <linux/seq_file.h> 16 #include <linux/init.h> 17 #include <linux/cpu.h> 18 #include <linux/of.h> 19 #include <linux/slab.h> 20 #include <linux/spinlock.h> 21 #include <linux/msi.h> 22 #include <linux/vmalloc.h> 23 24 #include <asm/debugfs.h> 25 #include <asm/prom.h> 26 #include <asm/io.h> 27 #include <asm/smp.h> 28 #include <asm/machdep.h> 29 #include <asm/irq.h> 30 #include <asm/errno.h> 31 #include <asm/xive.h> 32 #include <asm/xive-regs.h> 33 #include <asm/xmon.h> 34 35 #include "xive-internal.h" 36 37 #undef DEBUG_FLUSH 38 #undef DEBUG_ALL 39 40 #ifdef DEBUG_ALL 41 #define DBG_VERBOSE(fmt, ...) pr_devel("cpu %d - " fmt, \ 42 smp_processor_id(), ## __VA_ARGS__) 43 #else 44 #define DBG_VERBOSE(fmt...) do { } while(0) 45 #endif 46 47 bool __xive_enabled; 48 EXPORT_SYMBOL_GPL(__xive_enabled); 49 bool xive_cmdline_disabled; 50 51 /* We use only one priority for now */ 52 static u8 xive_irq_priority; 53 54 /* TIMA exported to KVM */ 55 void __iomem *xive_tima; 56 EXPORT_SYMBOL_GPL(xive_tima); 57 u32 xive_tima_offset; 58 59 /* Backend ops */ 60 static const struct xive_ops *xive_ops; 61 62 /* Our global interrupt domain */ 63 static struct irq_domain *xive_irq_domain; 64 65 #ifdef CONFIG_SMP 66 /* The IPIs use the same logical irq number when on the same chip */ 67 static struct xive_ipi_desc { 68 unsigned int irq; 69 char name[16]; 70 } *xive_ipis; 71 72 /* 73 * Use early_cpu_to_node() for hot-plugged CPUs 74 */ 75 static unsigned int xive_ipi_cpu_to_irq(unsigned int cpu) 76 { 77 return xive_ipis[early_cpu_to_node(cpu)].irq; 78 } 79 #endif 80 81 /* Xive state for each CPU */ 82 static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu); 83 84 /* An invalid CPU target */ 85 #define XIVE_INVALID_TARGET (-1) 86 87 /* 88 * Read the next entry in a queue, return its content if it's valid 89 * or 0 if there is no new entry. 90 * 91 * The queue pointer is moved forward unless "just_peek" is set 92 */ 93 static u32 xive_read_eq(struct xive_q *q, bool just_peek) 94 { 95 u32 cur; 96 97 if (!q->qpage) 98 return 0; 99 cur = be32_to_cpup(q->qpage + q->idx); 100 101 /* Check valid bit (31) vs current toggle polarity */ 102 if ((cur >> 31) == q->toggle) 103 return 0; 104 105 /* If consuming from the queue ... */ 106 if (!just_peek) { 107 /* Next entry */ 108 q->idx = (q->idx + 1) & q->msk; 109 110 /* Wrap around: flip valid toggle */ 111 if (q->idx == 0) 112 q->toggle ^= 1; 113 } 114 /* Mask out the valid bit (31) */ 115 return cur & 0x7fffffff; 116 } 117 118 /* 119 * Scans all the queue that may have interrupts in them 120 * (based on "pending_prio") in priority order until an 121 * interrupt is found or all the queues are empty. 122 * 123 * Then updates the CPPR (Current Processor Priority 124 * Register) based on the most favored interrupt found 125 * (0xff if none) and return what was found (0 if none). 126 * 127 * If just_peek is set, return the most favored pending 128 * interrupt if any but don't update the queue pointers. 129 * 130 * Note: This function can operate generically on any number 131 * of queues (up to 8). The current implementation of the XIVE 132 * driver only uses a single queue however. 133 * 134 * Note2: This will also "flush" "the pending_count" of a queue 135 * into the "count" when that queue is observed to be empty. 136 * This is used to keep track of the amount of interrupts 137 * targetting a queue. When an interrupt is moved away from 138 * a queue, we only decrement that queue count once the queue 139 * has been observed empty to avoid races. 140 */ 141 static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek) 142 { 143 u32 irq = 0; 144 u8 prio = 0; 145 146 /* Find highest pending priority */ 147 while (xc->pending_prio != 0) { 148 struct xive_q *q; 149 150 prio = ffs(xc->pending_prio) - 1; 151 DBG_VERBOSE("scan_irq: trying prio %d\n", prio); 152 153 /* Try to fetch */ 154 irq = xive_read_eq(&xc->queue[prio], just_peek); 155 156 /* Found something ? That's it */ 157 if (irq) { 158 if (just_peek || irq_to_desc(irq)) 159 break; 160 /* 161 * We should never get here; if we do then we must 162 * have failed to synchronize the interrupt properly 163 * when shutting it down. 164 */ 165 pr_crit("xive: got interrupt %d without descriptor, dropping\n", 166 irq); 167 WARN_ON(1); 168 continue; 169 } 170 171 /* Clear pending bits */ 172 xc->pending_prio &= ~(1 << prio); 173 174 /* 175 * Check if the queue count needs adjusting due to 176 * interrupts being moved away. See description of 177 * xive_dec_target_count() 178 */ 179 q = &xc->queue[prio]; 180 if (atomic_read(&q->pending_count)) { 181 int p = atomic_xchg(&q->pending_count, 0); 182 if (p) { 183 WARN_ON(p > atomic_read(&q->count)); 184 atomic_sub(p, &q->count); 185 } 186 } 187 } 188 189 /* If nothing was found, set CPPR to 0xff */ 190 if (irq == 0) 191 prio = 0xff; 192 193 /* Update HW CPPR to match if necessary */ 194 if (prio != xc->cppr) { 195 DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio); 196 xc->cppr = prio; 197 out_8(xive_tima + xive_tima_offset + TM_CPPR, prio); 198 } 199 200 return irq; 201 } 202 203 /* 204 * This is used to perform the magic loads from an ESB 205 * described in xive-regs.h 206 */ 207 static notrace u8 xive_esb_read(struct xive_irq_data *xd, u32 offset) 208 { 209 u64 val; 210 211 if (offset == XIVE_ESB_SET_PQ_10 && xd->flags & XIVE_IRQ_FLAG_STORE_EOI) 212 offset |= XIVE_ESB_LD_ST_MO; 213 214 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 215 val = xive_ops->esb_rw(xd->hw_irq, offset, 0, 0); 216 else 217 val = in_be64(xd->eoi_mmio + offset); 218 219 return (u8)val; 220 } 221 222 static void xive_esb_write(struct xive_irq_data *xd, u32 offset, u64 data) 223 { 224 if ((xd->flags & XIVE_IRQ_FLAG_H_INT_ESB) && xive_ops->esb_rw) 225 xive_ops->esb_rw(xd->hw_irq, offset, data, 1); 226 else 227 out_be64(xd->eoi_mmio + offset, data); 228 } 229 230 #ifdef CONFIG_XMON 231 static notrace void xive_dump_eq(const char *name, struct xive_q *q) 232 { 233 u32 i0, i1, idx; 234 235 if (!q->qpage) 236 return; 237 idx = q->idx; 238 i0 = be32_to_cpup(q->qpage + idx); 239 idx = (idx + 1) & q->msk; 240 i1 = be32_to_cpup(q->qpage + idx); 241 xmon_printf("%s idx=%d T=%d %08x %08x ...", name, 242 q->idx, q->toggle, i0, i1); 243 } 244 245 notrace void xmon_xive_do_dump(int cpu) 246 { 247 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 248 249 xmon_printf("CPU %d:", cpu); 250 if (xc) { 251 xmon_printf("pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 252 253 #ifdef CONFIG_SMP 254 { 255 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 256 257 xmon_printf("IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 258 val & XIVE_ESB_VAL_P ? 'P' : '-', 259 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 260 } 261 #endif 262 xive_dump_eq("EQ", &xc->queue[xive_irq_priority]); 263 } 264 xmon_printf("\n"); 265 } 266 267 static struct irq_data *xive_get_irq_data(u32 hw_irq) 268 { 269 unsigned int irq = irq_find_mapping(xive_irq_domain, hw_irq); 270 271 return irq ? irq_get_irq_data(irq) : NULL; 272 } 273 274 int xmon_xive_get_irq_config(u32 hw_irq, struct irq_data *d) 275 { 276 int rc; 277 u32 target; 278 u8 prio; 279 u32 lirq; 280 281 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 282 if (rc) { 283 xmon_printf("IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 284 return rc; 285 } 286 287 xmon_printf("IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 288 hw_irq, target, prio, lirq); 289 290 if (!d) 291 d = xive_get_irq_data(hw_irq); 292 293 if (d) { 294 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 295 u64 val = xive_esb_read(xd, XIVE_ESB_GET); 296 297 xmon_printf("flags=%c%c%c PQ=%c%c", 298 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', 299 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 300 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 301 val & XIVE_ESB_VAL_P ? 'P' : '-', 302 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 303 } 304 305 xmon_printf("\n"); 306 return 0; 307 } 308 309 void xmon_xive_get_irq_all(void) 310 { 311 unsigned int i; 312 struct irq_desc *desc; 313 314 for_each_irq_desc(i, desc) { 315 struct irq_data *d = irq_desc_get_irq_data(desc); 316 unsigned int hwirq = (unsigned int)irqd_to_hwirq(d); 317 318 if (d->domain == xive_irq_domain) 319 xmon_xive_get_irq_config(hwirq, d); 320 } 321 } 322 323 #endif /* CONFIG_XMON */ 324 325 static unsigned int xive_get_irq(void) 326 { 327 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 328 u32 irq; 329 330 /* 331 * This can be called either as a result of a HW interrupt or 332 * as a "replay" because EOI decided there was still something 333 * in one of the queues. 334 * 335 * First we perform an ACK cycle in order to update our mask 336 * of pending priorities. This will also have the effect of 337 * updating the CPPR to the most favored pending interrupts. 338 * 339 * In the future, if we have a way to differentiate a first 340 * entry (on HW interrupt) from a replay triggered by EOI, 341 * we could skip this on replays unless we soft-mask tells us 342 * that a new HW interrupt occurred. 343 */ 344 xive_ops->update_pending(xc); 345 346 DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio); 347 348 /* Scan our queue(s) for interrupts */ 349 irq = xive_scan_interrupts(xc, false); 350 351 DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n", 352 irq, xc->pending_prio); 353 354 /* Return pending interrupt if any */ 355 if (irq == XIVE_BAD_IRQ) 356 return 0; 357 return irq; 358 } 359 360 /* 361 * After EOI'ing an interrupt, we need to re-check the queue 362 * to see if another interrupt is pending since multiple 363 * interrupts can coalesce into a single notification to the 364 * CPU. 365 * 366 * If we find that there is indeed more in there, we call 367 * force_external_irq_replay() to make Linux synthetize an 368 * external interrupt on the next call to local_irq_restore(). 369 */ 370 static void xive_do_queue_eoi(struct xive_cpu *xc) 371 { 372 if (xive_scan_interrupts(xc, true) != 0) { 373 DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio); 374 force_external_irq_replay(); 375 } 376 } 377 378 /* 379 * EOI an interrupt at the source. There are several methods 380 * to do this depending on the HW version and source type 381 */ 382 static void xive_do_source_eoi(struct xive_irq_data *xd) 383 { 384 u8 eoi_val; 385 386 xd->stale_p = false; 387 388 /* If the XIVE supports the new "store EOI facility, use it */ 389 if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI) { 390 xive_esb_write(xd, XIVE_ESB_STORE_EOI, 0); 391 return; 392 } 393 394 /* 395 * For LSIs, we use the "EOI cycle" special load rather than 396 * PQ bits, as they are automatically re-triggered in HW when 397 * still pending. 398 */ 399 if (xd->flags & XIVE_IRQ_FLAG_LSI) { 400 xive_esb_read(xd, XIVE_ESB_LOAD_EOI); 401 return; 402 } 403 404 /* 405 * Otherwise, we use the special MMIO that does a clear of 406 * both P and Q and returns the old Q. This allows us to then 407 * do a re-trigger if Q was set rather than synthesizing an 408 * interrupt in software 409 */ 410 eoi_val = xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 411 DBG_VERBOSE("eoi_val=%x\n", eoi_val); 412 413 /* Re-trigger if needed */ 414 if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio) 415 out_be64(xd->trig_mmio, 0); 416 } 417 418 /* irq_chip eoi callback, called with irq descriptor lock held */ 419 static void xive_irq_eoi(struct irq_data *d) 420 { 421 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 422 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 423 424 DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n", 425 d->irq, irqd_to_hwirq(d), xc->pending_prio); 426 427 /* 428 * EOI the source if it hasn't been disabled and hasn't 429 * been passed-through to a KVM guest 430 */ 431 if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d) && 432 !(xd->flags & XIVE_IRQ_FLAG_NO_EOI)) 433 xive_do_source_eoi(xd); 434 else 435 xd->stale_p = true; 436 437 /* 438 * Clear saved_p to indicate that it's no longer occupying 439 * a queue slot on the target queue 440 */ 441 xd->saved_p = false; 442 443 /* Check for more work in the queue */ 444 xive_do_queue_eoi(xc); 445 } 446 447 /* 448 * Helper used to mask and unmask an interrupt source. 449 */ 450 static void xive_do_source_set_mask(struct xive_irq_data *xd, 451 bool mask) 452 { 453 u64 val; 454 455 /* 456 * If the interrupt had P set, it may be in a queue. 457 * 458 * We need to make sure we don't re-enable it until it 459 * has been fetched from that queue and EOId. We keep 460 * a copy of that P state and use it to restore the 461 * ESB accordingly on unmask. 462 */ 463 if (mask) { 464 val = xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 465 if (!xd->stale_p && !!(val & XIVE_ESB_VAL_P)) 466 xd->saved_p = true; 467 xd->stale_p = false; 468 } else if (xd->saved_p) { 469 xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 470 xd->saved_p = false; 471 } else { 472 xive_esb_read(xd, XIVE_ESB_SET_PQ_00); 473 xd->stale_p = false; 474 } 475 } 476 477 /* 478 * Try to chose "cpu" as a new interrupt target. Increments 479 * the queue accounting for that target if it's not already 480 * full. 481 */ 482 static bool xive_try_pick_target(int cpu) 483 { 484 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 485 struct xive_q *q = &xc->queue[xive_irq_priority]; 486 int max; 487 488 /* 489 * Calculate max number of interrupts in that queue. 490 * 491 * We leave a gap of 1 just in case... 492 */ 493 max = (q->msk + 1) - 1; 494 return !!atomic_add_unless(&q->count, 1, max); 495 } 496 497 /* 498 * Un-account an interrupt for a target CPU. We don't directly 499 * decrement q->count since the interrupt might still be present 500 * in the queue. 501 * 502 * Instead increment a separate counter "pending_count" which 503 * will be substracted from "count" later when that CPU observes 504 * the queue to be empty. 505 */ 506 static void xive_dec_target_count(int cpu) 507 { 508 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 509 struct xive_q *q = &xc->queue[xive_irq_priority]; 510 511 if (WARN_ON(cpu < 0 || !xc)) { 512 pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc); 513 return; 514 } 515 516 /* 517 * We increment the "pending count" which will be used 518 * to decrement the target queue count whenever it's next 519 * processed and found empty. This ensure that we don't 520 * decrement while we still have the interrupt there 521 * occupying a slot. 522 */ 523 atomic_inc(&q->pending_count); 524 } 525 526 /* Find a tentative CPU target in a CPU mask */ 527 static int xive_find_target_in_mask(const struct cpumask *mask, 528 unsigned int fuzz) 529 { 530 int cpu, first, num, i; 531 532 /* Pick up a starting point CPU in the mask based on fuzz */ 533 num = min_t(int, cpumask_weight(mask), nr_cpu_ids); 534 first = fuzz % num; 535 536 /* Locate it */ 537 cpu = cpumask_first(mask); 538 for (i = 0; i < first && cpu < nr_cpu_ids; i++) 539 cpu = cpumask_next(cpu, mask); 540 541 /* Sanity check */ 542 if (WARN_ON(cpu >= nr_cpu_ids)) 543 cpu = cpumask_first(cpu_online_mask); 544 545 /* Remember first one to handle wrap-around */ 546 first = cpu; 547 548 /* 549 * Now go through the entire mask until we find a valid 550 * target. 551 */ 552 do { 553 /* 554 * We re-check online as the fallback case passes us 555 * an untested affinity mask 556 */ 557 if (cpu_online(cpu) && xive_try_pick_target(cpu)) 558 return cpu; 559 cpu = cpumask_next(cpu, mask); 560 /* Wrap around */ 561 if (cpu >= nr_cpu_ids) 562 cpu = cpumask_first(mask); 563 } while (cpu != first); 564 565 return -1; 566 } 567 568 /* 569 * Pick a target CPU for an interrupt. This is done at 570 * startup or if the affinity is changed in a way that 571 * invalidates the current target. 572 */ 573 static int xive_pick_irq_target(struct irq_data *d, 574 const struct cpumask *affinity) 575 { 576 static unsigned int fuzz; 577 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 578 cpumask_var_t mask; 579 int cpu = -1; 580 581 /* 582 * If we have chip IDs, first we try to build a mask of 583 * CPUs matching the CPU and find a target in there 584 */ 585 if (xd->src_chip != XIVE_INVALID_CHIP_ID && 586 zalloc_cpumask_var(&mask, GFP_ATOMIC)) { 587 /* Build a mask of matching chip IDs */ 588 for_each_cpu_and(cpu, affinity, cpu_online_mask) { 589 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 590 if (xc->chip_id == xd->src_chip) 591 cpumask_set_cpu(cpu, mask); 592 } 593 /* Try to find a target */ 594 if (cpumask_empty(mask)) 595 cpu = -1; 596 else 597 cpu = xive_find_target_in_mask(mask, fuzz++); 598 free_cpumask_var(mask); 599 if (cpu >= 0) 600 return cpu; 601 fuzz--; 602 } 603 604 /* No chip IDs, fallback to using the affinity mask */ 605 return xive_find_target_in_mask(affinity, fuzz++); 606 } 607 608 static unsigned int xive_irq_startup(struct irq_data *d) 609 { 610 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 611 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 612 int target, rc; 613 614 xd->saved_p = false; 615 xd->stale_p = false; 616 pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n", 617 d->irq, hw_irq, d); 618 619 /* Pick a target */ 620 target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d)); 621 if (target == XIVE_INVALID_TARGET) { 622 /* Try again breaking affinity */ 623 target = xive_pick_irq_target(d, cpu_online_mask); 624 if (target == XIVE_INVALID_TARGET) 625 return -ENXIO; 626 pr_warn("irq %d started with broken affinity\n", d->irq); 627 } 628 629 /* Sanity check */ 630 if (WARN_ON(target == XIVE_INVALID_TARGET || 631 target >= nr_cpu_ids)) 632 target = smp_processor_id(); 633 634 xd->target = target; 635 636 /* 637 * Configure the logical number to be the Linux IRQ number 638 * and set the target queue 639 */ 640 rc = xive_ops->configure_irq(hw_irq, 641 get_hard_smp_processor_id(target), 642 xive_irq_priority, d->irq); 643 if (rc) 644 return rc; 645 646 /* Unmask the ESB */ 647 xive_do_source_set_mask(xd, false); 648 649 return 0; 650 } 651 652 /* called with irq descriptor lock held */ 653 static void xive_irq_shutdown(struct irq_data *d) 654 { 655 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 656 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 657 658 pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n", 659 d->irq, hw_irq, d); 660 661 if (WARN_ON(xd->target == XIVE_INVALID_TARGET)) 662 return; 663 664 /* Mask the interrupt at the source */ 665 xive_do_source_set_mask(xd, true); 666 667 /* 668 * Mask the interrupt in HW in the IVT/EAS and set the number 669 * to be the "bad" IRQ number 670 */ 671 xive_ops->configure_irq(hw_irq, 672 get_hard_smp_processor_id(xd->target), 673 0xff, XIVE_BAD_IRQ); 674 675 xive_dec_target_count(xd->target); 676 xd->target = XIVE_INVALID_TARGET; 677 } 678 679 static void xive_irq_unmask(struct irq_data *d) 680 { 681 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 682 683 pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd); 684 685 xive_do_source_set_mask(xd, false); 686 } 687 688 static void xive_irq_mask(struct irq_data *d) 689 { 690 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 691 692 pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd); 693 694 xive_do_source_set_mask(xd, true); 695 } 696 697 static int xive_irq_set_affinity(struct irq_data *d, 698 const struct cpumask *cpumask, 699 bool force) 700 { 701 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 702 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 703 u32 target, old_target; 704 int rc = 0; 705 706 pr_debug("%s: irq %d/%x\n", __func__, d->irq, hw_irq); 707 708 /* Is this valid ? */ 709 if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids) 710 return -EINVAL; 711 712 /* 713 * If existing target is already in the new mask, and is 714 * online then do nothing. 715 */ 716 if (xd->target != XIVE_INVALID_TARGET && 717 cpu_online(xd->target) && 718 cpumask_test_cpu(xd->target, cpumask)) 719 return IRQ_SET_MASK_OK; 720 721 /* Pick a new target */ 722 target = xive_pick_irq_target(d, cpumask); 723 724 /* No target found */ 725 if (target == XIVE_INVALID_TARGET) 726 return -ENXIO; 727 728 /* Sanity check */ 729 if (WARN_ON(target >= nr_cpu_ids)) 730 target = smp_processor_id(); 731 732 old_target = xd->target; 733 734 /* 735 * Only configure the irq if it's not currently passed-through to 736 * a KVM guest 737 */ 738 if (!irqd_is_forwarded_to_vcpu(d)) 739 rc = xive_ops->configure_irq(hw_irq, 740 get_hard_smp_processor_id(target), 741 xive_irq_priority, d->irq); 742 if (rc < 0) { 743 pr_err("Error %d reconfiguring irq %d\n", rc, d->irq); 744 return rc; 745 } 746 747 pr_debug(" target: 0x%x\n", target); 748 xd->target = target; 749 750 /* Give up previous target */ 751 if (old_target != XIVE_INVALID_TARGET) 752 xive_dec_target_count(old_target); 753 754 return IRQ_SET_MASK_OK; 755 } 756 757 static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type) 758 { 759 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 760 761 /* 762 * We only support these. This has really no effect other than setting 763 * the corresponding descriptor bits mind you but those will in turn 764 * affect the resend function when re-enabling an edge interrupt. 765 * 766 * Set set the default to edge as explained in map(). 767 */ 768 if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE) 769 flow_type = IRQ_TYPE_EDGE_RISING; 770 771 if (flow_type != IRQ_TYPE_EDGE_RISING && 772 flow_type != IRQ_TYPE_LEVEL_LOW) 773 return -EINVAL; 774 775 irqd_set_trigger_type(d, flow_type); 776 777 /* 778 * Double check it matches what the FW thinks 779 * 780 * NOTE: We don't know yet if the PAPR interface will provide 781 * the LSI vs MSI information apart from the device-tree so 782 * this check might have to move into an optional backend call 783 * that is specific to the native backend 784 */ 785 if ((flow_type == IRQ_TYPE_LEVEL_LOW) != 786 !!(xd->flags & XIVE_IRQ_FLAG_LSI)) { 787 pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n", 788 d->irq, (u32)irqd_to_hwirq(d), 789 (flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge", 790 (xd->flags & XIVE_IRQ_FLAG_LSI) ? "Level" : "Edge"); 791 } 792 793 return IRQ_SET_MASK_OK_NOCOPY; 794 } 795 796 static int xive_irq_retrigger(struct irq_data *d) 797 { 798 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 799 800 /* This should be only for MSIs */ 801 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) 802 return 0; 803 804 /* 805 * To perform a retrigger, we first set the PQ bits to 806 * 11, then perform an EOI. 807 */ 808 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 809 xive_do_source_eoi(xd); 810 811 return 1; 812 } 813 814 /* 815 * Caller holds the irq descriptor lock, so this won't be called 816 * concurrently with xive_get_irqchip_state on the same interrupt. 817 */ 818 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 819 { 820 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 821 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 822 int rc; 823 u8 pq; 824 825 /* 826 * This is called by KVM with state non-NULL for enabling 827 * pass-through or NULL for disabling it 828 */ 829 if (state) { 830 irqd_set_forwarded_to_vcpu(d); 831 832 /* Set it to PQ=10 state to prevent further sends */ 833 pq = xive_esb_read(xd, XIVE_ESB_SET_PQ_10); 834 if (!xd->stale_p) { 835 xd->saved_p = !!(pq & XIVE_ESB_VAL_P); 836 xd->stale_p = !xd->saved_p; 837 } 838 839 /* No target ? nothing to do */ 840 if (xd->target == XIVE_INVALID_TARGET) { 841 /* 842 * An untargetted interrupt should have been 843 * also masked at the source 844 */ 845 WARN_ON(xd->saved_p); 846 847 return 0; 848 } 849 850 /* 851 * If P was set, adjust state to PQ=11 to indicate 852 * that a resend is needed for the interrupt to reach 853 * the guest. Also remember the value of P. 854 * 855 * This also tells us that it's in flight to a host queue 856 * or has already been fetched but hasn't been EOIed yet 857 * by the host. This it's potentially using up a host 858 * queue slot. This is important to know because as long 859 * as this is the case, we must not hard-unmask it when 860 * "returning" that interrupt to the host. 861 * 862 * This saved_p is cleared by the host EOI, when we know 863 * for sure the queue slot is no longer in use. 864 */ 865 if (xd->saved_p) { 866 xive_esb_read(xd, XIVE_ESB_SET_PQ_11); 867 868 /* 869 * Sync the XIVE source HW to ensure the interrupt 870 * has gone through the EAS before we change its 871 * target to the guest. That should guarantee us 872 * that we *will* eventually get an EOI for it on 873 * the host. Otherwise there would be a small window 874 * for P to be seen here but the interrupt going 875 * to the guest queue. 876 */ 877 if (xive_ops->sync_source) 878 xive_ops->sync_source(hw_irq); 879 } 880 } else { 881 irqd_clr_forwarded_to_vcpu(d); 882 883 /* No host target ? hard mask and return */ 884 if (xd->target == XIVE_INVALID_TARGET) { 885 xive_do_source_set_mask(xd, true); 886 return 0; 887 } 888 889 /* 890 * Sync the XIVE source HW to ensure the interrupt 891 * has gone through the EAS before we change its 892 * target to the host. 893 */ 894 if (xive_ops->sync_source) 895 xive_ops->sync_source(hw_irq); 896 897 /* 898 * By convention we are called with the interrupt in 899 * a PQ=10 or PQ=11 state, ie, it won't fire and will 900 * have latched in Q whether there's a pending HW 901 * interrupt or not. 902 * 903 * First reconfigure the target. 904 */ 905 rc = xive_ops->configure_irq(hw_irq, 906 get_hard_smp_processor_id(xd->target), 907 xive_irq_priority, d->irq); 908 if (rc) 909 return rc; 910 911 /* 912 * Then if saved_p is not set, effectively re-enable the 913 * interrupt with an EOI. If it is set, we know there is 914 * still a message in a host queue somewhere that will be 915 * EOId eventually. 916 * 917 * Note: We don't check irqd_irq_disabled(). Effectively, 918 * we *will* let the irq get through even if masked if the 919 * HW is still firing it in order to deal with the whole 920 * saved_p business properly. If the interrupt triggers 921 * while masked, the generic code will re-mask it anyway. 922 */ 923 if (!xd->saved_p) 924 xive_do_source_eoi(xd); 925 926 } 927 return 0; 928 } 929 930 /* Called with irq descriptor lock held. */ 931 static int xive_get_irqchip_state(struct irq_data *data, 932 enum irqchip_irq_state which, bool *state) 933 { 934 struct xive_irq_data *xd = irq_data_get_irq_handler_data(data); 935 u8 pq; 936 937 switch (which) { 938 case IRQCHIP_STATE_ACTIVE: 939 pq = xive_esb_read(xd, XIVE_ESB_GET); 940 941 /* 942 * The esb value being all 1's means we couldn't get 943 * the PQ state of the interrupt through mmio. It may 944 * happen, for example when querying a PHB interrupt 945 * while the PHB is in an error state. We consider the 946 * interrupt to be inactive in that case. 947 */ 948 *state = (pq != XIVE_ESB_INVALID) && !xd->stale_p && 949 (xd->saved_p || !!(pq & XIVE_ESB_VAL_P)); 950 return 0; 951 default: 952 return -EINVAL; 953 } 954 } 955 956 static struct irq_chip xive_irq_chip = { 957 .name = "XIVE-IRQ", 958 .irq_startup = xive_irq_startup, 959 .irq_shutdown = xive_irq_shutdown, 960 .irq_eoi = xive_irq_eoi, 961 .irq_mask = xive_irq_mask, 962 .irq_unmask = xive_irq_unmask, 963 .irq_set_affinity = xive_irq_set_affinity, 964 .irq_set_type = xive_irq_set_type, 965 .irq_retrigger = xive_irq_retrigger, 966 .irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity, 967 .irq_get_irqchip_state = xive_get_irqchip_state, 968 }; 969 970 bool is_xive_irq(struct irq_chip *chip) 971 { 972 return chip == &xive_irq_chip; 973 } 974 EXPORT_SYMBOL_GPL(is_xive_irq); 975 976 void xive_cleanup_irq_data(struct xive_irq_data *xd) 977 { 978 pr_debug("%s for HW %x\n", __func__, xd->hw_irq); 979 980 if (xd->eoi_mmio) { 981 iounmap(xd->eoi_mmio); 982 if (xd->eoi_mmio == xd->trig_mmio) 983 xd->trig_mmio = NULL; 984 xd->eoi_mmio = NULL; 985 } 986 if (xd->trig_mmio) { 987 iounmap(xd->trig_mmio); 988 xd->trig_mmio = NULL; 989 } 990 } 991 EXPORT_SYMBOL_GPL(xive_cleanup_irq_data); 992 993 static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw) 994 { 995 struct xive_irq_data *xd; 996 int rc; 997 998 xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL); 999 if (!xd) 1000 return -ENOMEM; 1001 rc = xive_ops->populate_irq_data(hw, xd); 1002 if (rc) { 1003 kfree(xd); 1004 return rc; 1005 } 1006 xd->target = XIVE_INVALID_TARGET; 1007 irq_set_handler_data(virq, xd); 1008 1009 /* 1010 * Turn OFF by default the interrupt being mapped. A side 1011 * effect of this check is the mapping the ESB page of the 1012 * interrupt in the Linux address space. This prevents page 1013 * fault issues in the crash handler which masks all 1014 * interrupts. 1015 */ 1016 xive_esb_read(xd, XIVE_ESB_SET_PQ_01); 1017 1018 return 0; 1019 } 1020 1021 void xive_irq_free_data(unsigned int virq) 1022 { 1023 struct xive_irq_data *xd = irq_get_handler_data(virq); 1024 1025 if (!xd) 1026 return; 1027 irq_set_handler_data(virq, NULL); 1028 xive_cleanup_irq_data(xd); 1029 kfree(xd); 1030 } 1031 EXPORT_SYMBOL_GPL(xive_irq_free_data); 1032 1033 #ifdef CONFIG_SMP 1034 1035 static void xive_cause_ipi(int cpu) 1036 { 1037 struct xive_cpu *xc; 1038 struct xive_irq_data *xd; 1039 1040 xc = per_cpu(xive_cpu, cpu); 1041 1042 DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n", 1043 smp_processor_id(), cpu, xc->hw_ipi); 1044 1045 xd = &xc->ipi_data; 1046 if (WARN_ON(!xd->trig_mmio)) 1047 return; 1048 out_be64(xd->trig_mmio, 0); 1049 } 1050 1051 static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id) 1052 { 1053 return smp_ipi_demux(); 1054 } 1055 1056 static void xive_ipi_eoi(struct irq_data *d) 1057 { 1058 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1059 1060 /* Handle possible race with unplug and drop stale IPIs */ 1061 if (!xc) 1062 return; 1063 1064 DBG_VERBOSE("IPI eoi: irq=%d [0x%lx] (HW IRQ 0x%x) pending=%02x\n", 1065 d->irq, irqd_to_hwirq(d), xc->hw_ipi, xc->pending_prio); 1066 1067 xive_do_source_eoi(&xc->ipi_data); 1068 xive_do_queue_eoi(xc); 1069 } 1070 1071 static void xive_ipi_do_nothing(struct irq_data *d) 1072 { 1073 /* 1074 * Nothing to do, we never mask/unmask IPIs, but the callback 1075 * has to exist for the struct irq_chip. 1076 */ 1077 } 1078 1079 static struct irq_chip xive_ipi_chip = { 1080 .name = "XIVE-IPI", 1081 .irq_eoi = xive_ipi_eoi, 1082 .irq_mask = xive_ipi_do_nothing, 1083 .irq_unmask = xive_ipi_do_nothing, 1084 }; 1085 1086 /* 1087 * IPIs are marked per-cpu. We use separate HW interrupts under the 1088 * hood but associated with the same "linux" interrupt 1089 */ 1090 struct xive_ipi_alloc_info { 1091 irq_hw_number_t hwirq; 1092 }; 1093 1094 static int xive_ipi_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1095 unsigned int nr_irqs, void *arg) 1096 { 1097 struct xive_ipi_alloc_info *info = arg; 1098 int i; 1099 1100 for (i = 0; i < nr_irqs; i++) { 1101 irq_domain_set_info(domain, virq + i, info->hwirq + i, &xive_ipi_chip, 1102 domain->host_data, handle_percpu_irq, 1103 NULL, NULL); 1104 } 1105 return 0; 1106 } 1107 1108 static const struct irq_domain_ops xive_ipi_irq_domain_ops = { 1109 .alloc = xive_ipi_irq_domain_alloc, 1110 }; 1111 1112 static int __init xive_request_ipi(void) 1113 { 1114 struct fwnode_handle *fwnode; 1115 struct irq_domain *ipi_domain; 1116 unsigned int node; 1117 int ret = -ENOMEM; 1118 1119 fwnode = irq_domain_alloc_named_fwnode("XIVE-IPI"); 1120 if (!fwnode) 1121 goto out; 1122 1123 ipi_domain = irq_domain_create_linear(fwnode, nr_node_ids, 1124 &xive_ipi_irq_domain_ops, NULL); 1125 if (!ipi_domain) 1126 goto out_free_fwnode; 1127 1128 xive_ipis = kcalloc(nr_node_ids, sizeof(*xive_ipis), GFP_KERNEL | __GFP_NOFAIL); 1129 if (!xive_ipis) 1130 goto out_free_domain; 1131 1132 for_each_node(node) { 1133 struct xive_ipi_desc *xid = &xive_ipis[node]; 1134 struct xive_ipi_alloc_info info = { node }; 1135 1136 /* Skip nodes without CPUs */ 1137 if (cpumask_empty(cpumask_of_node(node))) 1138 continue; 1139 1140 /* 1141 * Map one IPI interrupt per node for all cpus of that node. 1142 * Since the HW interrupt number doesn't have any meaning, 1143 * simply use the node number. 1144 */ 1145 ret = irq_domain_alloc_irqs(ipi_domain, 1, node, &info); 1146 if (ret < 0) 1147 goto out_free_xive_ipis; 1148 xid->irq = ret; 1149 1150 snprintf(xid->name, sizeof(xid->name), "IPI-%d", node); 1151 1152 ret = request_irq(xid->irq, xive_muxed_ipi_action, 1153 IRQF_PERCPU | IRQF_NO_THREAD, xid->name, NULL); 1154 1155 WARN(ret < 0, "Failed to request IPI %d: %d\n", xid->irq, ret); 1156 } 1157 1158 return ret; 1159 1160 out_free_xive_ipis: 1161 kfree(xive_ipis); 1162 out_free_domain: 1163 irq_domain_remove(ipi_domain); 1164 out_free_fwnode: 1165 irq_domain_free_fwnode(fwnode); 1166 out: 1167 return ret; 1168 } 1169 1170 static int xive_setup_cpu_ipi(unsigned int cpu) 1171 { 1172 unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 1173 struct xive_cpu *xc; 1174 int rc; 1175 1176 pr_debug("Setting up IPI for CPU %d\n", cpu); 1177 1178 xc = per_cpu(xive_cpu, cpu); 1179 1180 /* Check if we are already setup */ 1181 if (xc->hw_ipi != XIVE_BAD_IRQ) 1182 return 0; 1183 1184 /* Grab an IPI from the backend, this will populate xc->hw_ipi */ 1185 if (xive_ops->get_ipi(cpu, xc)) 1186 return -EIO; 1187 1188 /* 1189 * Populate the IRQ data in the xive_cpu structure and 1190 * configure the HW / enable the IPIs. 1191 */ 1192 rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data); 1193 if (rc) { 1194 pr_err("Failed to populate IPI data on CPU %d\n", cpu); 1195 return -EIO; 1196 } 1197 rc = xive_ops->configure_irq(xc->hw_ipi, 1198 get_hard_smp_processor_id(cpu), 1199 xive_irq_priority, xive_ipi_irq); 1200 if (rc) { 1201 pr_err("Failed to map IPI CPU %d\n", cpu); 1202 return -EIO; 1203 } 1204 pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu, 1205 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio); 1206 1207 /* Unmask it */ 1208 xive_do_source_set_mask(&xc->ipi_data, false); 1209 1210 return 0; 1211 } 1212 1213 static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc) 1214 { 1215 unsigned int xive_ipi_irq = xive_ipi_cpu_to_irq(cpu); 1216 1217 /* Disable the IPI and free the IRQ data */ 1218 1219 /* Already cleaned up ? */ 1220 if (xc->hw_ipi == XIVE_BAD_IRQ) 1221 return; 1222 1223 /* Mask the IPI */ 1224 xive_do_source_set_mask(&xc->ipi_data, true); 1225 1226 /* 1227 * Note: We don't call xive_cleanup_irq_data() to free 1228 * the mappings as this is called from an IPI on kexec 1229 * which is not a safe environment to call iounmap() 1230 */ 1231 1232 /* Deconfigure/mask in the backend */ 1233 xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(), 1234 0xff, xive_ipi_irq); 1235 1236 /* Free the IPIs in the backend */ 1237 xive_ops->put_ipi(cpu, xc); 1238 } 1239 1240 void __init xive_smp_probe(void) 1241 { 1242 smp_ops->cause_ipi = xive_cause_ipi; 1243 1244 /* Register the IPI */ 1245 xive_request_ipi(); 1246 1247 /* Allocate and setup IPI for the boot CPU */ 1248 xive_setup_cpu_ipi(smp_processor_id()); 1249 } 1250 1251 #endif /* CONFIG_SMP */ 1252 1253 static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq, 1254 irq_hw_number_t hw) 1255 { 1256 int rc; 1257 1258 /* 1259 * Mark interrupts as edge sensitive by default so that resend 1260 * actually works. Will fix that up below if needed. 1261 */ 1262 irq_clear_status_flags(virq, IRQ_LEVEL); 1263 1264 rc = xive_irq_alloc_data(virq, hw); 1265 if (rc) 1266 return rc; 1267 1268 irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq); 1269 1270 return 0; 1271 } 1272 1273 static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq) 1274 { 1275 xive_irq_free_data(virq); 1276 } 1277 1278 static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct, 1279 const u32 *intspec, unsigned int intsize, 1280 irq_hw_number_t *out_hwirq, unsigned int *out_flags) 1281 1282 { 1283 *out_hwirq = intspec[0]; 1284 1285 /* 1286 * If intsize is at least 2, we look for the type in the second cell, 1287 * we assume the LSB indicates a level interrupt. 1288 */ 1289 if (intsize > 1) { 1290 if (intspec[1] & 1) 1291 *out_flags = IRQ_TYPE_LEVEL_LOW; 1292 else 1293 *out_flags = IRQ_TYPE_EDGE_RISING; 1294 } else 1295 *out_flags = IRQ_TYPE_LEVEL_LOW; 1296 1297 return 0; 1298 } 1299 1300 static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node, 1301 enum irq_domain_bus_token bus_token) 1302 { 1303 return xive_ops->match(node); 1304 } 1305 1306 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1307 static const char * const esb_names[] = { "RESET", "OFF", "PENDING", "QUEUED" }; 1308 1309 static const struct { 1310 u64 mask; 1311 char *name; 1312 } xive_irq_flags[] = { 1313 { XIVE_IRQ_FLAG_STORE_EOI, "STORE_EOI" }, 1314 { XIVE_IRQ_FLAG_LSI, "LSI" }, 1315 { XIVE_IRQ_FLAG_H_INT_ESB, "H_INT_ESB" }, 1316 { XIVE_IRQ_FLAG_NO_EOI, "NO_EOI" }, 1317 }; 1318 1319 static void xive_irq_domain_debug_show(struct seq_file *m, struct irq_domain *d, 1320 struct irq_data *irqd, int ind) 1321 { 1322 struct xive_irq_data *xd; 1323 u64 val; 1324 int i; 1325 1326 /* No IRQ domain level information. To be done */ 1327 if (!irqd) 1328 return; 1329 1330 if (!is_xive_irq(irq_data_get_irq_chip(irqd))) 1331 return; 1332 1333 seq_printf(m, "%*sXIVE:\n", ind, ""); 1334 ind++; 1335 1336 xd = irq_data_get_irq_handler_data(irqd); 1337 if (!xd) { 1338 seq_printf(m, "%*snot assigned\n", ind, ""); 1339 return; 1340 } 1341 1342 val = xive_esb_read(xd, XIVE_ESB_GET); 1343 seq_printf(m, "%*sESB: %s\n", ind, "", esb_names[val & 0x3]); 1344 seq_printf(m, "%*sPstate: %s %s\n", ind, "", xd->stale_p ? "stale" : "", 1345 xd->saved_p ? "saved" : ""); 1346 seq_printf(m, "%*sTarget: %d\n", ind, "", xd->target); 1347 seq_printf(m, "%*sChip: %d\n", ind, "", xd->src_chip); 1348 seq_printf(m, "%*sTrigger: 0x%016llx\n", ind, "", xd->trig_page); 1349 seq_printf(m, "%*sEOI: 0x%016llx\n", ind, "", xd->eoi_page); 1350 seq_printf(m, "%*sFlags: 0x%llx\n", ind, "", xd->flags); 1351 for (i = 0; i < ARRAY_SIZE(xive_irq_flags); i++) { 1352 if (xd->flags & xive_irq_flags[i].mask) 1353 seq_printf(m, "%*s%s\n", ind + 12, "", xive_irq_flags[i].name); 1354 } 1355 } 1356 #endif 1357 1358 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1359 static int xive_irq_domain_translate(struct irq_domain *d, 1360 struct irq_fwspec *fwspec, 1361 unsigned long *hwirq, 1362 unsigned int *type) 1363 { 1364 return xive_irq_domain_xlate(d, to_of_node(fwspec->fwnode), 1365 fwspec->param, fwspec->param_count, 1366 hwirq, type); 1367 } 1368 1369 static int xive_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, 1370 unsigned int nr_irqs, void *arg) 1371 { 1372 struct irq_fwspec *fwspec = arg; 1373 irq_hw_number_t hwirq; 1374 unsigned int type = IRQ_TYPE_NONE; 1375 int i, rc; 1376 1377 rc = xive_irq_domain_translate(domain, fwspec, &hwirq, &type); 1378 if (rc) 1379 return rc; 1380 1381 pr_debug("%s %d/%lx #%d\n", __func__, virq, hwirq, nr_irqs); 1382 1383 for (i = 0; i < nr_irqs; i++) { 1384 /* TODO: call xive_irq_domain_map() */ 1385 1386 /* 1387 * Mark interrupts as edge sensitive by default so that resend 1388 * actually works. Will fix that up below if needed. 1389 */ 1390 irq_clear_status_flags(virq, IRQ_LEVEL); 1391 1392 /* allocates and sets handler data */ 1393 rc = xive_irq_alloc_data(virq + i, hwirq + i); 1394 if (rc) 1395 return rc; 1396 1397 irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i, 1398 &xive_irq_chip, domain->host_data); 1399 irq_set_handler(virq + i, handle_fasteoi_irq); 1400 } 1401 1402 return 0; 1403 } 1404 1405 static void xive_irq_domain_free(struct irq_domain *domain, 1406 unsigned int virq, unsigned int nr_irqs) 1407 { 1408 int i; 1409 1410 pr_debug("%s %d #%d\n", __func__, virq, nr_irqs); 1411 1412 for (i = 0; i < nr_irqs; i++) 1413 xive_irq_free_data(virq + i); 1414 } 1415 #endif 1416 1417 static const struct irq_domain_ops xive_irq_domain_ops = { 1418 #ifdef CONFIG_IRQ_DOMAIN_HIERARCHY 1419 .alloc = xive_irq_domain_alloc, 1420 .free = xive_irq_domain_free, 1421 .translate = xive_irq_domain_translate, 1422 #endif 1423 .match = xive_irq_domain_match, 1424 .map = xive_irq_domain_map, 1425 .unmap = xive_irq_domain_unmap, 1426 .xlate = xive_irq_domain_xlate, 1427 #ifdef CONFIG_GENERIC_IRQ_DEBUGFS 1428 .debug_show = xive_irq_domain_debug_show, 1429 #endif 1430 }; 1431 1432 static void __init xive_init_host(struct device_node *np) 1433 { 1434 xive_irq_domain = irq_domain_add_nomap(np, XIVE_MAX_IRQ, 1435 &xive_irq_domain_ops, NULL); 1436 if (WARN_ON(xive_irq_domain == NULL)) 1437 return; 1438 irq_set_default_host(xive_irq_domain); 1439 } 1440 1441 static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1442 { 1443 if (xc->queue[xive_irq_priority].qpage) 1444 xive_ops->cleanup_queue(cpu, xc, xive_irq_priority); 1445 } 1446 1447 static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc) 1448 { 1449 int rc = 0; 1450 1451 /* We setup 1 queues for now with a 64k page */ 1452 if (!xc->queue[xive_irq_priority].qpage) 1453 rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority); 1454 1455 return rc; 1456 } 1457 1458 static int xive_prepare_cpu(unsigned int cpu) 1459 { 1460 struct xive_cpu *xc; 1461 1462 xc = per_cpu(xive_cpu, cpu); 1463 if (!xc) { 1464 xc = kzalloc_node(sizeof(struct xive_cpu), 1465 GFP_KERNEL, cpu_to_node(cpu)); 1466 if (!xc) 1467 return -ENOMEM; 1468 xc->hw_ipi = XIVE_BAD_IRQ; 1469 xc->chip_id = XIVE_INVALID_CHIP_ID; 1470 if (xive_ops->prepare_cpu) 1471 xive_ops->prepare_cpu(cpu, xc); 1472 1473 per_cpu(xive_cpu, cpu) = xc; 1474 } 1475 1476 /* Setup EQs if not already */ 1477 return xive_setup_cpu_queues(cpu, xc); 1478 } 1479 1480 static void xive_setup_cpu(void) 1481 { 1482 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1483 1484 /* The backend might have additional things to do */ 1485 if (xive_ops->setup_cpu) 1486 xive_ops->setup_cpu(smp_processor_id(), xc); 1487 1488 /* Set CPPR to 0xff to enable flow of interrupts */ 1489 xc->cppr = 0xff; 1490 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 1491 } 1492 1493 #ifdef CONFIG_SMP 1494 void xive_smp_setup_cpu(void) 1495 { 1496 pr_devel("SMP setup CPU %d\n", smp_processor_id()); 1497 1498 /* This will have already been done on the boot CPU */ 1499 if (smp_processor_id() != boot_cpuid) 1500 xive_setup_cpu(); 1501 1502 } 1503 1504 int xive_smp_prepare_cpu(unsigned int cpu) 1505 { 1506 int rc; 1507 1508 /* Allocate per-CPU data and queues */ 1509 rc = xive_prepare_cpu(cpu); 1510 if (rc) 1511 return rc; 1512 1513 /* Allocate and setup IPI for the new CPU */ 1514 return xive_setup_cpu_ipi(cpu); 1515 } 1516 1517 #ifdef CONFIG_HOTPLUG_CPU 1518 static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc) 1519 { 1520 u32 irq; 1521 1522 /* We assume local irqs are disabled */ 1523 WARN_ON(!irqs_disabled()); 1524 1525 /* Check what's already in the CPU queue */ 1526 while ((irq = xive_scan_interrupts(xc, false)) != 0) { 1527 /* 1528 * We need to re-route that interrupt to its new destination. 1529 * First get and lock the descriptor 1530 */ 1531 struct irq_desc *desc = irq_to_desc(irq); 1532 struct irq_data *d = irq_desc_get_irq_data(desc); 1533 struct xive_irq_data *xd; 1534 1535 /* 1536 * Ignore anything that isn't a XIVE irq and ignore 1537 * IPIs, so can just be dropped. 1538 */ 1539 if (d->domain != xive_irq_domain) 1540 continue; 1541 1542 /* 1543 * The IRQ should have already been re-routed, it's just a 1544 * stale in the old queue, so re-trigger it in order to make 1545 * it reach is new destination. 1546 */ 1547 #ifdef DEBUG_FLUSH 1548 pr_info("CPU %d: Got irq %d while offline, re-sending...\n", 1549 cpu, irq); 1550 #endif 1551 raw_spin_lock(&desc->lock); 1552 xd = irq_desc_get_handler_data(desc); 1553 1554 /* 1555 * Clear saved_p to indicate that it's no longer pending 1556 */ 1557 xd->saved_p = false; 1558 1559 /* 1560 * For LSIs, we EOI, this will cause a resend if it's 1561 * still asserted. Otherwise do an MSI retrigger. 1562 */ 1563 if (xd->flags & XIVE_IRQ_FLAG_LSI) 1564 xive_do_source_eoi(xd); 1565 else 1566 xive_irq_retrigger(d); 1567 1568 raw_spin_unlock(&desc->lock); 1569 } 1570 } 1571 1572 void xive_smp_disable_cpu(void) 1573 { 1574 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1575 unsigned int cpu = smp_processor_id(); 1576 1577 /* Migrate interrupts away from the CPU */ 1578 irq_migrate_all_off_this_cpu(); 1579 1580 /* Set CPPR to 0 to disable flow of interrupts */ 1581 xc->cppr = 0; 1582 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 1583 1584 /* Flush everything still in the queue */ 1585 xive_flush_cpu_queue(cpu, xc); 1586 1587 /* Re-enable CPPR */ 1588 xc->cppr = 0xff; 1589 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff); 1590 } 1591 1592 void xive_flush_interrupt(void) 1593 { 1594 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1595 unsigned int cpu = smp_processor_id(); 1596 1597 /* Called if an interrupt occurs while the CPU is hot unplugged */ 1598 xive_flush_cpu_queue(cpu, xc); 1599 } 1600 1601 #endif /* CONFIG_HOTPLUG_CPU */ 1602 1603 #endif /* CONFIG_SMP */ 1604 1605 void xive_teardown_cpu(void) 1606 { 1607 struct xive_cpu *xc = __this_cpu_read(xive_cpu); 1608 unsigned int cpu = smp_processor_id(); 1609 1610 /* Set CPPR to 0 to disable flow of interrupts */ 1611 xc->cppr = 0; 1612 out_8(xive_tima + xive_tima_offset + TM_CPPR, 0); 1613 1614 if (xive_ops->teardown_cpu) 1615 xive_ops->teardown_cpu(cpu, xc); 1616 1617 #ifdef CONFIG_SMP 1618 /* Get rid of IPI */ 1619 xive_cleanup_cpu_ipi(cpu, xc); 1620 #endif 1621 1622 /* Disable and free the queues */ 1623 xive_cleanup_cpu_queues(cpu, xc); 1624 } 1625 1626 void xive_shutdown(void) 1627 { 1628 xive_ops->shutdown(); 1629 } 1630 1631 bool __init xive_core_init(struct device_node *np, const struct xive_ops *ops, 1632 void __iomem *area, u32 offset, u8 max_prio) 1633 { 1634 xive_tima = area; 1635 xive_tima_offset = offset; 1636 xive_ops = ops; 1637 xive_irq_priority = max_prio; 1638 1639 ppc_md.get_irq = xive_get_irq; 1640 __xive_enabled = true; 1641 1642 pr_devel("Initializing host..\n"); 1643 xive_init_host(np); 1644 1645 pr_devel("Initializing boot CPU..\n"); 1646 1647 /* Allocate per-CPU data and queues */ 1648 xive_prepare_cpu(smp_processor_id()); 1649 1650 /* Get ready for interrupts */ 1651 xive_setup_cpu(); 1652 1653 pr_info("Interrupt handling initialized with %s backend\n", 1654 xive_ops->name); 1655 pr_info("Using priority %d for all interrupts\n", max_prio); 1656 1657 return true; 1658 } 1659 1660 __be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift) 1661 { 1662 unsigned int alloc_order; 1663 struct page *pages; 1664 __be32 *qpage; 1665 1666 alloc_order = xive_alloc_order(queue_shift); 1667 pages = alloc_pages_node(cpu_to_node(cpu), GFP_KERNEL, alloc_order); 1668 if (!pages) 1669 return ERR_PTR(-ENOMEM); 1670 qpage = (__be32 *)page_address(pages); 1671 memset(qpage, 0, 1 << queue_shift); 1672 1673 return qpage; 1674 } 1675 1676 static int __init xive_off(char *arg) 1677 { 1678 xive_cmdline_disabled = true; 1679 return 0; 1680 } 1681 __setup("xive=off", xive_off); 1682 1683 static void xive_debug_show_cpu(struct seq_file *m, int cpu) 1684 { 1685 struct xive_cpu *xc = per_cpu(xive_cpu, cpu); 1686 1687 seq_printf(m, "CPU %d:", cpu); 1688 if (xc) { 1689 seq_printf(m, "pp=%02x CPPR=%02x ", xc->pending_prio, xc->cppr); 1690 1691 #ifdef CONFIG_SMP 1692 { 1693 u64 val = xive_esb_read(&xc->ipi_data, XIVE_ESB_GET); 1694 1695 seq_printf(m, "IPI=0x%08x PQ=%c%c ", xc->hw_ipi, 1696 val & XIVE_ESB_VAL_P ? 'P' : '-', 1697 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 1698 } 1699 #endif 1700 { 1701 struct xive_q *q = &xc->queue[xive_irq_priority]; 1702 u32 i0, i1, idx; 1703 1704 if (q->qpage) { 1705 idx = q->idx; 1706 i0 = be32_to_cpup(q->qpage + idx); 1707 idx = (idx + 1) & q->msk; 1708 i1 = be32_to_cpup(q->qpage + idx); 1709 seq_printf(m, "EQ idx=%d T=%d %08x %08x ...", 1710 q->idx, q->toggle, i0, i1); 1711 } 1712 } 1713 } 1714 seq_puts(m, "\n"); 1715 } 1716 1717 static void xive_debug_show_irq(struct seq_file *m, struct irq_data *d) 1718 { 1719 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 1720 int rc; 1721 u32 target; 1722 u8 prio; 1723 u32 lirq; 1724 struct xive_irq_data *xd; 1725 u64 val; 1726 1727 rc = xive_ops->get_irq_config(hw_irq, &target, &prio, &lirq); 1728 if (rc) { 1729 seq_printf(m, "IRQ 0x%08x : no config rc=%d\n", hw_irq, rc); 1730 return; 1731 } 1732 1733 seq_printf(m, "IRQ 0x%08x : target=0x%x prio=%02x lirq=0x%x ", 1734 hw_irq, target, prio, lirq); 1735 1736 xd = irq_data_get_irq_handler_data(d); 1737 val = xive_esb_read(xd, XIVE_ESB_GET); 1738 seq_printf(m, "flags=%c%c%c PQ=%c%c", 1739 xd->flags & XIVE_IRQ_FLAG_STORE_EOI ? 'S' : ' ', 1740 xd->flags & XIVE_IRQ_FLAG_LSI ? 'L' : ' ', 1741 xd->flags & XIVE_IRQ_FLAG_H_INT_ESB ? 'H' : ' ', 1742 val & XIVE_ESB_VAL_P ? 'P' : '-', 1743 val & XIVE_ESB_VAL_Q ? 'Q' : '-'); 1744 seq_puts(m, "\n"); 1745 } 1746 1747 static int xive_core_debug_show(struct seq_file *m, void *private) 1748 { 1749 unsigned int i; 1750 struct irq_desc *desc; 1751 int cpu; 1752 1753 if (xive_ops->debug_show) 1754 xive_ops->debug_show(m, private); 1755 1756 for_each_possible_cpu(cpu) 1757 xive_debug_show_cpu(m, cpu); 1758 1759 for_each_irq_desc(i, desc) { 1760 struct irq_data *d = irq_desc_get_irq_data(desc); 1761 1762 if (d->domain == xive_irq_domain) 1763 xive_debug_show_irq(m, d); 1764 } 1765 return 0; 1766 } 1767 DEFINE_SHOW_ATTRIBUTE(xive_core_debug); 1768 1769 int xive_core_debug_init(void) 1770 { 1771 if (xive_enabled()) 1772 debugfs_create_file("xive", 0400, powerpc_debugfs_root, 1773 NULL, &xive_core_debug_fops); 1774 return 0; 1775 } 1776