/*
 * Copyright 2016,2017 IBM Corporation.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#define pr_fmt(fmt) "xive: " fmt

#include <linux/types.h>
#include <linux/threads.h>
#include <linux/kernel.h>
#include <linux/irq.h>
#include <linux/debugfs.h>
#include <linux/smp.h>
#include <linux/interrupt.h>
#include <linux/seq_file.h>
#include <linux/init.h>
#include <linux/cpu.h>
#include <linux/of.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/msi.h>

#include <asm/prom.h>
#include <asm/io.h>
#include <asm/smp.h>
#include <asm/machdep.h>
#include <asm/irq.h>
#include <asm/errno.h>
#include <asm/xive.h>
#include <asm/xive-regs.h>
#include <asm/xmon.h>

#include "xive-internal.h"

#undef DEBUG_FLUSH
#undef DEBUG_ALL

#ifdef DEBUG_ALL
#define DBG_VERBOSE(fmt...)	pr_devel(fmt)
#else
#define DBG_VERBOSE(fmt...)	do { } while (0)
#endif

bool __xive_enabled;
EXPORT_SYMBOL_GPL(__xive_enabled);
bool xive_cmdline_disabled;

/* We use only one priority for now */
static u8 xive_irq_priority;

/* TIMA exported to KVM */
void __iomem *xive_tima;
EXPORT_SYMBOL_GPL(xive_tima);
u32 xive_tima_offset;

/* Backend ops */
static const struct xive_ops *xive_ops;

/* Our global interrupt domain */
static struct irq_domain *xive_irq_domain;

#ifdef CONFIG_SMP
/* The IPIs all use the same logical irq number */
static u32 xive_ipi_irq;
#endif

/* Xive state for each CPU */
static DEFINE_PER_CPU(struct xive_cpu *, xive_cpu);

/*
 * A "disabled" interrupt should never fire, to catch problems
 * we set its logical number to this
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)

/* An invalid CPU target */
#define XIVE_INVALID_TARGET	(-1)

/*
 * Read the next entry in a queue, return its content if it's valid
 * or 0 if there is no new entry.
 *
 * The queue pointer is moved forward unless "just_peek" is set
 */
static u32 xive_read_eq(struct xive_q *q, bool just_peek)
{
	u32 cur;

	if (!q->qpage)
		return 0;
	cur = be32_to_cpup(q->qpage + q->idx);

	/* Check valid bit (31) vs current toggle polarity */
	if ((cur >> 31) == q->toggle)
		return 0;

	/* If consuming from the queue ... */
	if (!just_peek) {
		/* Next entry */
		q->idx = (q->idx + 1) & q->msk;

		/* Wrap around: flip valid toggle */
		if (q->idx == 0)
			q->toggle ^= 1;
	}
	/* Mask out the valid bit (31) */
	return cur & 0x7fffffff;
}

/*
 * Scans all the queues that may have interrupts in them
 * (based on "pending_prio") in priority order until an
 * interrupt is found or all the queues are empty.
 *
 * Then updates the CPPR (Current Processor Priority
 * Register) based on the most favored interrupt found
 * (0xff if none) and returns what was found (0 if none).
 *
 * If just_peek is set, return the most favored pending
 * interrupt if any but don't update the queue pointers.
 *
 * Note: This function can operate generically on any number
 * of queues (up to 8). The current implementation of the XIVE
 * driver only uses a single queue however.
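 * (Every interrupt is configured at the single xive_irq_priority,
 * so in practice only that one queue ever has entries.)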
 *
 * Note2: This will also "flush" the pending_count of a queue
 * into the "count" when that queue is observed to be empty.
 * This is used to keep track of the number of interrupts
 * targeting a queue. When an interrupt is moved away from
 * a queue, we only decrement that queue count once the queue
 * has been observed empty to avoid races.
 */
static u32 xive_scan_interrupts(struct xive_cpu *xc, bool just_peek)
{
	u32 irq = 0;
	u8 prio;

	/* Find highest pending priority */
	while (xc->pending_prio != 0) {
		struct xive_q *q;

		prio = ffs(xc->pending_prio) - 1;
		DBG_VERBOSE("scan_irq: trying prio %d\n", prio);

		/* Try to fetch */
		irq = xive_read_eq(&xc->queue[prio], just_peek);

		/* Found something ? That's it */
		if (irq)
			break;

		/* Clear pending bits */
		xc->pending_prio &= ~(1 << prio);

		/*
		 * Check if the queue count needs adjusting due to
		 * interrupts being moved away. See description of
		 * xive_dec_target_count()
		 */
		q = &xc->queue[prio];
		if (atomic_read(&q->pending_count)) {
			int p = atomic_xchg(&q->pending_count, 0);
			if (p) {
				WARN_ON(p > atomic_read(&q->count));
				atomic_sub(p, &q->count);
			}
		}
	}

	/* If nothing was found, set CPPR to 0xff */
	if (irq == 0)
		prio = 0xff;

	/* Update HW CPPR to match if necessary */
	if (prio != xc->cppr) {
		DBG_VERBOSE("scan_irq: adjusting CPPR to %d\n", prio);
		xc->cppr = prio;
		out_8(xive_tima + xive_tima_offset + TM_CPPR, prio);
	}

	return irq;
}

/*
 * This is used to perform the magic loads from an ESB
 * described in xive.h
 */
static u8 xive_poke_esb(struct xive_irq_data *xd, u32 offset)
{
	u64 val;

	/* Handle HW errata */
	if (xd->flags & XIVE_IRQ_FLAG_SHIFT_BUG)
		offset |= offset << 4;

	val = in_be64(xd->eoi_mmio + offset);

	return (u8)val;
}

#ifdef CONFIG_XMON
static void xive_dump_eq(const char *name, struct xive_q *q)
{
	u32 i0, i1, idx;

	if (!q->qpage)
		return;
	idx = q->idx;
	i0 = be32_to_cpup(q->qpage + idx);
	idx = (idx + 1) & q->msk;
	i1 = be32_to_cpup(q->qpage + idx);
	xmon_printf("  %s Q T=%d %08x %08x ...\n", name,
		    q->toggle, i0, i1);
}

void xmon_xive_do_dump(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);

	xmon_printf("XIVE state for CPU %d:\n", cpu);
	xmon_printf("  pp=%02x cppr=%02x\n", xc->pending_prio, xc->cppr);
	xive_dump_eq("IRQ", &xc->queue[xive_irq_priority]);
#ifdef CONFIG_SMP
	{
		u64 val = xive_poke_esb(&xc->ipi_data, XIVE_ESB_GET);
		xmon_printf("  IPI state: %x:%c%c\n", xc->hw_ipi,
			    val & XIVE_ESB_VAL_P ? 'P' : 'p',
			    val & XIVE_ESB_VAL_Q ? 'Q' : 'q');
	}
#endif
}
#endif /* CONFIG_XMON */

static unsigned int xive_get_irq(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	u32 irq;

	/*
	 * This can be called either as a result of a HW interrupt or
	 * as a "replay" because EOI decided there was still something
	 * in one of the queues.
	 *
	 * First we perform an ACK cycle in order to update our mask
	 * of pending priorities. This will also have the effect of
	 * updating the CPPR to the most favored pending interrupts.
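	 * (The backend's update_pending() callback below refreshes
	 * xc->pending_prio from that ACK.)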
	 *
	 * In the future, if we have a way to differentiate a first
	 * entry (on HW interrupt) from a replay triggered by EOI,
	 * we could skip this on replays unless the soft-mask state
	 * tells us that a new HW interrupt occurred.
	 */
	xive_ops->update_pending(xc);

	DBG_VERBOSE("get_irq: pending=%02x\n", xc->pending_prio);

	/* Scan our queue(s) for interrupts */
	irq = xive_scan_interrupts(xc, false);

	DBG_VERBOSE("get_irq: got irq 0x%x, new pending=0x%02x\n",
		    irq, xc->pending_prio);

	/* Return pending interrupt if any */
	if (irq == XIVE_BAD_IRQ)
		return 0;
	return irq;
}

/*
 * After EOI'ing an interrupt, we need to re-check the queue
 * to see if another interrupt is pending since multiple
 * interrupts can coalesce into a single notification to the
 * CPU.
 *
 * If we find that there is indeed more in there, we call
 * force_external_irq_replay() to make Linux synthesize an
 * external interrupt on the next call to local_irq_restore().
 */
static void xive_do_queue_eoi(struct xive_cpu *xc)
{
	if (xive_scan_interrupts(xc, true) != 0) {
		DBG_VERBOSE("eoi: pending=0x%02x\n", xc->pending_prio);
		force_external_irq_replay();
	}
}

/*
 * EOI an interrupt at the source. There are several methods
 * to do this depending on the HW version and source type
 */
void xive_do_source_eoi(u32 hw_irq, struct xive_irq_data *xd)
{
	/* If the XIVE supports the new "store EOI" facility, use it */
	if (xd->flags & XIVE_IRQ_FLAG_STORE_EOI)
		out_be64(xd->eoi_mmio, 0);
	else if (hw_irq && xd->flags & XIVE_IRQ_FLAG_EOI_FW) {
		/*
		 * The FW told us to call it. This happens for some
		 * interrupt sources that need additional HW whacking
		 * beyond the ESB manipulation. For example LPC interrupts
		 * on P9 DD1.0 need a latch to be cleared in the LPC bridge
		 * itself. The firmware will take care of it.
		 */
		if (WARN_ON_ONCE(!xive_ops->eoi))
			return;
		xive_ops->eoi(hw_irq);
	} else {
		u8 eoi_val;

		/*
		 * Otherwise for EOI, we use the special MMIO that does
		 * a clear of both P and Q and returns the old Q,
		 * except for LSIs where we use the "EOI cycle" special
		 * load.
		 *
		 * This allows us to then do a re-trigger if Q was set
		 * rather than synthesizing an interrupt in software.
		 *
		 * For LSIs, using the HW EOI cycle works around a problem
		 * on P9 DD1 PHBs where the other ESB accesses don't work
		 * properly.
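		 * (As a reminder of the ESB semantics used here: P set
		 * means a notification has been sent and the interrupt
		 * may occupy a queue slot; Q set means a further trigger
		 * was latched while P was set.)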
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			in_be64(xd->eoi_mmio);
		else {
			eoi_val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
			DBG_VERBOSE("eoi_val=%x\n", eoi_val);

			/* Re-trigger if needed */
			if ((eoi_val & XIVE_ESB_VAL_Q) && xd->trig_mmio)
				out_be64(xd->trig_mmio, 0);
		}
	}
}

/* irq_chip eoi callback */
static void xive_irq_eoi(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	DBG_VERBOSE("eoi_irq: irq=%d [0x%lx] pending=%02x\n",
		    d->irq, irqd_to_hwirq(d), xc->pending_prio);

	/*
	 * EOI the source if it hasn't been disabled and hasn't
	 * been passed-through to a KVM guest
	 */
	if (!irqd_irq_disabled(d) && !irqd_is_forwarded_to_vcpu(d))
		xive_do_source_eoi(irqd_to_hwirq(d), xd);

	/*
	 * Clear saved_p to indicate that it's no longer occupying
	 * a queue slot on the target queue
	 */
	xd->saved_p = false;

	/* Check for more work in the queue */
	xive_do_queue_eoi(xc);
}

/*
 * Helper used to mask and unmask an interrupt source. This
 * is only called for normal interrupts that do not require
 * masking/unmasking via firmware.
 */
static void xive_do_source_set_mask(struct xive_irq_data *xd,
				    bool mask)
{
	u64 val;

	/*
	 * If the interrupt had P set, it may be in a queue.
	 *
	 * We need to make sure we don't re-enable it until it
	 * has been fetched from that queue and EOId. We keep
	 * a copy of that P state and use it to restore the
	 * ESB accordingly on unmask.
	 */
	if (mask) {
		val = xive_poke_esb(xd, XIVE_ESB_SET_PQ_01);
		xd->saved_p = !!(val & XIVE_ESB_VAL_P);
	} else if (xd->saved_p)
		xive_poke_esb(xd, XIVE_ESB_SET_PQ_10);
	else
		xive_poke_esb(xd, XIVE_ESB_SET_PQ_00);
}

/*
 * Try to choose "cpu" as a new interrupt target. Increments
 * the queue accounting for that target if it's not already
 * full.
 */
static bool xive_try_pick_target(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];
	int max;

	/*
	 * Calculate max number of interrupts in that queue.
	 *
	 * We leave a gap of 1 just in case...
	 */
	max = (q->msk + 1) - 1;
	return !!atomic_add_unless(&q->count, 1, max);
}

/*
 * Un-account an interrupt for a target CPU. We don't directly
 * decrement q->count since the interrupt might still be present
 * in the queue.
 *
 * Instead increment a separate counter "pending_count" which
 * will be subtracted from "count" later when that CPU observes
 * the queue to be empty.
 */
static void xive_dec_target_count(int cpu)
{
	struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
	struct xive_q *q = &xc->queue[xive_irq_priority];

	if (unlikely(WARN_ON(cpu < 0 || !xc))) {
		pr_err("%s: cpu=%d xc=%p\n", __func__, cpu, xc);
		return;
	}

	/*
	 * We increment the "pending count" which will be used
	 * to decrement the target queue count whenever it's next
	 * processed and found empty. This ensures that we don't
	 * decrement while we still have the interrupt there
	 * occupying a slot.
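	 * The actual subtraction from "count" happens in
	 * xive_scan_interrupts() once the queue is seen empty.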
	 */
	atomic_inc(&q->pending_count);
}

/* Find a tentative CPU target in a CPU mask */
static int xive_find_target_in_mask(const struct cpumask *mask,
				    unsigned int fuzz)
{
	int cpu, first, num, i;

	/* Pick up a starting point CPU in the mask based on fuzz */
	num = cpumask_weight(mask);
	first = fuzz % num;

	/* Locate it */
	cpu = cpumask_first(mask);
	for (i = 0; i < first && cpu < nr_cpu_ids; i++)
		cpu = cpumask_next(cpu, mask);

	/* Sanity check */
	if (WARN_ON(cpu >= nr_cpu_ids))
		cpu = cpumask_first(cpu_online_mask);

	/* Remember first one to handle wrap-around */
	first = cpu;

	/*
	 * Now go through the entire mask until we find a valid
	 * target.
	 */
	for (;;) {
		/*
		 * We re-check online as the fallback case passes us
		 * an untested affinity mask
		 */
		if (cpu_online(cpu) && xive_try_pick_target(cpu))
			return cpu;
		cpu = cpumask_next(cpu, mask);
		if (cpu == first)
			break;
		/* Wrap around */
		if (cpu >= nr_cpu_ids)
			cpu = cpumask_first(mask);
	}
	return -1;
}

/*
 * Pick a target CPU for an interrupt. This is done at
 * startup or if the affinity is changed in a way that
 * invalidates the current target.
 */
static int xive_pick_irq_target(struct irq_data *d,
				const struct cpumask *affinity)
{
	static unsigned int fuzz;
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	cpumask_var_t mask;
	int cpu = -1;

	/*
	 * If we have chip IDs, first we try to build a mask of
	 * CPUs matching the source chip and find a target in there
	 */
	if (xd->src_chip != XIVE_INVALID_CHIP_ID &&
	    zalloc_cpumask_var(&mask, GFP_ATOMIC)) {
		/* Build a mask of matching chip IDs */
		for_each_cpu_and(cpu, affinity, cpu_online_mask) {
			struct xive_cpu *xc = per_cpu(xive_cpu, cpu);
			if (xc->chip_id == xd->src_chip)
				cpumask_set_cpu(cpu, mask);
		}
		/* Try to find a target */
		if (cpumask_empty(mask))
			cpu = -1;
		else
			cpu = xive_find_target_in_mask(mask, fuzz++);
		free_cpumask_var(mask);
		if (cpu >= 0)
			return cpu;
		fuzz--;
	}

	/* No chip IDs, fall back to using the affinity mask */
	return xive_find_target_in_mask(affinity, fuzz++);
}

static unsigned int xive_irq_startup(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	int target, rc;

	pr_devel("xive_irq_startup: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

#ifdef CONFIG_PCI_MSI
	/*
	 * The generic MSI code returns with the interrupt disabled on the
	 * card, using the MSI mask bits. Firmware doesn't appear to unmask
	 * at that level, so we do it here by hand.
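	 * (pci_msi_unmask_irq() clears the MSI/MSI-X mask bit for this
	 * vector on the device itself.)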
	 */
	if (irq_data_get_msi_desc(d))
		pci_msi_unmask_irq(d);
#endif

	/* Pick a target */
	target = xive_pick_irq_target(d, irq_data_get_affinity_mask(d));
	if (target == XIVE_INVALID_TARGET) {
		/* Try again breaking affinity */
		target = xive_pick_irq_target(d, cpu_online_mask);
		if (target == XIVE_INVALID_TARGET)
			return -ENXIO;
		pr_warn("irq %d started with broken affinity\n", d->irq);
	}

	/* Sanity check */
	if (WARN_ON(target == XIVE_INVALID_TARGET ||
		    target >= nr_cpu_ids))
		target = smp_processor_id();

	xd->target = target;

	/*
	 * Configure the logical number to be the Linux IRQ number
	 * and set the target queue
	 */
	rc = xive_ops->configure_irq(hw_irq,
				     get_hard_smp_processor_id(target),
				     xive_irq_priority, d->irq);
	if (rc)
		return rc;

	/* Unmask the ESB */
	xive_do_source_set_mask(xd, false);

	return 0;
}

static void xive_irq_shutdown(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

	pr_devel("xive_irq_shutdown: irq %d [0x%x] data @%p\n",
		 d->irq, hw_irq, d);

	if (WARN_ON(xd->target == XIVE_INVALID_TARGET))
		return;

	/* Mask the interrupt at the source */
	xive_do_source_set_mask(xd, true);

	/*
	 * The above may have set saved_p. We clear it otherwise it
	 * will prevent re-enabling later on. It is ok to forget the
	 * fact that the interrupt might be in a queue because we are
	 * accounting that already in xive_dec_target_count() and will
	 * be re-routing it to a new queue with proper accounting when
	 * it's started up again
	 */
	xd->saved_p = false;

	/*
	 * Mask the interrupt in HW in the IVT/EAS and set the number
	 * to be the "bad" IRQ number
	 */
	xive_ops->configure_irq(hw_irq,
				get_hard_smp_processor_id(xd->target),
				0xff, XIVE_BAD_IRQ);

	xive_dec_target_count(xd->target);
	xd->target = XIVE_INVALID_TARGET;
}

static void xive_irq_unmask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_unmask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call FW to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware will no
	 * longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					xive_irq_priority, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, false);
}

static void xive_irq_mask(struct irq_data *d)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	pr_devel("xive_irq_mask: irq %d data @%p\n", d->irq, xd);

	/*
	 * This is a workaround for PCI LSI problems on P9, for
	 * these, we call OPAL to set the mask. The problems might
	 * be fixed by P9 DD2.0, in which case firmware will no
	 * longer set that flag.
	 */
	if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) {
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
		xive_ops->configure_irq(hw_irq,
					get_hard_smp_processor_id(xd->target),
					0xff, d->irq);
		return;
	}

	xive_do_source_set_mask(xd, true);
}

static int xive_irq_set_affinity(struct irq_data *d,
				 const struct cpumask *cpumask,
				 bool force)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);
	unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);
	u32 target, old_target;
	int rc = 0;

	pr_devel("xive_irq_set_affinity: irq %d\n", d->irq);

	/* Is this valid ? */
	if (cpumask_any_and(cpumask, cpu_online_mask) >= nr_cpu_ids)
		return -EINVAL;

	/*
	 * If the existing target is already in the new mask and is
	 * online, then do nothing.
	 */
	if (xd->target != XIVE_INVALID_TARGET &&
	    cpu_online(xd->target) &&
	    cpumask_test_cpu(xd->target, cpumask))
		return IRQ_SET_MASK_OK;

	/* Pick a new target */
	target = xive_pick_irq_target(d, cpumask);

	/* No target found */
	if (target == XIVE_INVALID_TARGET)
		return -ENXIO;

	/* Sanity check */
	if (WARN_ON(target >= nr_cpu_ids))
		target = smp_processor_id();

	old_target = xd->target;

	/*
	 * Only configure the irq if it's not currently passed-through to
	 * a KVM guest
	 */
	if (!irqd_is_forwarded_to_vcpu(d))
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(target),
					     xive_irq_priority, d->irq);
	if (rc < 0) {
		pr_err("Error %d reconfiguring irq %d\n", rc, d->irq);
		return rc;
	}

	pr_devel("  target: 0x%x\n", target);
	xd->target = target;

	/* Give up previous target */
	if (old_target != XIVE_INVALID_TARGET)
		xive_dec_target_count(old_target);

	return IRQ_SET_MASK_OK;
}

static int xive_irq_set_type(struct irq_data *d, unsigned int flow_type)
{
	struct xive_irq_data *xd = irq_data_get_irq_handler_data(d);

	/*
	 * We only support these. This has really no effect other than
	 * setting the corresponding descriptor bits, but those will in
	 * turn affect the resend function when re-enabling an edge
	 * interrupt.
	 *
	 * Set the default to edge as explained in map().
	 */
	if (flow_type == IRQ_TYPE_DEFAULT || flow_type == IRQ_TYPE_NONE)
		flow_type = IRQ_TYPE_EDGE_RISING;

	if (flow_type != IRQ_TYPE_EDGE_RISING &&
	    flow_type != IRQ_TYPE_LEVEL_LOW)
		return -EINVAL;

	irqd_set_trigger_type(d, flow_type);

	/*
	 * Double check it matches what the FW thinks
	 *
	 * NOTE: We don't know yet if the PAPR interface will provide
	 * the LSI vs MSI information apart from the device-tree so
	 * this check might have to move into an optional backend call
	 * that is specific to the native backend
	 */
	if ((flow_type == IRQ_TYPE_LEVEL_LOW) !=
	    !!(xd->flags & XIVE_IRQ_FLAG_LSI)) {
		pr_warn("Interrupt %d (HW 0x%x) type mismatch, Linux says %s, FW says %s\n",
			d->irq, (u32)irqd_to_hwirq(d),
			(flow_type == IRQ_TYPE_LEVEL_LOW) ? "Level" : "Edge",
			(xd->flags & XIVE_IRQ_FLAG_LSI) ?
"Level" : "Edge"); 754 } 755 756 return IRQ_SET_MASK_OK_NOCOPY; 757 } 758 759 static int xive_irq_retrigger(struct irq_data *d) 760 { 761 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 762 763 /* This should be only for MSIs */ 764 if (WARN_ON(xd->flags & XIVE_IRQ_FLAG_LSI)) 765 return 0; 766 767 /* 768 * To perform a retrigger, we first set the PQ bits to 769 * 11, then perform an EOI. 770 */ 771 xive_poke_esb(xd, XIVE_ESB_SET_PQ_11); 772 773 /* 774 * Note: We pass "0" to the hw_irq argument in order to 775 * avoid calling into the backend EOI code which we don't 776 * want to do in the case of a re-trigger. Backends typically 777 * only do EOI for LSIs anyway. 778 */ 779 xive_do_source_eoi(0, xd); 780 781 return 1; 782 } 783 784 static int xive_irq_set_vcpu_affinity(struct irq_data *d, void *state) 785 { 786 struct xive_irq_data *xd = irq_data_get_irq_handler_data(d); 787 unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d); 788 int rc; 789 u8 pq; 790 791 /* 792 * We only support this on interrupts that do not require 793 * firmware calls for masking and unmasking 794 */ 795 if (xd->flags & XIVE_IRQ_FLAG_MASK_FW) 796 return -EIO; 797 798 /* 799 * This is called by KVM with state non-NULL for enabling 800 * pass-through or NULL for disabling it 801 */ 802 if (state) { 803 irqd_set_forwarded_to_vcpu(d); 804 805 /* Set it to PQ=10 state to prevent further sends */ 806 pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_10); 807 808 /* No target ? nothing to do */ 809 if (xd->target == XIVE_INVALID_TARGET) { 810 /* 811 * An untargetted interrupt should have been 812 * also masked at the source 813 */ 814 WARN_ON(pq & 2); 815 816 return 0; 817 } 818 819 /* 820 * If P was set, adjust state to PQ=11 to indicate 821 * that a resend is needed for the interrupt to reach 822 * the guest. Also remember the value of P. 823 * 824 * This also tells us that it's in flight to a host queue 825 * or has already been fetched but hasn't been EOIed yet 826 * by the host. This it's potentially using up a host 827 * queue slot. This is important to know because as long 828 * as this is the case, we must not hard-unmask it when 829 * "returning" that interrupt to the host. 830 * 831 * This saved_p is cleared by the host EOI, when we know 832 * for sure the queue slot is no longer in use. 833 */ 834 if (pq & 2) { 835 pq = xive_poke_esb(xd, XIVE_ESB_SET_PQ_11); 836 xd->saved_p = true; 837 838 /* 839 * Sync the XIVE source HW to ensure the interrupt 840 * has gone through the EAS before we change its 841 * target to the guest. That should guarantee us 842 * that we *will* eventually get an EOI for it on 843 * the host. Otherwise there would be a small window 844 * for P to be seen here but the interrupt going 845 * to the guest queue. 846 */ 847 if (xive_ops->sync_source) 848 xive_ops->sync_source(hw_irq); 849 } else 850 xd->saved_p = false; 851 } else { 852 irqd_clr_forwarded_to_vcpu(d); 853 854 /* No host target ? hard mask and return */ 855 if (xd->target == XIVE_INVALID_TARGET) { 856 xive_do_source_set_mask(xd, true); 857 return 0; 858 } 859 860 /* 861 * Sync the XIVE source HW to ensure the interrupt 862 * has gone through the EAS before we change its 863 * target to the host. 864 */ 865 if (xive_ops->sync_source) 866 xive_ops->sync_source(hw_irq); 867 868 /* 869 * By convention we are called with the interrupt in 870 * a PQ=10 or PQ=11 state, ie, it won't fire and will 871 * have latched in Q whether there's a pending HW 872 * interrupt or not. 873 * 874 * First reconfigure the target. 
		 */
		rc = xive_ops->configure_irq(hw_irq,
					     get_hard_smp_processor_id(xd->target),
					     xive_irq_priority, d->irq);
		if (rc)
			return rc;

		/*
		 * Then if saved_p is not set, effectively re-enable the
		 * interrupt with an EOI. If it is set, we know there is
		 * still a message in a host queue somewhere that will be
		 * EOId eventually.
		 *
		 * Note: We don't check irqd_irq_disabled(). Effectively,
		 * we *will* let the irq get through even if masked if the
		 * HW is still firing it in order to deal with the whole
		 * saved_p business properly. If the interrupt triggers
		 * while masked, the generic code will re-mask it anyway.
		 */
		if (!xd->saved_p)
			xive_do_source_eoi(hw_irq, xd);
	}
	return 0;
}

static struct irq_chip xive_irq_chip = {
	.name = "XIVE-IRQ",
	.irq_startup = xive_irq_startup,
	.irq_shutdown = xive_irq_shutdown,
	.irq_eoi = xive_irq_eoi,
	.irq_mask = xive_irq_mask,
	.irq_unmask = xive_irq_unmask,
	.irq_set_affinity = xive_irq_set_affinity,
	.irq_set_type = xive_irq_set_type,
	.irq_retrigger = xive_irq_retrigger,
	.irq_set_vcpu_affinity = xive_irq_set_vcpu_affinity,
};

bool is_xive_irq(struct irq_chip *chip)
{
	return chip == &xive_irq_chip;
}
EXPORT_SYMBOL_GPL(is_xive_irq);

void xive_cleanup_irq_data(struct xive_irq_data *xd)
{
	if (xd->eoi_mmio) {
		iounmap(xd->eoi_mmio);
		if (xd->eoi_mmio == xd->trig_mmio)
			xd->trig_mmio = NULL;
		xd->eoi_mmio = NULL;
	}
	if (xd->trig_mmio) {
		iounmap(xd->trig_mmio);
		xd->trig_mmio = NULL;
	}
}
EXPORT_SYMBOL_GPL(xive_cleanup_irq_data);

static int xive_irq_alloc_data(unsigned int virq, irq_hw_number_t hw)
{
	struct xive_irq_data *xd;
	int rc;

	xd = kzalloc(sizeof(struct xive_irq_data), GFP_KERNEL);
	if (!xd)
		return -ENOMEM;
	rc = xive_ops->populate_irq_data(hw, xd);
	if (rc) {
		kfree(xd);
		return rc;
	}
	xd->target = XIVE_INVALID_TARGET;
	irq_set_handler_data(virq, xd);

	return 0;
}

static void xive_irq_free_data(unsigned int virq)
{
	struct xive_irq_data *xd = irq_get_handler_data(virq);

	if (!xd)
		return;
	irq_set_handler_data(virq, NULL);
	xive_cleanup_irq_data(xd);
	kfree(xd);
}

#ifdef CONFIG_SMP

static void xive_cause_ipi(int cpu)
{
	struct xive_cpu *xc;
	struct xive_irq_data *xd;

	xc = per_cpu(xive_cpu, cpu);

	DBG_VERBOSE("IPI CPU %d -> %d (HW IRQ 0x%x)\n",
		    smp_processor_id(), cpu, xc->hw_ipi);

	xd = &xc->ipi_data;
	if (WARN_ON(!xd->trig_mmio))
		return;
	out_be64(xd->trig_mmio, 0);
}

static irqreturn_t xive_muxed_ipi_action(int irq, void *dev_id)
{
	return smp_ipi_demux();
}

static void xive_ipi_eoi(struct irq_data *d)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Handle possible race with unplug and drop stale IPIs */
	if (!xc)
		return;
	xive_do_source_eoi(xc->hw_ipi, &xc->ipi_data);
	xive_do_queue_eoi(xc);
}

static void xive_ipi_do_nothing(struct irq_data *d)
{
	/*
	 * Nothing to do, we never mask/unmask IPIs, but the callback
	 * has to exist for the struct irq_chip.
	 */
}

static struct irq_chip xive_ipi_chip = {
	.name = "XIVE-IPI",
	.irq_eoi = xive_ipi_eoi,
	.irq_mask = xive_ipi_do_nothing,
	.irq_unmask = xive_ipi_do_nothing,
};

static void __init xive_request_ipi(void)
{
	unsigned int virq;

	/*
	 * Initialization failed, move on, we might manage to
	 * reach the point where we display our errors before
	 * the system falls apart
	 */
	if (!xive_irq_domain)
		return;

	/* Initialize it */
	virq = irq_create_mapping(xive_irq_domain, 0);
	xive_ipi_irq = virq;

	WARN_ON(request_irq(virq, xive_muxed_ipi_action,
			    IRQF_PERCPU | IRQF_NO_THREAD, "IPI", NULL));
}

static int xive_setup_cpu_ipi(unsigned int cpu)
{
	struct xive_cpu *xc;
	int rc;

	pr_debug("Setting up IPI for CPU %d\n", cpu);

	xc = per_cpu(xive_cpu, cpu);

	/* Check if we are already setup */
	if (xc->hw_ipi != 0)
		return 0;

	/* Grab an IPI from the backend, this will populate xc->hw_ipi */
	if (xive_ops->get_ipi(cpu, xc))
		return -EIO;

	/*
	 * Populate the IRQ data in the xive_cpu structure and
	 * configure the HW / enable the IPIs.
	 */
	rc = xive_ops->populate_irq_data(xc->hw_ipi, &xc->ipi_data);
	if (rc) {
		pr_err("Failed to populate IPI data on CPU %d\n", cpu);
		return -EIO;
	}
	rc = xive_ops->configure_irq(xc->hw_ipi,
				     get_hard_smp_processor_id(cpu),
				     xive_irq_priority, xive_ipi_irq);
	if (rc) {
		pr_err("Failed to map IPI CPU %d\n", cpu);
		return -EIO;
	}
	pr_devel("CPU %d HW IPI %x, virq %d, trig_mmio=%p\n", cpu,
		 xc->hw_ipi, xive_ipi_irq, xc->ipi_data.trig_mmio);

	/* Unmask it */
	xive_do_source_set_mask(&xc->ipi_data, false);

	return 0;
}

static void xive_cleanup_cpu_ipi(unsigned int cpu, struct xive_cpu *xc)
{
	/* Disable the IPI and free the IRQ data */

	/* Already cleaned up ? */
	if (xc->hw_ipi == 0)
		return;

	/* Mask the IPI */
	xive_do_source_set_mask(&xc->ipi_data, true);

	/*
	 * Note: We don't call xive_cleanup_irq_data() to free
	 * the mappings as this is called from an IPI on kexec
	 * which is not a safe environment to call iounmap()
	 */

	/* Deconfigure/mask in the backend */
	xive_ops->configure_irq(xc->hw_ipi, hard_smp_processor_id(),
				0xff, xive_ipi_irq);

	/* Free the IPIs in the backend */
	xive_ops->put_ipi(cpu, xc);
}

void __init xive_smp_probe(void)
{
	smp_ops->cause_ipi = xive_cause_ipi;

	/* Register the IPI */
	xive_request_ipi();

	/* Allocate and setup IPI for the boot CPU */
	xive_setup_cpu_ipi(smp_processor_id());
}

#endif /* CONFIG_SMP */

static int xive_irq_domain_map(struct irq_domain *h, unsigned int virq,
			       irq_hw_number_t hw)
{
	int rc;

	/*
	 * Mark interrupts as edge sensitive by default so that resend
	 * actually works. Will fix that up below if needed.
	 */
	irq_clear_status_flags(virq, IRQ_LEVEL);

#ifdef CONFIG_SMP
	/* IPIs are special and come up with HW number 0 */
	if (hw == 0) {
		/*
		 * IPIs are marked per-cpu.
		 * We use separate HW interrupts under the hood but
		 * associated with the same "linux" interrupt.
		 */
		irq_set_chip_and_handler(virq, &xive_ipi_chip,
					 handle_percpu_irq);
		return 0;
	}
#endif

	rc = xive_irq_alloc_data(virq, hw);
	if (rc)
		return rc;

	irq_set_chip_and_handler(virq, &xive_irq_chip, handle_fasteoi_irq);

	return 0;
}

static void xive_irq_domain_unmap(struct irq_domain *d, unsigned int virq)
{
	struct irq_data *data = irq_get_irq_data(virq);
	unsigned int hw_irq;

	/* XXX Assign BAD number */
	if (!data)
		return;
	hw_irq = (unsigned int)irqd_to_hwirq(data);
	if (hw_irq)
		xive_irq_free_data(virq);
}

static int xive_irq_domain_xlate(struct irq_domain *h, struct device_node *ct,
				 const u32 *intspec, unsigned int intsize,
				 irq_hw_number_t *out_hwirq, unsigned int *out_flags)
{
	*out_hwirq = intspec[0];

	/*
	 * If intsize is at least 2, we look for the type in the second
	 * cell, where we assume the LSB indicates a level interrupt.
	 */
	if (intsize > 1) {
		if (intspec[1] & 1)
			*out_flags = IRQ_TYPE_LEVEL_LOW;
		else
			*out_flags = IRQ_TYPE_EDGE_RISING;
	} else
		*out_flags = IRQ_TYPE_LEVEL_LOW;

	return 0;
}

static int xive_irq_domain_match(struct irq_domain *h, struct device_node *node,
				 enum irq_domain_bus_token bus_token)
{
	return xive_ops->match(node);
}

static const struct irq_domain_ops xive_irq_domain_ops = {
	.match = xive_irq_domain_match,
	.map = xive_irq_domain_map,
	.unmap = xive_irq_domain_unmap,
	.xlate = xive_irq_domain_xlate,
};

static void __init xive_init_host(void)
{
	xive_irq_domain = irq_domain_add_nomap(NULL, XIVE_MAX_IRQ,
					       &xive_irq_domain_ops, NULL);
	if (WARN_ON(xive_irq_domain == NULL))
		return;
	irq_set_default_host(xive_irq_domain);
}

static void xive_cleanup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	if (xc->queue[xive_irq_priority].qpage)
		xive_ops->cleanup_queue(cpu, xc, xive_irq_priority);
}

static int xive_setup_cpu_queues(unsigned int cpu, struct xive_cpu *xc)
{
	int rc = 0;

	/* We set up a single queue for now, with a 64k page */
	if (!xc->queue[xive_irq_priority].qpage)
		rc = xive_ops->setup_queue(cpu, xc, xive_irq_priority);

	return rc;
}

static int xive_prepare_cpu(unsigned int cpu)
{
	struct xive_cpu *xc;

	xc = per_cpu(xive_cpu, cpu);
	if (!xc) {
		struct device_node *np;

		xc = kzalloc_node(sizeof(struct xive_cpu),
				  GFP_KERNEL, cpu_to_node(cpu));
		if (!xc)
			return -ENOMEM;
		np = of_get_cpu_node(cpu, NULL);
		if (np)
			xc->chip_id = of_get_ibm_chip_id(np);
		of_node_put(np);

		per_cpu(xive_cpu, cpu) = xc;
	}

	/* Setup EQs if not already */
	return xive_setup_cpu_queues(cpu, xc);
}

static void xive_setup_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);

	/* Debug: Dump the TM state */
	pr_devel("CPU %d [HW 0x%02x] VT=%02x\n",
		 smp_processor_id(), hard_smp_processor_id(),
		 in_8(xive_tima + xive_tima_offset + TM_WORD2));

	/* The backend might have additional things to do */
	if (xive_ops->setup_cpu)
		xive_ops->setup_cpu(smp_processor_id(), xc);

	/* Set CPPR to 0xff to enable flow of interrupts */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

#ifdef CONFIG_SMP
void xive_smp_setup_cpu(void)
{
	pr_devel("SMP setup CPU %d\n", smp_processor_id());

	/* This will have already been done on the boot CPU */
	if (smp_processor_id() != boot_cpuid)
		xive_setup_cpu();
}

int xive_smp_prepare_cpu(unsigned int cpu)
{
	int rc;

	/* Allocate per-CPU data and queues */
	rc = xive_prepare_cpu(cpu);
	if (rc)
		return rc;

	/* Allocate and setup IPI for the new CPU */
	return xive_setup_cpu_ipi(cpu);
}

#ifdef CONFIG_HOTPLUG_CPU
static void xive_flush_cpu_queue(unsigned int cpu, struct xive_cpu *xc)
{
	u32 irq;

	/* We assume local irqs are disabled */
	WARN_ON(!irqs_disabled());

	/* Check what's already in the CPU queue */
	while ((irq = xive_scan_interrupts(xc, false)) != 0) {
		/*
		 * We need to re-route that interrupt to its new destination.
		 * First get and lock the descriptor
		 */
		struct irq_desc *desc = irq_to_desc(irq);
		struct irq_data *d = irq_desc_get_irq_data(desc);
		struct xive_irq_data *xd;
		unsigned int hw_irq = (unsigned int)irqd_to_hwirq(d);

		/*
		 * Ignore anything that isn't a XIVE irq and ignore
		 * IPIs, so they can just be dropped.
		 */
		if (d->domain != xive_irq_domain || hw_irq == 0)
			continue;

		/*
		 * The IRQ should have already been re-routed, it's just a
		 * stale entry in the old queue, so re-trigger it in order
		 * to make it reach its new destination.
		 */
#ifdef DEBUG_FLUSH
		pr_info("CPU %d: Got irq %d while offline, re-sending...\n",
			cpu, irq);
#endif
		raw_spin_lock(&desc->lock);
		xd = irq_desc_get_handler_data(desc);

		/*
		 * For LSIs, we EOI, this will cause a resend if it's
		 * still asserted. Otherwise do an MSI retrigger.
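		 * (xive_irq_retrigger() sets PQ to 11 and then EOIs,
		 * which forces a new trigger if Q was latched.)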
		 */
		if (xd->flags & XIVE_IRQ_FLAG_LSI)
			xive_do_source_eoi(irqd_to_hwirq(d), xd);
		else
			xive_irq_retrigger(d);

		raw_spin_unlock(&desc->lock);
	}
}

void xive_smp_disable_cpu(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Migrate interrupts away from the CPU */
	irq_migrate_all_off_this_cpu();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Flush everything still in the queue */
	xive_flush_cpu_queue(cpu, xc);

	/* Re-enable CPPR */
	xc->cppr = 0xff;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0xff);
}

void xive_flush_interrupt(void)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Called if an interrupt occurs while the CPU is hot unplugged */
	xive_flush_cpu_queue(cpu, xc);
}

#endif /* CONFIG_HOTPLUG_CPU */

#endif /* CONFIG_SMP */

void xive_kexec_teardown_cpu(int secondary)
{
	struct xive_cpu *xc = __this_cpu_read(xive_cpu);
	unsigned int cpu = smp_processor_id();

	/* Set CPPR to 0 to disable flow of interrupts */
	xc->cppr = 0;
	out_8(xive_tima + xive_tima_offset + TM_CPPR, 0);

	/* Backend cleanup if any */
	if (xive_ops->teardown_cpu)
		xive_ops->teardown_cpu(cpu, xc);

#ifdef CONFIG_SMP
	/* Get rid of IPI */
	xive_cleanup_cpu_ipi(cpu, xc);
#endif

	/* Disable and free the queues */
	xive_cleanup_cpu_queues(cpu, xc);
}

void xive_shutdown(void)
{
	xive_ops->shutdown();
}

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio)
{
	xive_tima = area;
	xive_tima_offset = offset;
	xive_ops = ops;
	xive_irq_priority = max_prio;

	ppc_md.get_irq = xive_get_irq;
	__xive_enabled = true;

	pr_devel("Initializing host..\n");
	xive_init_host();

	pr_devel("Initializing boot CPU..\n");

	/* Allocate per-CPU data and queues */
	xive_prepare_cpu(smp_processor_id());

	/* Get ready for interrupts */
	xive_setup_cpu();

	pr_info("Interrupt handling initialized with %s backend\n",
		xive_ops->name);
	pr_info("Using priority %d for all interrupts\n", max_prio);

	return true;
}

static int __init xive_off(char *arg)
{
	xive_cmdline_disabled = true;
	return 0;
}
__setup("xive=off", xive_off);