// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Derived from arch/i386/kernel/irq.c
 *   Copyright (C) 1992 Linus Torvalds
 * Adapted from arch/i386 by Gary Thomas
 *   Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Updated and modified by Cort Dougan <cort@fsmlabs.com>
 *   Copyright (C) 1996-2001 Cort Dougan
 * Adapted for Power Macintosh by Paul Mackerras
 *   Copyright (C) 1996 Paul Mackerras (paulus@cs.anu.edu.au)
 *
 * This file contains the code used by various IRQ handling routines:
 * asking for different IRQs should be done through these routines
 * instead of just grabbing them. Thus setups with different IRQ numbers
 * shouldn't result in any weird surprises, and installing new handlers
 * should be easier.
 *
 * The MPC8xx has an interrupt mask in the SIU. If a bit is set, the
 * interrupt is _enabled_. As expected, IRQ0 is bit 0 in the 32-bit
 * mask register (of which only 16 are defined), hence the weird shifting
 * and complement of the cached_irq_mask. I want to be able to stuff
 * this right into the SIU SMASK register.
 * Many of the prep/chrp functions are conditionally compiled on CONFIG_PPC_8xx
 * to reduce code space and undefined function references.
 */

#undef DEBUG

#include <linux/export.h>
#include <linux/threads.h>
#include <linux/kernel_stat.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/ptrace.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/timex.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/cpumask.h>
#include <linux/profile.h>
#include <linux/bitops.h>
#include <linux/list.h>
#include <linux/radix-tree.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/debugfs.h>
#include <linux/of.h>
#include <linux/of_irq.h>
#include <linux/vmalloc.h>
#include <linux/pgtable.h>

#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/irq.h>
#include <asm/cache.h>
#include <asm/prom.h>
#include <asm/ptrace.h>
#include <asm/machdep.h>
#include <asm/udbg.h>
#include <asm/smp.h>
#include <asm/livepatch.h>
#include <asm/asm-prototypes.h>
#include <asm/hw_irq.h>
#include <asm/softirq_stack.h>

#ifdef CONFIG_PPC64
#include <asm/paca.h>
#include <asm/firmware.h>
#include <asm/lv1call.h>
#include <asm/dbell.h>
#endif
#define CREATE_TRACE_POINTS
#include <asm/trace.h>
#include <asm/cpu_has_feature.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

#ifdef CONFIG_PPC32
atomic_t ppc_n_lost_interrupts;

#ifdef CONFIG_TAU_INT
extern int tau_initialized;
u32 tau_interrupts(unsigned long cpu);
#endif
#endif /* CONFIG_PPC32 */

#ifdef CONFIG_PPC64

int distribute_irqs = 1;

static inline notrace unsigned long get_irq_happened(void)
{
	unsigned long happened;

	__asm__ __volatile__("lbz %0,%1(13)"
	: "=r" (happened) : "i" (offsetof(struct paca_struct, irq_happened)));

	return happened;
}
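/*
 * For reference, the lbz above is roughly equivalent to
 *
 *	return local_paca->irq_happened;
 *
 * r13 holds the paca pointer on 64-bit, so the byte is loaded straight
 * from it, avoiding the debug_smp_processor_id() checking that
 * get_paca() can pull into this low level, notrace path.
 */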
void replay_soft_interrupts(void)
{
	struct pt_regs regs;

	/*
	 * Be careful here, calling these interrupt handlers can cause
	 * softirqs to be raised, which they may run when calling irq_exit,
	 * which will cause local_irq_enable() to be run, which can then
	 * recurse into this function. Don't keep any state across
	 * interrupt handler calls which may change underneath us.
	 *
	 * We use local_paca rather than get_paca() to avoid all the
	 * debug_smp_processor_id() business in this low level function.
	 */

	ppc_save_regs(&regs);
	regs.softe = IRQS_ENABLED;

again:
	if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
		WARN_ON_ONCE(mfmsr() & MSR_EE);

	/*
	 * Force the delivery of pending soft-disabled interrupts on PS3.
	 * Any HV call will have this side effect.
	 */
	if (firmware_has_feature(FW_FEATURE_PS3_LV1)) {
		u64 tmp, tmp2;
		lv1_get_version_info(&tmp, &tmp2);
	}

	/*
	 * Check if a Hypervisor Maintenance interrupt happened.
	 * This is a higher priority interrupt than the others, so
	 * replay it first.
	 */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_HMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_HMI;
		regs.trap = 0xe60;
		handle_hmi_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_DEC) {
		local_paca->irq_happened &= ~PACA_IRQ_DEC;
		regs.trap = 0x900;
		timer_interrupt(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & PACA_IRQ_EE) {
		local_paca->irq_happened &= ~PACA_IRQ_EE;
		regs.trap = 0x500;
		do_IRQ(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (IS_ENABLED(CONFIG_PPC_DOORBELL) && (local_paca->irq_happened & PACA_IRQ_DBELL)) {
		local_paca->irq_happened &= ~PACA_IRQ_DBELL;
		if (IS_ENABLED(CONFIG_PPC_BOOK3E))
			regs.trap = 0x280;
		else
			regs.trap = 0xa00;
		doorbell_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	/* Book3E does not support soft-masking PMI interrupts */
	if (IS_ENABLED(CONFIG_PPC_BOOK3S) && (local_paca->irq_happened & PACA_IRQ_PMI)) {
		local_paca->irq_happened &= ~PACA_IRQ_PMI;
		regs.trap = 0xf00;
		performance_monitor_exception(&regs);
		if (!(local_paca->irq_happened & PACA_IRQ_HARD_DIS))
			hard_irq_disable();
	}

	if (local_paca->irq_happened & ~PACA_IRQ_HARD_DIS) {
		/*
		 * We are responding to the next interrupt, so interrupt-off
		 * latencies should be reset here.
		 */
		trace_hardirqs_on();
		trace_hardirqs_off();
		goto again;
	}
}
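/*
 * Sketch of the usual way the replay above is reached when an
 * interrupt arrived while soft-disabled:
 *
 *	local_irq_enable()
 *	  arch_local_irq_restore(IRQS_ENABLED)
 *	    replay_soft_interrupts_irqrestore()
 *	      replay_soft_interrupts()
 *
 * Each handler is called with a pt_regs filled in by ppc_save_regs(),
 * with regs.trap set to the vector of the interrupt being replayed.
 */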
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_PPC_KUAP)
static inline void replay_soft_interrupts_irqrestore(void)
{
	unsigned long kuap_state = get_kuap();

	/*
	 * Check if anything calls local_irq_enable/restore() when KUAP is
	 * disabled (user access enabled). We handle that case here by saving
	 * and re-locking AMR but we shouldn't get here in the first place,
	 * hence the warning.
	 */
	kuap_assert_locked();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(AMR_KUAP_BLOCKED);

	replay_soft_interrupts();

	if (kuap_state != AMR_KUAP_BLOCKED)
		set_kuap(kuap_state);
}
#else
#define replay_soft_interrupts_irqrestore() replay_soft_interrupts()
#endif

notrace void arch_local_irq_restore(unsigned long mask)
{
	unsigned char irq_happened;

	/* Write the new soft-enabled value */
	irq_soft_mask_set(mask);
	if (mask)
		return;

	/*
	 * From this point onward, we can take interrupts, preempt,
	 * etc... unless we got hard-disabled. We check if an event
	 * happened. If none happened, we know we can just return.
	 *
	 * We may have preempted before the check below, in which case
	 * we are checking the "new" CPU instead of the old one. This
	 * is only a problem if an event happened on the "old" CPU.
	 *
	 * External interrupt events will have caused interrupts to
	 * be hard-disabled, so there is no problem, we
	 * cannot have preempted.
	 */
	irq_happened = get_irq_happened();
	if (!irq_happened) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		return;
	}

	/* We need to hard disable to replay. */
	if (!(irq_happened & PACA_IRQ_HARD_DIS)) {
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG))
			WARN_ON_ONCE(!(mfmsr() & MSR_EE));
		__hard_irq_disable();
		local_paca->irq_happened |= PACA_IRQ_HARD_DIS;
	} else {
		/*
		 * We should already be hard disabled here. We had bugs
		 * where that wasn't the case so let's double check it and
		 * warn if we are wrong. Only do that when IRQ tracing
		 * is enabled as mfmsr() can be costly.
		 */
		if (IS_ENABLED(CONFIG_PPC_IRQ_SOFT_MASK_DEBUG)) {
			if (WARN_ON_ONCE(mfmsr() & MSR_EE))
				__hard_irq_disable();
		}

		if (irq_happened == PACA_IRQ_HARD_DIS) {
			local_paca->irq_happened = 0;
			__hard_irq_enable();
			return;
		}
	}

	/*
	 * Disable preempt here, so that the below preempt_enable will
	 * perform resched if required (a replayed interrupt may set
	 * need_resched).
	 */
	preempt_disable();
	irq_soft_mask_set(IRQS_ALL_DISABLED);
	trace_hardirqs_off();

	replay_soft_interrupts_irqrestore();
	local_paca->irq_happened = 0;

	trace_hardirqs_on();
	irq_soft_mask_set(IRQS_ENABLED);
	__hard_irq_enable();
	preempt_enable();
}
EXPORT_SYMBOL(arch_local_irq_restore);
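/*
 * Sketch of the lazy disable protocol as seen by a caller, assuming
 * interrupts start out fully enabled:
 *
 *	local_irq_disable();	// only sets the soft mask, EE stays on
 *	...			// a hardware interrupt here is recorded in
 *				// paca->irq_happened, the CPU is left hard
 *				// disabled and the handler is deferred
 *	local_irq_enable();	// lands in arch_local_irq_restore() above,
 *				// which replays the deferred interrupt(s)
 *				// and then hard enables again
 */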
/*
 * This is a helper to use when about to go into idle low-power
 * when the latter has the side effect of re-enabling interrupts
 * (such as calling H_CEDE under pHyp).
 *
 * You call this function with interrupts soft-disabled (this is
 * already the case when ppc_md.power_save is called). The function
 * will return whether to enter power save or just return.
 *
 * In the former case, it will have notified lockdep of interrupts
 * being re-enabled and generally sanitized the lazy irq state,
 * and in the latter case it will leave with interrupts hard
 * disabled and marked as such, so the local_irq_enable() call
 * in arch_cpu_idle() will properly re-enable everything.
 */
bool prep_irq_for_idle(void)
{
	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	/*
	 * Mark interrupts as soft-enabled and clear the
	 * PACA_IRQ_HARD_DIS from the pending mask since we
	 * are about to hard enable as well as a side effect
	 * of entering the low power state.
	 */
	local_paca->irq_happened &= ~PACA_IRQ_HARD_DIS;
	irq_soft_mask_set(IRQS_ENABLED);

	/* Tell the caller to enter the low power state */
	return true;
}

#ifdef CONFIG_PPC_BOOK3S
/*
 * This is for idle sequences that return with IRQs off, but the
 * idle state itself wakes on interrupt. Tell the irq tracer that
 * IRQs are enabled for the duration of idle so it does not get long
 * off times. Must be paired with fini_irq_for_idle_irqsoff.
 */
bool prep_irq_for_idle_irqsoff(void)
{
	WARN_ON(!irqs_disabled());

	/*
	 * First we need to hard disable to ensure no interrupt
	 * occurs before we effectively enter the low power state
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/*
	 * If anything happened while we were soft-disabled,
	 * we return now and do not enter the low power state.
	 */
	if (lazy_irq_pending())
		return false;

	/* Tell lockdep we are about to re-enable */
	trace_hardirqs_on();

	return true;
}

/*
 * Take the SRR1 wakeup reason, index into this table to find the
 * appropriate irq_happened bit.
 *
 * System reset exceptions taken in idle state also come through here,
 * but they are NMI interrupts so do not need to wait for IRQs to be
 * restored, and should be taken as early as practical. These are marked
 * with 0xff in the table. The Power ISA specifies 0100b as the system
 * reset interrupt reason.
 */
#define IRQ_SYSTEM_RESET	0xff

static const u8 srr1_to_lazyirq[0x10] = {
	0, 0, 0,
	PACA_IRQ_DBELL,
	IRQ_SYSTEM_RESET,
	PACA_IRQ_DBELL,
	PACA_IRQ_DEC,
	0,
	PACA_IRQ_EE,
	PACA_IRQ_EE,
	PACA_IRQ_HMI,
	0, 0, 0, 0, 0 };

void replay_system_reset(void)
{
	struct pt_regs regs;

	ppc_save_regs(&regs);
	regs.trap = 0x100;
	get_paca()->in_nmi = 1;
	system_reset_exception(&regs);
	get_paca()->in_nmi = 0;
}
EXPORT_SYMBOL_GPL(replay_system_reset);

void irq_set_pending_from_srr1(unsigned long srr1)
{
	unsigned int idx = (srr1 & SRR1_WAKEMASK_P8) >> 18;
	u8 reason = srr1_to_lazyirq[idx];

	/*
	 * Take the system reset now, which is immediately after registers
	 * are restored from idle. It's an NMI, so interrupts need not be
	 * re-enabled before it is taken.
	 */
	if (unlikely(reason == IRQ_SYSTEM_RESET)) {
		replay_system_reset();
		return;
	}

	if (reason == PACA_IRQ_DBELL) {
		/*
		 * When doorbell triggers a system reset wakeup, the message
		 * is not cleared, so if the doorbell interrupt is replayed
		 * and the IPI handled, the doorbell interrupt would still
		 * fire when EE is enabled.
		 *
		 * To avoid taking the superfluous doorbell interrupt,
		 * execute a msgclr here before the interrupt is replayed.
		 */
		ppc_msgclr(PPC_DBELL_MSGTYPE);
	}

	/*
	 * The 0 index (SRR1[42:45]=b0000) must always evaluate to 0,
	 * so this can be called unconditionally with the SRR1 wake
	 * reason as returned by the idle code, which uses 0 to mean no
	 * interrupt.
	 *
	 * If a future CPU was to designate this as an interrupt reason,
	 * then a new index for no interrupt must be assigned.
	 */
	local_paca->irq_happened |= reason;
}
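/*
 * Worked example of the decode above: a decrementer wakeup reports
 * SRR1[42:45] = 0b0110, so
 *
 *	idx    = (srr1 & SRR1_WAKEMASK_P8) >> 18;	// 0x6
 *	reason = srr1_to_lazyirq[idx];			// PACA_IRQ_DEC
 *
 * and PACA_IRQ_DEC is OR'ed into irq_happened to be replayed once
 * interrupts are soft-enabled again. Index 0b0100 (system reset) is
 * the one case taken immediately via replay_system_reset().
 */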
#endif /* CONFIG_PPC_BOOK3S */

/*
 * Force a replay of the external interrupt handler on this CPU.
 */
void force_external_irq_replay(void)
{
	/*
	 * This must only be called with interrupts soft-disabled,
	 * the replay will happen when re-enabling.
	 */
	WARN_ON(!arch_irqs_disabled());

	/*
	 * Interrupts must always be hard disabled before irq_happened is
	 * modified (to prevent lost update in case of interrupt between
	 * load and store).
	 */
	__hard_irq_disable();
	local_paca->irq_happened |= PACA_IRQ_HARD_DIS;

	/* Indicate in the PACA that we have an interrupt to replay */
	local_paca->irq_happened |= PACA_IRQ_EE;
}

#endif /* CONFIG_PPC64 */

int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

#if defined(CONFIG_PPC32) && defined(CONFIG_TAU_INT)
	if (tau_initialized) {
		seq_printf(p, "%*s: ", prec, "TAU");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", tau_interrupts(j));
		seq_puts(p, "  PowerPC Thermal Assist (cpu temp)\n");
	}
#endif /* CONFIG_PPC32 && CONFIG_TAU_INT */

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_event);
	seq_printf(p, "  Local timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "BCT");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).broadcast_irqs_event);
	seq_printf(p, "  Broadcast timer interrupts for timer event device\n");

	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).timer_irqs_others);
	seq_printf(p, "  Local timer interrupts for others\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).spurious_irqs);
	seq_printf(p, "  Spurious interrupts\n");

	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).pmu_irqs);
	seq_printf(p, "  Performance monitoring interrupts\n");

	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).mce_exceptions);
	seq_printf(p, "  Machine check exceptions\n");

#ifdef CONFIG_PPC_BOOK3S_64
	if (cpu_has_feature(CPU_FTR_HVMODE)) {
		seq_printf(p, "%*s: ", prec, "HMI");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", paca_ptrs[j]->hmi_irqs);
		seq_printf(p, "  Hypervisor Maintenance Interrupts\n");
	}
#endif

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).sreset_irqs);
	seq_printf(p, "  System Reset interrupts\n");

#ifdef CONFIG_PPC_WATCHDOG
	seq_printf(p, "%*s: ", prec, "WDG");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(irq_stat, j).soft_nmi_irqs);
	seq_printf(p, "  Watchdog soft-NMI interrupts\n");
#endif

#ifdef CONFIG_PPC_DOORBELL
	if (cpu_has_feature(CPU_FTR_DBELL)) {
		seq_printf(p, "%*s: ", prec, "DBL");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", per_cpu(irq_stat, j).doorbell_irqs);
		seq_printf(p, "  Doorbell interrupts\n");
	}
#endif

	return 0;
}

/*
 * /proc/stat helpers
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = per_cpu(irq_stat, cpu).timer_irqs_event;

	sum += per_cpu(irq_stat, cpu).broadcast_irqs_event;
	sum += per_cpu(irq_stat, cpu).pmu_irqs;
	sum += per_cpu(irq_stat, cpu).mce_exceptions;
	sum += per_cpu(irq_stat, cpu).spurious_irqs;
	sum += per_cpu(irq_stat, cpu).timer_irqs_others;
#ifdef CONFIG_PPC_BOOK3S_64
	sum += paca_ptrs[cpu]->hmi_irqs;
#endif
	sum += per_cpu(irq_stat, cpu).sreset_irqs;
#ifdef CONFIG_PPC_WATCHDOG
	sum += per_cpu(irq_stat, cpu).soft_nmi_irqs;
#endif
#ifdef CONFIG_PPC_DOORBELL
	sum += per_cpu(irq_stat, cpu).doorbell_irqs;
#endif

	return sum;
}

static inline void check_stack_overflow(void)
{
	long sp;

	if (!IS_ENABLED(CONFIG_DEBUG_STACKOVERFLOW))
		return;

	sp = current_stack_pointer & (THREAD_SIZE - 1);

	/* check for stack overflow: is there less than 2KB free? */
	if (unlikely(sp < 2048)) {
		pr_err("do_IRQ: stack overflow: %ld\n", sp);
		dump_stack();
	}
}

static __always_inline void call_do_softirq(const void *sp)
{
	/* Temporarily switch r1 to sp, call __do_softirq() then restore r1. */
	asm volatile (
		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
		"mr		%%r1, %[sp]		;"
		"bl		%[callee]		;"
		 PPC_LL "	%%r1, 0(%%r1)		;"
		 : // Outputs
		 : // Inputs
		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
		   [callee] "i" (__do_softirq)
		 : // Clobbers
		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		   "cr7", "r0", "r3", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		   "r11", "r12"
	);
}

static __always_inline void call_do_irq(struct pt_regs *regs, void *sp)
{
	register unsigned long r3 asm("r3") = (unsigned long)regs;

	/* Temporarily switch r1 to sp, call __do_irq() then restore r1. */
	asm volatile (
		 PPC_STLU "	%%r1, %[offset](%[sp])	;"
		"mr		%%r1, %[sp]		;"
		"bl		%[callee]		;"
		 PPC_LL "	%%r1, 0(%%r1)		;"
		 : // Outputs
		   "+r" (r3)
		 : // Inputs
		   [sp] "b" (sp), [offset] "i" (THREAD_SIZE - STACK_FRAME_OVERHEAD),
		   [callee] "i" (__do_irq)
		 : // Clobbers
		   "lr", "xer", "ctr", "memory", "cr0", "cr1", "cr5", "cr6",
		   "cr7", "r0", "r4", "r5", "r6", "r7", "r8", "r9", "r10",
		   "r11", "r12"
	);
}
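/*
 * What the two asm helpers above do, sketched as C-like pseudo-code
 * (r1 is the stack pointer register):
 *
 *	new_sp = sp + THREAD_SIZE - STACK_FRAME_OVERHEAD;
 *	*(unsigned long *)new_sp = r1;	// back chain to the old stack
 *	r1 = new_sp;			// switch onto the IRQ stack
 *	callee(...);			// __do_softirq() or __do_irq(regs)
 *	r1 = *(unsigned long *)r1;	// restore the original stack
 *
 * The large clobber lists tell the compiler that the callee follows the
 * normal calling convention, since it cannot see through the "bl".
 */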
void __do_irq(struct pt_regs *regs)
{
	unsigned int irq;

	trace_irq_entry(regs);

	/*
	 * Query the platform PIC for the interrupt & ack it.
	 *
	 * This will typically lower the interrupt line to the CPU
	 */
	irq = ppc_md.get_irq();

	/* We can hard enable interrupts now to allow perf interrupts */
	may_hard_irq_enable();

	/* And finally process it */
	if (unlikely(!irq))
		__this_cpu_inc(irq_stat.spurious_irqs);
	else
		generic_handle_irq(irq);

	trace_irq_exit(regs);
}

DEFINE_INTERRUPT_HANDLER_ASYNC(do_IRQ)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	void *cursp, *irqsp, *sirqsp;

	/* Switch to the irq stack to handle this */
	cursp = (void *)(current_stack_pointer & ~(THREAD_SIZE - 1));
	irqsp = hardirq_ctx[raw_smp_processor_id()];
	sirqsp = softirq_ctx[raw_smp_processor_id()];

	check_stack_overflow();

	/* Already there ? */
	if (unlikely(cursp == irqsp || cursp == sirqsp)) {
		__do_irq(regs);
		set_irq_regs(old_regs);
		return;
	}
	/* Switch stack and call */
	call_do_irq(regs, irqsp);

	set_irq_regs(old_regs);
}

static void *__init alloc_vm_stack(void)
{
	return __vmalloc_node(THREAD_SIZE, THREAD_ALIGN, THREADINFO_GFP,
			      NUMA_NO_NODE, (void *)_RET_IP_);
}

static void __init vmap_irqstack_init(void)
{
	int i;

	for_each_possible_cpu(i) {
		softirq_ctx[i] = alloc_vm_stack();
		hardirq_ctx[i] = alloc_vm_stack();
	}
}


void __init init_IRQ(void)
{
	if (IS_ENABLED(CONFIG_VMAP_STACK))
		vmap_irqstack_init();

	if (ppc_md.init_IRQ)
		ppc_md.init_IRQ();
}

#if defined(CONFIG_BOOKE) || defined(CONFIG_40x)
void *critirq_ctx[NR_CPUS] __read_mostly;
void *dbgirq_ctx[NR_CPUS] __read_mostly;
void *mcheckirq_ctx[NR_CPUS] __read_mostly;
#endif

void *softirq_ctx[NR_CPUS] __read_mostly;
void *hardirq_ctx[NR_CPUS] __read_mostly;

void do_softirq_own_stack(void)
{
	call_do_softirq(softirq_ctx[smp_processor_id()]);
}

irq_hw_number_t virq_to_hw(unsigned int virq)
{
	struct irq_data *irq_data = irq_get_irq_data(virq);
	return WARN_ON(!irq_data) ? 0 : irq_data->hwirq;
}
EXPORT_SYMBOL_GPL(virq_to_hw);
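/*
 * Illustrative use of virq_to_hw(), mapping a Linux interrupt number
 * back to the controller-level hwirq (np below is a hypothetical
 * device_node already looked up by the caller):
 *
 *	unsigned int virq = irq_of_parse_and_map(np, 0);
 *	irq_hw_number_t hw = virq_to_hw(virq);
 *
 * An unmapped virq has no irq_data, which trips the WARN_ON and makes
 * the function return 0.
 */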
#ifdef CONFIG_SMP
int irq_choose_cpu(const struct cpumask *mask)
{
	int cpuid;

	if (cpumask_equal(mask, cpu_online_mask)) {
		static int irq_rover;
		static DEFINE_RAW_SPINLOCK(irq_rover_lock);
		unsigned long flags;

		/* Round-robin distribution... */
		raw_spin_lock_irqsave(&irq_rover_lock, flags);

		irq_rover = cpumask_next(irq_rover, cpu_online_mask);
		if (irq_rover >= nr_cpu_ids)
			irq_rover = cpumask_first(cpu_online_mask);

		cpuid = irq_rover;

		raw_spin_unlock_irqrestore(&irq_rover_lock, flags);
	} else {
		cpuid = cpumask_first_and(mask, cpu_online_mask);
		if (cpuid >= nr_cpu_ids)
			cpuid = cpumask_first(cpu_online_mask);
	}

	return get_hard_smp_processor_id(cpuid);
}
#else
int irq_choose_cpu(const struct cpumask *mask)
{
	return hard_smp_processor_id();
}
#endif

#ifdef CONFIG_PPC64
static int __init setup_noirqdistrib(char *str)
{
	distribute_irqs = 0;
	return 1;
}

__setup("noirqdistrib", setup_noirqdistrib);
#endif /* CONFIG_PPC64 */
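/*
 * Example: booting with
 *
 *	noirqdistrib
 *
 * on the kernel command line runs setup_noirqdistrib() and clears
 * distribute_irqs, which platform interrupt controller code consults
 * when deciding whether to spread device interrupts across CPUs.
 */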