/*
 * Common interrupt code for 32 and 64 bit
 */
#include <linux/cpu.h>
#include <linux/interrupt.h>
#include <linux/kernel_stat.h>
#include <linux/of.h>
#include <linux/seq_file.h>
#include <linux/smp.h>
#include <linux/ftrace.h>
#include <linux/delay.h>
#include <linux/export.h>

#include <asm/apic.h>
#include <asm/io_apic.h>
#include <asm/irq.h>
#include <asm/mce.h>
#include <asm/hw_irq.h>
#include <asm/desc.h>

#define CREATE_TRACE_POINTS
#include <asm/trace/irq_vectors.h>

DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
EXPORT_PER_CPU_SYMBOL(irq_stat);

DEFINE_PER_CPU(struct pt_regs *, irq_regs);
EXPORT_PER_CPU_SYMBOL(irq_regs);

/* Global count of erroneous interrupts, shown as ERR in /proc/interrupts. */
atomic_t irq_err_count;

/*
 * 'What should we do if we get a hw irq event on an illegal vector?'
 * Each architecture has to answer this itself.  On x86 this is reached
 * via the generic handle_bad_irq() path.
 */
void ack_bad_irq(unsigned int irq)
{
	if (printk_ratelimit())
		pr_err("unexpected IRQ trap at vector %02x\n", irq);

	/*
	 * Currently unexpected vectors happen only on SMP and APIC.
	 * We _must_ ack these because every local APIC has only N
	 * irq slots per priority level, and a 'hanging, unacked' IRQ
	 * holds up an irq slot - in excessive cases (when multiple
	 * unexpected vectors occur) that might lock up the APIC
	 * completely.
	 * But only ack when the APIC is enabled -AK
	 */
	ack_APIC_irq();
}

#define irq_stats(x)		(&per_cpu(irq_stat, x))
/*
 * /proc/interrupts printing for arch specific interrupts
 */
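/*
 * For example, on a two-CPU system the rows produced below look like
 * this (the counts are illustrative; one %10u column per online CPU):
 *
 *	NMI:          0          0   Non-maskable interrupts
 *	LOC:     123456     120042   Local timer interrupts
 *	RES:       3141       2718   Rescheduling interrupts
 */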
int arch_show_interrupts(struct seq_file *p, int prec)
{
	int j;

	seq_printf(p, "%*s: ", prec, "NMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
	seq_puts(p, "  Non-maskable interrupts\n");
#ifdef CONFIG_X86_LOCAL_APIC
	seq_printf(p, "%*s: ", prec, "LOC");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
	seq_puts(p, "  Local timer interrupts\n");

	seq_printf(p, "%*s: ", prec, "SPU");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
	seq_puts(p, "  Spurious interrupts\n");
	seq_printf(p, "%*s: ", prec, "PMI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
	seq_puts(p, "  Performance monitoring interrupts\n");
	seq_printf(p, "%*s: ", prec, "IWI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
	seq_puts(p, "  IRQ work interrupts\n");
	seq_printf(p, "%*s: ", prec, "RTR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
	seq_puts(p, "  APIC ICR read retries\n");
	if (x86_platform_ipi_callback) {
		seq_printf(p, "%*s: ", prec, "PLT");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
		seq_puts(p, "  Platform interrupts\n");
	}
#endif
#ifdef CONFIG_SMP
	seq_printf(p, "%*s: ", prec, "RES");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
	seq_puts(p, "  Rescheduling interrupts\n");
	seq_printf(p, "%*s: ", prec, "CAL");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
	seq_puts(p, "  Function call interrupts\n");
	seq_printf(p, "%*s: ", prec, "TLB");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
	seq_puts(p, "  TLB shootdowns\n");
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	seq_printf(p, "%*s: ", prec, "TRM");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
	seq_puts(p, "  Thermal event interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	seq_printf(p, "%*s: ", prec, "THR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
	seq_puts(p, "  Threshold APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE_AMD
	seq_printf(p, "%*s: ", prec, "DFR");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
	seq_puts(p, "  Deferred Error APIC interrupts\n");
#endif
#ifdef CONFIG_X86_MCE
	seq_printf(p, "%*s: ", prec, "MCE");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
	seq_puts(p, "  Machine check exceptions\n");
	seq_printf(p, "%*s: ", prec, "MCP");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
	seq_puts(p, "  Machine check polls\n");
#endif
#if IS_ENABLED(CONFIG_HYPERV) || defined(CONFIG_XEN)
	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HYP");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_callback_count);
		seq_puts(p, "  Hypervisor callback interrupts\n");
	}
#endif
#if IS_ENABLED(CONFIG_HYPERV)
	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HRE");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->irq_hv_reenlightenment_count);
		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
	}
	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
		seq_printf(p, "%*s: ", prec, "HVS");
		for_each_online_cpu(j)
			seq_printf(p, "%10u ",
				   irq_stats(j)->hyperv_stimer0_count);
		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
	}
#endif
	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
#if defined(CONFIG_X86_IO_APIC)
	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
#endif
#ifdef CONFIG_HAVE_KVM
	seq_printf(p, "%*s: ", prec, "PIN");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
	seq_puts(p, "  Posted-interrupt notification event\n");

	seq_printf(p, "%*s: ", prec, "NPI");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_nested_ipis);
	seq_puts(p, "  Nested posted-interrupt event\n");

	seq_printf(p, "%*s: ", prec, "PIW");
	for_each_online_cpu(j)
		seq_printf(p, "%10u ",
			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
	seq_puts(p, "  Posted-interrupt wakeup event\n");
#endif
	return 0;
}

/*
 * /proc/stat helpers: these sums feed the "intr" line of /proc/stat.
 */
u64 arch_irq_stat_cpu(unsigned int cpu)
{
	u64 sum = irq_stats(cpu)->__nmi_count;

#ifdef CONFIG_X86_LOCAL_APIC
	sum += irq_stats(cpu)->apic_timer_irqs;
	sum += irq_stats(cpu)->irq_spurious_count;
	sum += irq_stats(cpu)->apic_perf_irqs;
	sum += irq_stats(cpu)->apic_irq_work_irqs;
	sum += irq_stats(cpu)->icr_read_retry_count;
	if (x86_platform_ipi_callback)
		sum += irq_stats(cpu)->x86_platform_ipis;
#endif
#ifdef CONFIG_SMP
	sum += irq_stats(cpu)->irq_resched_count;
	sum += irq_stats(cpu)->irq_call_count;
#endif
#ifdef CONFIG_X86_THERMAL_VECTOR
	sum += irq_stats(cpu)->irq_thermal_count;
#endif
#ifdef CONFIG_X86_MCE_THRESHOLD
	sum += irq_stats(cpu)->irq_threshold_count;
#endif
#ifdef CONFIG_X86_MCE
	sum += per_cpu(mce_exception_count, cpu);
	sum += per_cpu(mce_poll_count, cpu);
#endif
	return sum;
}

u64 arch_irq_stat(void)
{
	return atomic_read(&irq_err_count);
}

/*
 * do_IRQ handles all normal device IRQs (the special
 * SMP cross-CPU interrupts have their own specific
 * handlers).
 */
__visible unsigned int __irq_entry do_IRQ(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);
	struct irq_desc *desc;
	/*
	 * The low-level entry stub stored the negated vector number in
	 * orig_ax; the high bit is used by the ret_from_ code, so invert
	 * it here to recover the vector.
	 */
	unsigned int vector = ~regs->orig_ax;

	entering_irq();

	/* entering_irq() tells RCU that we're not quiescent.  Check it. */
	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");

	/* vector_irq is this CPU's vector -> irq_desc translation table. */
	desc = __this_cpu_read(vector_irq[vector]);

	if (!handle_irq(desc, regs)) {
		ack_APIC_irq();

		if (desc != VECTOR_RETRIGGERED) {
			pr_emerg_ratelimited("%s: %d.%d No irq handler for vector\n",
					     __func__, smp_processor_id(),
					     vector);
		} else {
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
		}
	}

	exiting_irq();

	set_irq_regs(old_regs);
	return 1;
}
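
/*
 * A sketch of the no-handler message emitted above (the CPU and vector
 * numbers are illustrative only):
 *
 *	do_IRQ: 1.55 No irq handler for vector
 */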

#ifdef CONFIG_X86_LOCAL_APIC
/* Function pointer for generic interrupt vector handling */
void (*x86_platform_ipi_callback)(void) = NULL;
/*
 * Handler for X86_PLATFORM_IPI_VECTOR.
 */
__visible void __irq_entry smp_x86_platform_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
	inc_irq_stat(x86_platform_ipis);
	if (x86_platform_ipi_callback)
		x86_platform_ipi_callback();
	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HAVE_KVM
static void dummy_handler(void) {}
static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;

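/*
 * Install or clear the wakeup-IPI callback.  Passing NULL restores the
 * no-op dummy_handler, so the vector is always safe to receive.
 *
 * Usage sketch (the callback name below is illustrative, not a real
 * kernel symbol):
 *
 *	kvm_set_posted_intr_wakeup_handler(my_wakeup_handler);
 *	...
 *	kvm_set_posted_intr_wakeup_handler(NULL);
 */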
void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
{
	if (handler)
		kvm_posted_intr_wakeup_handler = handler;
	else
		kvm_posted_intr_wakeup_handler = dummy_handler;
}
EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);

/*
 * Handler for POSTED_INTERRUPT_VECTOR.
 *
 * Nothing to do here beyond the ack and the statistic: the IPI exists
 * to kick the CPU, and any pending posted interrupt is processed by
 * KVM on the next VM entry.
 */
__visible void smp_kvm_posted_intr_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
 */
__visible void smp_kvm_posted_intr_wakeup_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
	kvm_posted_intr_wakeup_handler();
	exiting_irq();
	set_irq_regs(old_regs);
}

/*
 * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
 */
__visible void smp_kvm_posted_intr_nested_ipi(struct pt_regs *regs)
{
	struct pt_regs *old_regs = set_irq_regs(regs);

	entering_ack_irq();
	inc_irq_stat(kvm_posted_intr_nested_ipis);
	exiting_irq();
	set_irq_regs(old_regs);
}
#endif

#ifdef CONFIG_HOTPLUG_CPU
/* A CPU has been removed from cpu_online_mask.  Reset irq affinities. */
void fixup_irqs(void)
{
	unsigned int irr, vector;
	struct irq_desc *desc;
	struct irq_data *data;
	struct irq_chip *chip;

	irq_migrate_all_off_this_cpu();

	/*
	 * We could remove the mdelay() and then send spurious interrupts
	 * to the new CPU targets for all of the irqs that were previously
	 * handled by this CPU. While it works, I have seen spurious
	 * interrupt messages (nothing wrong but still...).
	 *
	 * So for now, retain mdelay(1) and check the IRR and then send
	 * those interrupts to the new targets as this CPU is already
	 * offlined...
	 */
	mdelay(1);

	/*
	 * We can walk the vector array of this CPU without holding
	 * vector_lock because the CPU is already marked !online, so
	 * nothing else will touch it.
	 */
	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
			continue;

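		/*
		 * The 256-bit IRR is exposed as eight 32-bit APIC
		 * registers spaced 0x10 apart; check whether this vector
		 * is still pending on the outgoing CPU.
		 */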
		irr = apic_read(APIC_IRR + (vector / 32 * 0x10));
		if (irr & (1 << (vector % 32))) {
			desc = __this_cpu_read(vector_irq[vector]);

			raw_spin_lock(&desc->lock);
			data = irq_desc_get_irq_data(desc);
			chip = irq_data_get_irq_chip(data);
			if (chip->irq_retrigger) {
				chip->irq_retrigger(data);
				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
			}
			raw_spin_unlock(&desc->lock);
		}
		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
	}
}
#endif