xref: /linux/arch/x86/kernel/irq.c (revision c924c5e9b8c65b3a479a90e5e37d74cc8cd9fe0a)
1  // SPDX-License-Identifier: GPL-2.0-only
2  /*
3   * Common interrupt code for 32 and 64 bit
4   */
5  #include <linux/cpu.h>
6  #include <linux/interrupt.h>
7  #include <linux/kernel_stat.h>
8  #include <linux/of.h>
9  #include <linux/seq_file.h>
10  #include <linux/smp.h>
11  #include <linux/ftrace.h>
12  #include <linux/delay.h>
13  #include <linux/export.h>
14  #include <linux/irq.h>
15  
16  #include <asm/irq_stack.h>
17  #include <asm/apic.h>
18  #include <asm/io_apic.h>
19  #include <asm/irq.h>
20  #include <asm/mce.h>
21  #include <asm/hw_irq.h>
22  #include <asm/desc.h>
23  #include <asm/traps.h>
24  #include <asm/thermal.h>
25  #include <asm/posted_intr.h>
26  #include <asm/irq_remapping.h>
27  
28  #if defined(CONFIG_X86_LOCAL_APIC) || defined(CONFIG_X86_THERMAL_VECTOR)
29  #define CREATE_TRACE_POINTS
30  #include <asm/trace/irq_vectors.h>
31  #endif
32  
33  DEFINE_PER_CPU_SHARED_ALIGNED(irq_cpustat_t, irq_stat);
34  EXPORT_PER_CPU_SYMBOL(irq_stat);
35  
36  DEFINE_PER_CPU_CACHE_HOT(u16, __softirq_pending);
37  EXPORT_PER_CPU_SYMBOL(__softirq_pending);
38  
39  DEFINE_PER_CPU_CACHE_HOT(struct irq_stack *, hardirq_stack_ptr);
40  
41  atomic_t irq_err_count;
42  
43  /*
44   * 'What should we do if we get a hw irq event on an illegal vector?'
45   * Each architecture has to answer this itself.
46   */
47  void ack_bad_irq(unsigned int irq)
48  {
49  	if (printk_ratelimit())
50  		pr_err("unexpected IRQ trap at vector %02x\n", irq);
51  
52  	/*
53  	 * Currently unexpected vectors happen only on SMP and APIC.
54  	 * We _must_ ack these because every local APIC has only N
55  	 * irq slots per priority level, and a 'hanging, unacked' IRQ
56  	 * holds up an irq slot - in excessive cases (when multiple
57  	 * unexpected vectors occur) that might lock up the APIC
58  	 * completely.
59  	 * But only ack when the APIC is enabled -AK
60  	 */
61  	apic_eoi();
62  }
63  
64  #define irq_stats(x)		(&per_cpu(irq_stat, x))
65  /*
66   * /proc/interrupts printing for arch-specific interrupts
67   */
68  int arch_show_interrupts(struct seq_file *p, int prec)
69  {
70  	int j;
71  
72  	seq_printf(p, "%*s: ", prec, "NMI");
73  	for_each_online_cpu(j)
74  		seq_printf(p, "%10u ", irq_stats(j)->__nmi_count);
75  	seq_puts(p, "  Non-maskable interrupts\n");
76  #ifdef CONFIG_X86_LOCAL_APIC
77  	seq_printf(p, "%*s: ", prec, "LOC");
78  	for_each_online_cpu(j)
79  		seq_printf(p, "%10u ", irq_stats(j)->apic_timer_irqs);
80  	seq_puts(p, "  Local timer interrupts\n");
81  
82  	seq_printf(p, "%*s: ", prec, "SPU");
83  	for_each_online_cpu(j)
84  		seq_printf(p, "%10u ", irq_stats(j)->irq_spurious_count);
85  	seq_puts(p, "  Spurious interrupts\n");
86  	seq_printf(p, "%*s: ", prec, "PMI");
87  	for_each_online_cpu(j)
88  		seq_printf(p, "%10u ", irq_stats(j)->apic_perf_irqs);
89  	seq_puts(p, "  Performance monitoring interrupts\n");
90  	seq_printf(p, "%*s: ", prec, "IWI");
91  	for_each_online_cpu(j)
92  		seq_printf(p, "%10u ", irq_stats(j)->apic_irq_work_irqs);
93  	seq_puts(p, "  IRQ work interrupts\n");
94  	seq_printf(p, "%*s: ", prec, "RTR");
95  	for_each_online_cpu(j)
96  		seq_printf(p, "%10u ", irq_stats(j)->icr_read_retry_count);
97  	seq_puts(p, "  APIC ICR read retries\n");
98  	if (x86_platform_ipi_callback) {
99  		seq_printf(p, "%*s: ", prec, "PLT");
100  		for_each_online_cpu(j)
101  			seq_printf(p, "%10u ", irq_stats(j)->x86_platform_ipis);
102  		seq_puts(p, "  Platform interrupts\n");
103  	}
104  #endif
105  #ifdef CONFIG_SMP
106  	seq_printf(p, "%*s: ", prec, "RES");
107  	for_each_online_cpu(j)
108  		seq_printf(p, "%10u ", irq_stats(j)->irq_resched_count);
109  	seq_puts(p, "  Rescheduling interrupts\n");
110  	seq_printf(p, "%*s: ", prec, "CAL");
111  	for_each_online_cpu(j)
112  		seq_printf(p, "%10u ", irq_stats(j)->irq_call_count);
113  	seq_puts(p, "  Function call interrupts\n");
114  	seq_printf(p, "%*s: ", prec, "TLB");
115  	for_each_online_cpu(j)
116  		seq_printf(p, "%10u ", irq_stats(j)->irq_tlb_count);
117  	seq_puts(p, "  TLB shootdowns\n");
118  #endif
119  #ifdef CONFIG_X86_THERMAL_VECTOR
120  	seq_printf(p, "%*s: ", prec, "TRM");
121  	for_each_online_cpu(j)
122  		seq_printf(p, "%10u ", irq_stats(j)->irq_thermal_count);
123  	seq_puts(p, "  Thermal event interrupts\n");
124  #endif
125  #ifdef CONFIG_X86_MCE_THRESHOLD
126  	seq_printf(p, "%*s: ", prec, "THR");
127  	for_each_online_cpu(j)
128  		seq_printf(p, "%10u ", irq_stats(j)->irq_threshold_count);
129  	seq_puts(p, "  Threshold APIC interrupts\n");
130  #endif
131  #ifdef CONFIG_X86_MCE_AMD
132  	seq_printf(p, "%*s: ", prec, "DFR");
133  	for_each_online_cpu(j)
134  		seq_printf(p, "%10u ", irq_stats(j)->irq_deferred_error_count);
135  	seq_puts(p, "  Deferred Error APIC interrupts\n");
136  #endif
137  #ifdef CONFIG_X86_MCE
138  	seq_printf(p, "%*s: ", prec, "MCE");
139  	for_each_online_cpu(j)
140  		seq_printf(p, "%10u ", per_cpu(mce_exception_count, j));
141  	seq_puts(p, "  Machine check exceptions\n");
142  	seq_printf(p, "%*s: ", prec, "MCP");
143  	for_each_online_cpu(j)
144  		seq_printf(p, "%10u ", per_cpu(mce_poll_count, j));
145  	seq_puts(p, "  Machine check polls\n");
146  #endif
147  #ifdef CONFIG_X86_HV_CALLBACK_VECTOR
148  	if (test_bit(HYPERVISOR_CALLBACK_VECTOR, system_vectors)) {
149  		seq_printf(p, "%*s: ", prec, "HYP");
150  		for_each_online_cpu(j)
151  			seq_printf(p, "%10u ",
152  				   irq_stats(j)->irq_hv_callback_count);
153  		seq_puts(p, "  Hypervisor callback interrupts\n");
154  	}
155  #endif
156  #if IS_ENABLED(CONFIG_HYPERV)
157  	if (test_bit(HYPERV_REENLIGHTENMENT_VECTOR, system_vectors)) {
158  		seq_printf(p, "%*s: ", prec, "HRE");
159  		for_each_online_cpu(j)
160  			seq_printf(p, "%10u ",
161  				   irq_stats(j)->irq_hv_reenlightenment_count);
162  		seq_puts(p, "  Hyper-V reenlightenment interrupts\n");
163  	}
164  	if (test_bit(HYPERV_STIMER0_VECTOR, system_vectors)) {
165  		seq_printf(p, "%*s: ", prec, "HVS");
166  		for_each_online_cpu(j)
167  			seq_printf(p, "%10u ",
168  				   irq_stats(j)->hyperv_stimer0_count);
169  		seq_puts(p, "  Hyper-V stimer0 interrupts\n");
170  	}
171  #endif
172  	seq_printf(p, "%*s: %10u\n", prec, "ERR", atomic_read(&irq_err_count));
173  #if defined(CONFIG_X86_IO_APIC)
174  	seq_printf(p, "%*s: %10u\n", prec, "MIS", atomic_read(&irq_mis_count));
175  #endif
176  #if IS_ENABLED(CONFIG_KVM)
177  	seq_printf(p, "%*s: ", prec, "PIN");
178  	for_each_online_cpu(j)
179  		seq_printf(p, "%10u ", irq_stats(j)->kvm_posted_intr_ipis);
180  	seq_puts(p, "  Posted-interrupt notification event\n");
181  
182  	seq_printf(p, "%*s: ", prec, "NPI");
183  	for_each_online_cpu(j)
184  		seq_printf(p, "%10u ",
185  			   irq_stats(j)->kvm_posted_intr_nested_ipis);
186  	seq_puts(p, "  Nested posted-interrupt event\n");
187  
188  	seq_printf(p, "%*s: ", prec, "PIW");
189  	for_each_online_cpu(j)
190  		seq_printf(p, "%10u ",
191  			   irq_stats(j)->kvm_posted_intr_wakeup_ipis);
192  	seq_puts(p, "  Posted-interrupt wakeup event\n");
193  #endif
194  #ifdef CONFIG_X86_POSTED_MSI
195  	seq_printf(p, "%*s: ", prec, "PMN");
196  	for_each_online_cpu(j)
197  		seq_printf(p, "%10u ",
198  			   irq_stats(j)->posted_msi_notification_count);
199  	seq_puts(p, "  Posted MSI notification event\n");
200  #endif
201  	return 0;
202  }
203  
204  /*
205   * /proc/stat helpers
206   */
207  u64 arch_irq_stat_cpu(unsigned int cpu)
208  {
209  	u64 sum = irq_stats(cpu)->__nmi_count;
210  
211  #ifdef CONFIG_X86_LOCAL_APIC
212  	sum += irq_stats(cpu)->apic_timer_irqs;
213  	sum += irq_stats(cpu)->irq_spurious_count;
214  	sum += irq_stats(cpu)->apic_perf_irqs;
215  	sum += irq_stats(cpu)->apic_irq_work_irqs;
216  	sum += irq_stats(cpu)->icr_read_retry_count;
217  	if (x86_platform_ipi_callback)
218  		sum += irq_stats(cpu)->x86_platform_ipis;
219  #endif
220  #ifdef CONFIG_SMP
221  	sum += irq_stats(cpu)->irq_resched_count;
222  	sum += irq_stats(cpu)->irq_call_count;
223  #endif
224  #ifdef CONFIG_X86_THERMAL_VECTOR
225  	sum += irq_stats(cpu)->irq_thermal_count;
226  #endif
227  #ifdef CONFIG_X86_MCE_THRESHOLD
228  	sum += irq_stats(cpu)->irq_threshold_count;
229  #endif
230  #ifdef CONFIG_X86_HV_CALLBACK_VECTOR
231  	sum += irq_stats(cpu)->irq_hv_callback_count;
232  #endif
233  #if IS_ENABLED(CONFIG_HYPERV)
234  	sum += irq_stats(cpu)->irq_hv_reenlightenment_count;
235  	sum += irq_stats(cpu)->hyperv_stimer0_count;
236  #endif
237  #ifdef CONFIG_X86_MCE
238  	sum += per_cpu(mce_exception_count, cpu);
239  	sum += per_cpu(mce_poll_count, cpu);
240  #endif
241  	return sum;
242  }
243  
244  u64 arch_irq_stat(void)
245  {
246  	u64 sum = atomic_read(&irq_err_count);
247  	return sum;
248  }
249  
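/*
 * On 64-bit, the entry code has already switched to the per-CPU hardirq stack
 * (see <asm/irq_stack.h>), so the descriptor handler can be invoked directly;
 * 32-bit still switches stacks inside __handle_irq().
 */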
250  static __always_inline void handle_irq(struct irq_desc *desc,
251  				       struct pt_regs *regs)
252  {
253  	if (IS_ENABLED(CONFIG_X86_64))
254  		generic_handle_irq_desc(desc);
255  	else
256  		__handle_irq(desc, regs);
257  }
258  
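/*
 * Translate the hardware vector to its Linux interrupt descriptor via the
 * per-CPU vector_irq[] table and run the handler. Returns -EINVAL when no
 * handler is installed so the caller can still EOI the spurious vector; a
 * stale (shutdown/retriggered) entry is reset to VECTOR_UNUSED.
 */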
259  static __always_inline int call_irq_handler(int vector, struct pt_regs *regs)
260  {
261  	struct irq_desc *desc;
262  	int ret = 0;
263  
264  	desc = __this_cpu_read(vector_irq[vector]);
265  	if (likely(!IS_ERR_OR_NULL(desc))) {
266  		handle_irq(desc, regs);
267  	} else {
268  		ret = -EINVAL;
269  		if (desc == VECTOR_UNUSED) {
270  			pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
271  					     __func__, smp_processor_id(),
272  					     vector);
273  		} else {
274  			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
275  		}
276  	}
277  
278  	return ret;
279  }
280  
281  /*
282   * common_interrupt() handles all normal device IRQs (the special SMP
283   * cross-CPU interrupts have their own entry points).
284   */
285  DEFINE_IDTENTRY_IRQ(common_interrupt)
286  {
287  	struct pt_regs *old_regs = set_irq_regs(regs);
288  
289  	/* entry code tells RCU that we're not quiescent.  Check it. */
290  	RCU_LOCKDEP_WARN(!rcu_is_watching(), "IRQ failed to wake up RCU");
291  
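	/*
	 * No handler was found for this vector: ack it here so the spurious
	 * vector does not leave its in-service bit set in the APIC.
	 */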
292  	if (unlikely(call_irq_handler(vector, regs)))
293  		apic_eoi();
294  
295  	set_irq_regs(old_regs);
296  }
297  
298  #ifdef CONFIG_X86_LOCAL_APIC
299  /* Function pointer for generic interrupt vector handling */
300  void (*x86_platform_ipi_callback)(void) = NULL;
301  /*
302   * Handler for X86_PLATFORM_IPI_VECTOR.
303   */
304  DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
305  {
306  	struct pt_regs *old_regs = set_irq_regs(regs);
307  
308  	apic_eoi();
309  	trace_x86_platform_ipi_entry(X86_PLATFORM_IPI_VECTOR);
310  	inc_irq_stat(x86_platform_ipis);
311  	if (x86_platform_ipi_callback)
312  		x86_platform_ipi_callback();
313  	trace_x86_platform_ipi_exit(X86_PLATFORM_IPI_VECTOR);
314  	set_irq_regs(old_regs);
315  }
316  #endif
317  
318  #if IS_ENABLED(CONFIG_KVM)
319  static void dummy_handler(void) {}
320  static void (*kvm_posted_intr_wakeup_handler)(void) = dummy_handler;
321  
322  void kvm_set_posted_intr_wakeup_handler(void (*handler)(void))
323  {
324  	if (handler)
325  		kvm_posted_intr_wakeup_handler = handler;
326  	else {
327  		kvm_posted_intr_wakeup_handler = dummy_handler;
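		/*
		 * Wait for any in-flight invocation of the old handler to
		 * finish before returning; the handler runs with interrupts
		 * disabled, which is an RCU read-side critical section.
		 */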
328  		synchronize_rcu();
329  	}
330  }
331  EXPORT_SYMBOL_GPL(kvm_set_posted_intr_wakeup_handler);
332  
333  /*
334   * Handler for POSTED_INTERRUPT_VECTOR.
335   */
336  DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
337  {
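	/*
	 * Nothing to do beyond the EOI and the statistics: the IPI only kicks
	 * this CPU, and KVM consumes the pending bits from the posted-interrupt
	 * descriptor itself (e.g. on the next VM entry).
	 */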
338  	apic_eoi();
339  	inc_irq_stat(kvm_posted_intr_ipis);
340  }
341  
342  /*
343   * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
344   */
345  DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
346  {
347  	apic_eoi();
348  	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
349  	kvm_posted_intr_wakeup_handler();
350  }
351  
352  /*
353   * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
354   */
355  DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
356  {
357  	apic_eoi();
358  	inc_irq_stat(kvm_posted_intr_nested_ipis);
359  }
360  #endif
361  
362  #ifdef CONFIG_X86_POSTED_MSI
363  
364  /* Posted Interrupt Descriptors for coalesced MSIs to be posted */
365  DEFINE_PER_CPU_ALIGNED(struct pi_desc, posted_msi_pi_desc);
366  
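/*
 * Initialize this CPU's posted-interrupt descriptor so that the IOMMU posts
 * coalesced MSIs with the posted-MSI notification vector and targets this
 * CPU's APIC.
 */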
367  void intel_posted_msi_init(void)
368  {
369  	u32 destination;
370  	u32 apic_id;
371  
372  	this_cpu_write(posted_msi_pi_desc.nv, POSTED_MSI_NOTIFICATION_VECTOR);
373  
374  	/*
375  	 * The APIC destination ID is stored in bits 8:15 when in xAPIC mode.
376  	 * See VT-d spec, chapter 9.11.
377  	 */
378  	apic_id = this_cpu_read(x86_cpu_to_apicid);
379  	destination = x2apic_enabled() ? apic_id : apic_id << 8;
380  	this_cpu_write(posted_msi_pi_desc.ndst, destination);
381  }
382  
383  /*
384   * De-multiplexing posted interrupts is on the performance path, the code
385   * below is written to optimize the cache performance based on the following
386   * considerations:
387   * 1. The posted interrupt descriptor (PID) fits in a cache line that is
388   *    frequently accessed by both the CPU and the IOMMU.
389   * 2. During posted MSI processing, the CPU needs to do 64-bit reads and xchgs
390   *    to check and clear the posted interrupt request (PIR), a 256-bit field
391   *    within the PID.
392   * 3. On the other side, the IOMMU does atomic swaps of the entire PID cache
393   *    line when posting interrupts and setting control bits.
394   * 4. The CPU can access the cache line an order of magnitude faster than the IOMMU.
395   * 5. Each time the IOMMU posts an interrupt to the PIR, it evicts the PID
396   *    cache line. The cache line states after each operation are as follows:
397   *   CPU		IOMMU			PID Cache line state
398   *   ---------------------------------------------------------------
399   *   read64					exclusive
400   *   lock xchg64				modified
401   *			post/atomic swap	invalid
402   *   -------------------------------------------------------------
403   *
404   * To reduce L1 data cache misses, it is important to avoid contention with
405   * the IOMMU's interrupt posting/atomic swap. Therefore, a copy of the PIR is
406   * used to dispatch the interrupt handlers.
407   *
408   * In addition, the code tries to keep the cache line state as consistent as
409   * possible, e.g. when making a copy and clearing the PIR
410   * (assuming non-zero PIR bits are present in the entire PIR), it does:
411   *		read, read, read, read, xchg, xchg, xchg, xchg
412   * instead of:
413   *		read, xchg, read, xchg, read, xchg, read, xchg
414   */
415  static __always_inline bool handle_pending_pir(u64 *pir, struct pt_regs *regs)
416  {
417  	int i, vec = FIRST_EXTERNAL_VECTOR;
418  	unsigned long pir_copy[4];
419  	bool handled = false;
420  
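	/*
	 * Snapshot the PIR with plain reads first, per the read..read,
	 * xchg..xchg pattern described above.
	 */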
421  	for (i = 0; i < 4; i++)
422  		pir_copy[i] = pir[i];
423  
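	/*
	 * Atomically clear only the chunks that had bits set in the snapshot;
	 * the xchg result replaces the plain-read copy and may include bits
	 * posted since the read above.
	 */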
424  	for (i = 0; i < 4; i++) {
425  		if (!pir_copy[i])
426  			continue;
427  
428  		pir_copy[i] = arch_xchg(&pir[i], 0);
429  		handled = true;
430  	}
431  
432  	if (handled) {
433  		for_each_set_bit_from(vec, pir_copy, FIRST_SYSTEM_VECTOR)
434  			call_irq_handler(vec, regs);
435  	}
436  
437  	return handled;
438  }
439  
440  /*
441   * Performance data shows that 3 is good enough to harvest 90+% of the benefit
442   * on high IRQ rate workloads.
443   */
444  #define MAX_POSTED_MSI_COALESCING_LOOP 3
445  
446  /*
447   * For MSIs that are delivered as posted interrupts, the CPU notifications
448   * can be coalesced if the MSIs arrive in high frequency bursts.
449   */
450  DEFINE_IDTENTRY_SYSVEC(sysvec_posted_msi_notification)
451  {
452  	struct pt_regs *old_regs = set_irq_regs(regs);
453  	struct pi_desc *pid;
454  	int i = 0;
455  
456  	pid = this_cpu_ptr(&posted_msi_pi_desc);
457  
458  	inc_irq_stat(posted_msi_notification_count);
459  	irq_enter();
460  
461  	/*
462  	 * Max coalescing count includes the extra round of handle_pending_pir
463  	 * after clearing the outstanding notification bit. Hence, at most
464  	 * MAX_POSTED_MSI_COALESCING_LOOP - 1 loops are executed here.
465  	 */
466  	while (++i < MAX_POSTED_MSI_COALESCING_LOOP) {
467  		if (!handle_pending_pir(pid->pir64, regs))
468  			break;
469  	}
470  
471  	/*
472  	 * Clear the outstanding notification bit to allow new IRQ notifications;
473  	 * do this last to maximize the window of interrupt coalescing.
474  	 */
475  	pi_clear_on(pid);
476  
477  	/*
478  	 * There could be a race between a PI notification and the clearing of
479  	 * the ON bit; process the PIR bits one last time so that handling of
480  	 * the new interrupts is not delayed until the next IRQ.
481  	 */
482  	handle_pending_pir(pid->pir64, regs);
483  
484  	apic_eoi();
485  	irq_exit();
486  	set_irq_regs(old_regs);
487  }
488  #endif /* X86_POSTED_MSI */
489  
490  #ifdef CONFIG_HOTPLUG_CPU
491  /* A cpu has been removed from cpu_online_mask.  Reset irq affinities. */
492  void fixup_irqs(void)
493  {
494  	unsigned int vector;
495  	struct irq_desc *desc;
496  	struct irq_data *data;
497  	struct irq_chip *chip;
498  
499  	irq_migrate_all_off_this_cpu();
500  
501  	/*
502  	 * We could remove the mdelay() and then send spurious interrupts to
503  	 * the new cpu targets for all the irqs that were previously handled by
504  	 * this cpu. While that works, I have seen spurious interrupt messages
505  	 * (nothing wrong but still...).
506  	 *
507  	 * So for now, retain the mdelay(1), check the IRR, and then send those
508  	 * interrupts to the new targets, as this cpu is already offline...
509  	 */
510  	mdelay(1);
511  
512  	/*
513  	 * We can walk the vector array of this cpu without holding
514  	 * vector_lock because the cpu is already marked !online, so
515  	 * nothing else will touch it.
516  	 */
517  	for (vector = FIRST_EXTERNAL_VECTOR; vector < NR_VECTORS; vector++) {
518  		if (IS_ERR_OR_NULL(__this_cpu_read(vector_irq[vector])))
519  			continue;
520  
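		/*
		 * The vector is still pending on this (now offline) CPU:
		 * retrigger the interrupt so it is delivered to its new target
		 * instead of being lost.
		 */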
521  		if (is_vector_pending(vector)) {
522  			desc = __this_cpu_read(vector_irq[vector]);
523  
524  			raw_spin_lock(&desc->lock);
525  			data = irq_desc_get_irq_data(desc);
526  			chip = irq_data_get_irq_chip(data);
527  			if (chip->irq_retrigger) {
528  				chip->irq_retrigger(data);
529  				__this_cpu_write(vector_irq[vector], VECTOR_RETRIGGERED);
530  			}
531  			raw_spin_unlock(&desc->lock);
532  		}
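		/* Release the vector unless it was just marked as retriggered above. */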
533  		if (__this_cpu_read(vector_irq[vector]) != VECTOR_RETRIGGERED)
534  			__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
535  	}
536  }
537  #endif
538  
539  #ifdef CONFIG_X86_THERMAL_VECTOR
540  static void smp_thermal_vector(void)
541  {
542  	if (x86_thermal_enabled())
543  		intel_thermal_interrupt();
544  	else
545  		pr_err("CPU%d: Unexpected LVT thermal interrupt!\n",
546  		       smp_processor_id());
547  }
548  
549  DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
550  {
551  	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
552  	inc_irq_stat(irq_thermal_count);
553  	smp_thermal_vector();
554  	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
555  	apic_eoi();
556  }
557  #endif
558