xref: /linux/arch/x86/kernel/irq_64.c (revision b8d312aa075f33282565467662c4628dae0a2aff)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and in the
 * x86_64-specific irq controller code (e.g. i8259.c and
 * io_apic.c).
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/cpu_entry_area.h>
#include <asm/io_apic.h>
#include <asm/apic.h>

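/*
 * Per-CPU backing store for the hardirq stack. It is page aligned so
 * that, in the CONFIG_VMAP_STACK case below, it can be remapped
 * page-by-page into vmalloc space behind a guard page.
 * DECLARE_INIT_PER_CPU() declares the matching init_per_cpu__ symbol
 * referenced from the linker script for early boot.
 */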
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

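/*
 * On x86_64 at this revision the switch to the IRQ stack happens in the
 * assembly entry code (the ENTER_IRQ_STACK macro in entry_64.S), so
 * this C handler only has to dispatch the descriptor. It returns false
 * for a spurious vector, i.e. when no valid descriptor is installed for
 * the vector that fired.
 */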
bool handle_irq(struct irq_desc *desc, struct pt_regs *regs)
{
	if (IS_ERR_OR_NULL(desc))
		return false;

	generic_handle_irq_desc(desc);
	return true;
}
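
/*
 * For reference, a simplified sketch of how the caller in
 * arch/x86/kernel/irq.c used the return value in kernels of this era
 * (not part of this file; details elided):
 *
 *	desc = __this_cpu_read(vector_irq[vector]);
 *	if (!handle_irq(desc, regs)) {
 *		ack_APIC_irq();
 *		... warn about the spurious vector ...
 *	}
 */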

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages. A stack overflow then runs
 * into an unmapped vmalloc guard page and faults immediately, instead
 * of silently corrupting whatever follows the stack in the per-CPU
 * area.
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;

	/* Collect the page frames backing this CPU's stack storage. */
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}

	/* vmap()'s third argument takes VM_* flags, not a gfp_t. */
	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Stacks grow down: store the initial top of stack. */
	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
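
/*
 * Worked example (assuming 4 KiB pages and no KASAN, so IRQ_STACK_SIZE
 * is 16 KiB): the loop gathers four page frames, vmap() maps them
 * contiguously at some vmalloc address va, and hardirq_stack_ptr is
 * set to va + 16 KiB, the byte just past the mapping. The entry code
 * pushes downward from there; running past va faults in unmapped
 * vmalloc guard space rather than corrupting neighbouring data.
 */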
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per cpu
 * backing store without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	per_cpu(hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE;
	return 0;
}
#endif

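/*
 * Idempotent: hardirq_stack_ptr survives CPU offline/online cycles, so
 * re-onlining a CPU does not vmap() a second stack for it.
 */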
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}
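
/*
 * Call sites, for reference (a sketch based on kernels of this era, not
 * part of this file): init_IRQ() initializes the boot CPU's stack, and
 * the CPU bringup path does the same before waking a secondary CPU:
 *
 *	arch/x86/kernel/irqinit.c:
 *		irq_init_percpu_irqstack(smp_processor_id());
 *
 *	arch/x86/kernel/smpboot.c:
 *		ret = irq_init_percpu_irqstack(cpu);
 */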