xref: /linux/arch/x86/kernel/irq_64.c (revision bfb921b2a9d5d1123d1d10b196a39db629ddef87)
// SPDX-License-Identifier: GPL-2.0
/*
 *	Copyright (C) 1992, 1998 Linus Torvalds, Ingo Molnar
 *
 * This file contains the lowest level x86_64-specific interrupt
 * entry and irq statistics code. All the remaining irq logic is
 * done by the generic kernel/irq/ code and by the x86_64-specific
 * irq controller code (e.g. i8259.c and io_apic.c).
 */

#include <linux/kernel_stat.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/seq_file.h>
#include <linux/delay.h>
#include <linux/ftrace.h>
#include <linux/uaccess.h>
#include <linux/smp.h>
#include <linux/sched/task_stack.h>
#include <linux/vmalloc.h>

#include <asm/cpu_entry_area.h>
#include <asm/softirq_stack.h>
#include <asm/irq_stack.h>
#include <asm/io_apic.h>
#include <asm/apic.h>

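/*
 * Per-CPU backing store for the hardware interrupt stack. Each CPU owns
 * one page-aligned struct irq_stack; with CONFIG_VMAP_STACK it is
 * additionally remapped through vmalloc space below so that an overrun
 * faults instead of silently corrupting neighbouring per-CPU data.
 */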
DEFINE_PER_CPU_PAGE_ALIGNED(struct irq_stack, irq_stack_backing_store) __visible;
DECLARE_INIT_PER_CPU(irq_stack_backing_store);

#ifdef CONFIG_VMAP_STACK
/*
 * VMAP the backing store with guard pages
 */
static int map_irq_stack(unsigned int cpu)
{
	char *stack = (char *)per_cpu_ptr(&irq_stack_backing_store, cpu);
	struct page *pages[IRQ_STACK_SIZE / PAGE_SIZE];
	void *va;
	int i;

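	/*
	 * Translate each page of this CPU's backing store to its
	 * struct page so the pages can be remapped into vmalloc space.
	 */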
	for (i = 0; i < IRQ_STACK_SIZE / PAGE_SIZE; i++) {
		phys_addr_t pa = per_cpu_ptr_to_phys(stack + (i << PAGE_SHIFT));

		pages[i] = pfn_to_page(pa >> PAGE_SHIFT);
	}

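	/*
	 * Map the pages into vmalloc space. The vmap area is separated
	 * from its neighbours by unmapped guard pages, so overrunning
	 * the IRQ stack faults rather than corrupting other data.
	 */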
	va = vmap(pages, IRQ_STACK_SIZE / PAGE_SIZE, VM_MAP, PAGE_KERNEL);
	if (!va)
		return -ENOMEM;

	/* Store actual TOS to avoid adjustment in the hotpath */
	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#else
/*
 * If VMAP stacks are disabled due to KASAN, just use the per cpu
 * backing store without guard pages.
 */
static int map_irq_stack(unsigned int cpu)
{
	void *va = per_cpu_ptr(&irq_stack_backing_store, cpu);

	/* Store actual TOS to avoid adjustment in the hotpath */
	per_cpu(pcpu_hot.hardirq_stack_ptr, cpu) = va + IRQ_STACK_SIZE - 8;
	return 0;
}
#endif

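/*
 * Set up the hard IRQ stack for @cpu. Safe to call repeatedly: once the
 * per-CPU stack pointer has been populated, the existing mapping is kept.
 */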
int irq_init_percpu_irqstack(unsigned int cpu)
{
	if (per_cpu(pcpu_hot.hardirq_stack_ptr, cpu))
		return 0;
	return map_irq_stack(cpu);
}
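/*
 * Illustrative sketch (not part of this file): a hypothetical CPU
 * bring-up step would map the IRQ stack before interrupts are enabled
 * on the new CPU. The function and call site below are assumptions for
 * illustration only, not the kernel's actual caller.
 *
 *	int bringup_cpu_irqstack(unsigned int cpu)
 *	{
 *		int ret = irq_init_percpu_irqstack(cpu);
 *
 *		if (ret)
 *			return ret;
 *		// ... continue bring-up, then enable interrupts on @cpu
 *		return 0;
 *	}
 */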