/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with the direct forms (i.e., vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/objtool.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * PV hypercall interface to the hypervisor.
 *
 * Called via inline asm(), so we had better preserve %rcx and %r11.
 *
 * Input:
 *	%eax: hypercall number
 *	%rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
 * Output: %rax
 */
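/*
 * Illustration only (a sketch, not the kernel's actual hypercall
 * wrappers; the names nr, a1..a5 and ret are hypothetical): a C call
 * site could reach this stub with inline asm along the lines of
 *
 *	register unsigned long r10 asm("r10") = a4;
 *	register unsigned long r8  asm("r8")  = a5;
 *	asm volatile("call xen_hypercall_pv"
 *		     : "=a" (ret), "+D" (a1), "+S" (a2), "+d" (a3),
 *		       "+r" (r10), "+r" (r8)
 *		     : "0" (nr)
 *		     : "memory");
 *
 * Such a call site does not list %rcx and %r11 as clobbers, yet the
 * SYSCALL-based hypercall overwrites both (return RIP and RFLAGS), so
 * the stub below saves and restores them.
 */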
SYM_FUNC_START(xen_hypercall_pv)
	ANNOTATE_NOENDBR
	push %rcx
	push %r11
	UNWIND_HINT_SAVE
	syscall		/* Enter the hypervisor; clobbers %rcx and %r11. */
	UNWIND_HINT_RESTORE
	pop %r11
	pop %rcx
	RET
SYM_FUNC_END(xen_hypercall_pv)

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
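/*
 * The mask byte is vcpu_info->evtchn_upcall_mask in the vCPU's shared
 * vcpu_info structure, which is kept in percpu data so it can be
 * addressed directly.  While the byte is non-zero, Xen will not
 * deliver event-channel upcalls to this vCPU.
 */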
SYM_FUNC_START(xen_irq_disable_direct)
	movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	RET
SYM_FUNC_END(xen_irq_disable_direct)

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)

/*
 * Enable events.  This clears the event mask and then tests the
 * pending-event status.  If there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

	/*
	 * Preemption here doesn't matter, because being preempted will
	 * itself deal with any pending interrupts.  The pending check
	 * below may then run on the wrong CPU, but that doesn't hurt.
	 */

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
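/*
 * How the code below gets there: testb sets ZF when the mask byte is
 * 0 (events enabled), so setz %ah yields %ah = 1 in that case and
 * addb %ah, %ah doubles it to 2.  Since %ah is bits 8-15 of %rax, the
 * result has bit 9 (X86_EFLAGS_IF == 0x200) set exactly when events
 * are enabled, and clear when they are masked.
 */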
SYM_FUNC_START(xen_save_fl_direct)
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

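/*
 * A PV guest cannot read the real %cr2; Xen supplies the faulting
 * address in vcpu_info->arch.cr2 instead.  xen_read_cr2 goes through
 * the per-CPU xen_vcpu pointer, while xen_read_cr2_direct reads the
 * per-CPU xen_vcpu_info copy directly.
 */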
SYM_FUNC_START(xen_read_cr2)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2)

SYM_FUNC_START(xen_read_cr2_direct)
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct)
.popsection

.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	ENDBR
	pop %rcx
	pop %r11
	jmp  \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm
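/*
 * Xen delivers exceptions to a PV guest with an extended frame: %r11
 * and %rcx are pushed on top of the usual iret frame.  Each stub pops
 * those two registers to restore the normal frame layout and then
 * jumps to the regular handler.  For example,
 *
 *	xen_pv_trap asm_exc_page_fault
 *
 * expands to a stub named xen_asm_exc_page_fault that pops %rcx and
 * %r11 and jumps to asm_exc_page_fault.
 */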

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

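/*
 * Early-boot counterpart of the native early_idt_handler_array: Xen
 * enters these stubs with %r11 and %rcx pushed on top of the iret
 * frame, so each stub pops them and jumps to the corresponding native
 * early handler.  The .fill pads every stub to
 * XEN_EARLY_IDT_HANDLER_SIZE bytes so the array can be indexed by
 * vector number.
 */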
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_UNDEFINED
	ENDBR
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags		<-- xen_iret must push from here on
 *
 *	rcx
 *	r11
 * rsp->rax
 */
.macro xen_hypercall_iret
	pushq $0	/* Flags */
	push %rcx
	push %r11
	push %rax
	mov  $__HYPERVISOR_iret, %eax
	syscall		/* Do the IRET. */
#ifdef CONFIG_MITIGATION_SLS
	int3
#endif
.endm
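/*
 * __HYPERVISOR_iret consumes the frame shown above straight off the
 * stack and does not return to the caller.  The int3 under
 * CONFIG_MITIGATION_SLS is the usual straight-line-speculation stop,
 * since execution must never fall through past the hypercall.
 */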

SYM_CODE_START(xen_iret)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	xen_hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * Xen PV doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0)
 * is also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * under Xen PV would move %rsp up to the top of the kernel stack and leave
 * the IRET frame below %rsp, where it could be corrupted if an #NMI arrives
 * at that point.  Having swapgs_restore_regs_and_return_to_usermode() push
 * the IRET frame again at the same address would also be pointless.
 */
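/*
 * POP_REGS restores the registers from the pt_regs frame and the addq
 * skips regs->orig_ax, leaving just the five-word iret frame on the
 * stack; xen_iret then pushes the extra flags/%rcx/%r11/%rax words
 * that __HYPERVISOR_iret expects.
 */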
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq	$8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */
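/*
 * Native SYSCALL leaves the return RIP in %rcx and RFLAGS in %r11,
 * whereas Xen delivers them on the stack as shown above.  The stubs
 * below therefore pop both back into their registers and overwrite
 * the CS and SS slots with the selectors the generic entry code
 * expects before jumping to the native entry paths.
 */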

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single-step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif	/* CONFIG_IA32_EMULATION */