/* SPDX-License-Identifier: GPL-2.0 */
/*
 * Asm versions of Xen pv-ops, suitable for direct use.
 *
 * We only bother with direct forms (ie, vcpu in percpu data) of the
 * operations here; the indirect forms are better handled in C.
 */

#include <asm/errno.h>
#include <asm/asm-offsets.h>
#include <asm/percpu.h>
#include <asm/processor-flags.h>
#include <asm/segment.h>
#include <asm/thread_info.h>
#include <asm/asm.h>
#include <asm/frame.h>
#include <asm/unwind_hints.h>

#include <xen/interface/xen.h>

#include <linux/init.h>
#include <linux/linkage.h>
#include <linux/objtool.h>
#include <../entry/calling.h>

.pushsection .noinstr.text, "ax"
/*
 * PV hypercall interface to the hypervisor.
 *
 * Called via inline asm(), so %rcx and %r11 must be preserved.
 *
 * Input:
 *	%eax: hypercall number
 *	%rdi, %rsi, %rdx, %r10, %r8: args 1..5 for the hypercall
 * Output: %rax
 */
SYM_FUNC_START(xen_hypercall_pv)
	ANNOTATE_NOENDBR
	push %rcx
	push %r11
	UNWIND_HINT_SAVE
	syscall
	UNWIND_HINT_RESTORE
	pop %r11
	pop %rcx
	RET
SYM_FUNC_END(xen_hypercall_pv)
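
/*
 * Illustrative sketch (not taken from this file): a C-level caller
 * following the register convention above might look roughly like the
 * snippet below.  The exact constraint lists and clobbers live in
 * arch/x86/include/asm/xen/hypercall.h; the hypercall number and
 * arguments here are only an example:
 *
 *	long ret;
 *	asm volatile("call xen_hypercall_pv"
 *		     : "=a" (ret)
 *		     : "a" (__HYPERVISOR_xen_version), "D" (0), "S" (NULL)
 *		     : "memory");
 *
 * Because such callers do not list %rcx and %r11 as clobbered, the
 * function above saves and restores them around the syscall.
 */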

/*
 * Disabling events is simply a matter of making the event mask
 * non-zero.
 */
SYM_FUNC_START(xen_irq_disable_direct)
	ENDBR
	movb $1, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	RET
SYM_FUNC_END(xen_irq_disable_direct)
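
/*
 * XEN_vcpu_info_mask is the asm-offsets offset of the event channel
 * upcall mask byte (vcpu_info->evtchn_upcall_mask) inside the per-CPU
 * xen_vcpu_info copy.  A non-zero value means "events masked", so a
 * single byte store is the whole operation; no atomics are needed as
 * the mask is only ever manipulated by the CPU that owns it.
 */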

/*
 * Force an event check by making a hypercall, but preserve regs
 * before making the call.
 */
SYM_FUNC_START(check_events)
	FRAME_BEGIN
	push %rax
	push %rcx
	push %rdx
	push %rsi
	push %rdi
	push %r8
	push %r9
	push %r10
	push %r11
	call xen_force_evtchn_callback
	pop %r11
	pop %r10
	pop %r9
	pop %r8
	pop %rdi
	pop %rsi
	pop %rdx
	pop %rcx
	pop %rax
	FRAME_END
	RET
SYM_FUNC_END(check_events)
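
/*
 * The registers saved above are exactly the general purpose registers
 * that the C calling convention allows xen_force_evtchn_callback() to
 * clobber, so check_events preserves all GPRs across the call and
 * xen_irq_enable_direct below does not have to treat it like a normal
 * C function call.
 */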

/*
 * Enable events.  This clears the event mask and then tests whether
 * any events are pending.  If there are pending events, enter the
 * hypervisor to get them handled.
 */
SYM_FUNC_START(xen_irq_enable_direct)
	ENDBR
	FRAME_BEGIN
	/* Unmask events */
	movb $0, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)

	/*
	 * Preemption here doesn't matter, because it will itself deal
	 * with any pending interrupts.  The pending check may end up
	 * being run on the wrong CPU, but that doesn't hurt.
	 */
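
	/*
	 * Note the ordering: the mask was cleared first, so any event
	 * that becomes pending after the movb above is still caught by
	 * the pending test below; nothing can be lost between the two
	 * instructions.
	 */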

	/* Test for pending */
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_pending)
	jz 1f

	call check_events
1:
	FRAME_END
	RET
SYM_FUNC_END(xen_irq_enable_direct)

/*
 * (xen_)save_fl is used to get the current interrupt enable status.
 * Callers expect the status to be in X86_EFLAGS_IF, and other bits
 * may be set in the return value.  We take advantage of this by
 * making sure that X86_EFLAGS_IF has the right value (and other bits
 * in that byte are 0), but other bits in the return value are
 * undefined.  We need to toggle the state of the bit, because Xen and
 * x86 use opposite senses (mask vs enable).
 */
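/*
 * Worked example: with events enabled the mask byte is 0, so the testb
 * below sets ZF, setz stores 1 into %ah and addb %ah, %ah doubles that
 * to 2, i.e. %ax = 0x0200 = X86_EFLAGS_IF.  With events masked, ZF is
 * clear, %ah ends up 0 and the IF bit reads back as 0.
 */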
SYM_FUNC_START(xen_save_fl_direct)
	ENDBR
	testb $0xff, PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_mask)
	setz %ah
	addb %ah, %ah
	RET
SYM_FUNC_END(xen_save_fl_direct)

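/*
 * On a page fault in a PV guest the kernel cannot rely on reading the
 * real %cr2; Xen records the faulting address in vcpu_info->arch.cr2
 * instead, which is what both helpers below return (via the xen_vcpu
 * pointer, or directly from the per-CPU xen_vcpu_info copy).
 */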
SYM_FUNC_START(xen_read_cr2)
	ENDBR
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu), %_ASM_AX
	_ASM_MOV XEN_vcpu_info_arch_cr2(%_ASM_AX), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2);

SYM_FUNC_START(xen_read_cr2_direct)
	ENDBR
	FRAME_BEGIN
	_ASM_MOV PER_CPU_VAR(xen_vcpu_info + XEN_vcpu_info_arch_cr2), %_ASM_AX
	FRAME_END
	RET
SYM_FUNC_END(xen_read_cr2_direct);
.popsection

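/*
 * Stubs wrapping the native exception entry points.  Xen delivers
 * exceptions with %rcx and %r11 pushed on top of the iret-like frame
 * (the same convention as the syscall callback frame documented
 * further down), so each stub discards those two slots and then jumps
 * to the regular handler, which expects only the hardware-style frame.
 */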
.macro xen_pv_trap name
SYM_CODE_START(xen_\name)
	UNWIND_HINT_ENTRY
	ENDBR
	pop %rcx
	pop %r11
	jmp  \name
SYM_CODE_END(xen_\name)
_ASM_NOKPROBE(xen_\name)
.endm

xen_pv_trap asm_exc_divide_error
xen_pv_trap asm_xenpv_exc_debug
xen_pv_trap asm_exc_int3
xen_pv_trap asm_xenpv_exc_nmi
xen_pv_trap asm_exc_overflow
xen_pv_trap asm_exc_bounds
xen_pv_trap asm_exc_invalid_op
xen_pv_trap asm_exc_device_not_available
xen_pv_trap asm_xenpv_exc_double_fault
xen_pv_trap asm_exc_coproc_segment_overrun
xen_pv_trap asm_exc_invalid_tss
xen_pv_trap asm_exc_segment_not_present
xen_pv_trap asm_exc_stack_segment
xen_pv_trap asm_exc_general_protection
xen_pv_trap asm_exc_page_fault
xen_pv_trap asm_exc_spurious_interrupt_bug
xen_pv_trap asm_exc_coprocessor_error
xen_pv_trap asm_exc_alignment_check
#ifdef CONFIG_X86_CET
xen_pv_trap asm_exc_control_protection
#endif
#ifdef CONFIG_X86_MCE
xen_pv_trap asm_xenpv_exc_machine_check
#endif /* CONFIG_X86_MCE */
xen_pv_trap asm_exc_simd_coprocessor_error
#ifdef CONFIG_IA32_EMULATION
xen_pv_trap asm_int80_emulation
#endif
xen_pv_trap asm_exc_xen_unknown_trap
xen_pv_trap asm_exc_xen_hypervisor_callback

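/*
 * Early boot variant of the above: one small stub per early exception
 * vector, each stripping the %rcx/%r11 slots and jumping to the
 * corresponding native early IDT handler.  The .fill pads every stub
 * to XEN_EARLY_IDT_HANDLER_SIZE so the array can be indexed by vector
 * number, mirroring early_idt_handler_array.
 */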
	__INIT
SYM_CODE_START(xen_early_idt_handler_array)
	i = 0
	.rept NUM_EXCEPTION_VECTORS
	UNWIND_HINT_UNDEFINED
	ENDBR
	pop %rcx
	pop %r11
	jmp early_idt_handler_array + i*EARLY_IDT_HANDLER_SIZE
	i = i + 1
	.fill xen_early_idt_handler_array + i*XEN_EARLY_IDT_HANDLER_SIZE - ., 1, 0xcc
	.endr
SYM_CODE_END(xen_early_idt_handler_array)
	__FINIT

/*
 * Xen64 iret frame:
 *
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip		<-- standard iret frame
 *
 *	flags		<-- xen_iret must push from here on
 *
 *	rcx
 *	r11
 * rsp->rax
 */
.macro xen_hypercall_iret
	pushq $0	/* Flags */
	push %rcx
	push %r11
	push %rax
	mov  $__HYPERVISOR_iret, %eax
	syscall		/* Do the IRET. */
#ifdef CONFIG_MITIGATION_SLS
	int3
#endif
.endm
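
/*
 * A 64-bit PV kernel does not run in ring 0, so it cannot execute iret
 * itself to return to a lower-privileged context; the __HYPERVISOR_iret
 * hypercall (issued via syscall above) asks Xen to perform the return,
 * consuming rax, r11, rcx and the flags word that the macro pushes on
 * top of the standard iret frame.
 */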

SYM_CODE_START(xen_iret)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	xen_hypercall_iret
SYM_CODE_END(xen_iret)

/*
 * Xen PV doesn't use a trampoline stack; PER_CPU_VAR(cpu_tss_rw + TSS_sp0) is
 * also the kernel stack.  Reusing swapgs_restore_regs_and_return_to_usermode()
 * in Xen PV would cause %rsp to move up to the top of the kernel stack and
 * leave the IRET frame below %rsp, where it could be corrupted if an #NMI
 * interrupts.  And having swapgs_restore_regs_and_return_to_usermode() push
 * the IRET frame again at the same address would be pointless.
 */
SYM_CODE_START(xenpv_restore_regs_and_return_to_usermode)
	UNWIND_HINT_REGS
	POP_REGS

	/* stackleak_erase() can work safely on the kernel stack. */
	STACKLEAK_ERASE_NOCLOBBER

	addq	$8, %rsp	/* skip regs->orig_ax */
	jmp xen_iret
SYM_CODE_END(xenpv_restore_regs_and_return_to_usermode)

/*
 * Xen handles syscall callbacks much like ordinary exceptions, which
 * means we have:
 * - kernel gs
 * - kernel rsp
 * - an iret-like stack frame on the stack (including rcx and r11):
 *	ss
 *	rsp
 *	rflags
 *	cs
 *	rip
 *	r11
 * rsp->rcx
 */
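
/*
 * After the two initial pops in each entry point below, the saved rip
 * sits at 0(%rsp), cs at 1*8(%rsp), rflags at 2*8(%rsp), rsp at
 * 3*8(%rsp) and ss at 4*8(%rsp); that is why the handlers overwrite
 * the 1*8 and 4*8 slots with the CS/SS values the generic entry code
 * expects.
 */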

/* Normal 64-bit system call target */
SYM_CODE_START(xen_entry_SYSCALL_64)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER_CS, 1*8(%rsp)

	jmp entry_SYSCALL_64_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_64)

#ifdef CONFIG_IA32_EMULATION

/* 32-bit compat syscall target */
SYM_CODE_START(xen_entry_SYSCALL_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSCALL_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSCALL_compat)

/* 32-bit compat sysenter target */
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	/*
	 * NB: Xen is polite and clears TF from EFLAGS for us.  This means
	 * that we don't need to guard against single step exceptions here.
	 */
	popq %rcx
	popq %r11

	/*
	 * Neither Xen nor the kernel really knows what the old SS and
	 * CS were.  The kernel expects __USER_DS and __USER32_CS, so
	 * report those values even though Xen will guess its own values.
	 */
	movq $__USER_DS, 4*8(%rsp)
	movq $__USER32_CS, 1*8(%rsp)

	jmp entry_SYSENTER_compat_after_hwframe
SYM_CODE_END(xen_entry_SYSENTER_compat)

#else /* !CONFIG_IA32_EMULATION */

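/*
 * Without IA32 emulation a 32-bit syscall/sysenter attempt is simply
 * failed: the stubs below drop the %rcx/%r11 slots, set -ENOSYS as the
 * return value and go straight back to the caller via the iret
 * hypercall.
 */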
SYM_CODE_START(xen_entry_SYSCALL_compat)
SYM_CODE_START(xen_entry_SYSENTER_compat)
	UNWIND_HINT_ENTRY
	ENDBR
	lea 16(%rsp), %rsp	/* strip %rcx, %r11 */
	mov $-ENOSYS, %rax
	xen_hypercall_iret
SYM_CODE_END(xen_entry_SYSENTER_compat)
SYM_CODE_END(xen_entry_SYSCALL_compat)

#endif	/* CONFIG_IA32_EMULATION */
347