/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2015 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/kvm_ptrauth.h>

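/*
 * Byte offsets of the GPRs and sp_el0 within the kvm_cpu_context. The
 * GPRs live in a struct user_pt_regs at CPU_USER_PT_REGS; sp_el0 is
 * stashed in the slot that follows x30 (the pt_regs 'sp' field).
 */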
#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*x)
#define CPU_SP_EL0_OFFSET	(CPU_XREG_OFFSET(30) + 8)

	.text

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
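// Only x18-x30 are handled here: x0-x17 are caller-saved under AAPCS64
// and are saved/restored explicitly on the guest entry and exit paths.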
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28: the base register would be
	// clobbered by one of the loads below before the remaining
	// registers had been restored.
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

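/*
 * sp_el0 is not switched by hardware on guest entry/exit, and the host
 * kernel keeps its 'current' task pointer there, so it has to be
 * context-switched by hand alongside the GPRs.
 */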
.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	  [\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

/*
 * u64 __guest_enter(struct kvm_vcpu *vcpu,
 *		     struct kvm_cpu_context *host_ctxt);
 */
SYM_FUNC_START(__guest_enter)
	// x0: vcpu
	// x1: host context
	// x2-x17: clobbered by macros
	// x29: guest context

	// Store the host regs
	save_callee_saved_regs x1

	// Save the host's sp_el0
	save_sp_el0	x1, x2

	// Now that the host state is stored, a pending RAS SError must
	// affect the host rather than the guest we are about to enter.
	// If any asynchronous exception is pending we defer the guest
	// entry. The DSB isn't necessary before v8.2 as any SError
	// would be fatal.
alternative_if ARM64_HAS_RAS_EXTN
	dsb	nshst
	isb
alternative_else_nop_endif
	mrs	x1, isr_el1
	cbz	x1,  1f
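	// An asynchronous exception is pending for the host: abandon the
	// entry and report it as an IRQ exit so the host handles it first.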
	mov	x0, #ARM_EXCEPTION_IRQ
	ret

1:
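	// x29 points at the guest context (vcpu->arch.ctxt) from here on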
	add	x29, x0, #VCPU_CONTEXT

	// Macro ptrauth_switch_to_guest format:
	// 	ptrauth_switch_to_guest(guest ctxt, tmp1, tmp2, tmp3)
	// The macro below, which restores the guest keys, is not
	// implemented in C code as doing so could cause Pointer
	// Authentication key signing mismatch errors when this feature
	// is enabled for kernel code.
	ptrauth_switch_to_guest x29, x0, x1, x2
	// Restore the guest's sp_el0
	restore_sp_el0 x29, x0

	// Restore guest regs x0-x17
	ldp	x0, x1,   [x29, #CPU_XREG_OFFSET(0)]
	ldp	x2, x3,   [x29, #CPU_XREG_OFFSET(2)]
	ldp	x4, x5,   [x29, #CPU_XREG_OFFSET(4)]
	ldp	x6, x7,   [x29, #CPU_XREG_OFFSET(6)]
	ldp	x8, x9,   [x29, #CPU_XREG_OFFSET(8)]
	ldp	x10, x11, [x29, #CPU_XREG_OFFSET(10)]
	ldp	x12, x13, [x29, #CPU_XREG_OFFSET(12)]
	ldp	x14, x15, [x29, #CPU_XREG_OFFSET(14)]
	ldp	x16, x17, [x29, #CPU_XREG_OFFSET(16)]

	// Restore guest regs x18-x29, lr
	restore_callee_saved_regs x29

	// Do not touch any register after this!
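	// The SB below is a speculation barrier: it stops straight-line
	// speculation from running past the ERET with guest state loaded.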
	eret
	sb

SYM_INNER_LABEL(__guest_exit, SYM_L_GLOBAL)
	// x0: return code
	// x1: vcpu
	// x2-x29,lr: vcpu regs
	// vcpu x0-x1 on the stack
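	// (The hyp exception vectors stashed the guest's x0/x1 on the
	// stack before branching here, freeing them up as scratch.)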

	add	x1, x1, #VCPU_CONTEXT

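	// Make sure PSTATE.PAN is set while we run host code; it is not
	// guaranteed to have been set by the exception from the guest.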
	ALTERNATIVE(nop, SET_PSTATE_PAN(1), ARM64_HAS_PAN, CONFIG_ARM64_PAN)

	// Store the guest regs x2 and x3
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(2)]

	// Retrieve the guest regs x0-x1 from the stack
	ldp	x2, x3, [sp], #16	// x0, x1

	// Store the guest regs x0-x1 and x4-x17
	stp	x2, x3,   [x1, #CPU_XREG_OFFSET(0)]
	stp	x4, x5,   [x1, #CPU_XREG_OFFSET(4)]
	stp	x6, x7,   [x1, #CPU_XREG_OFFSET(6)]
	stp	x8, x9,   [x1, #CPU_XREG_OFFSET(8)]
	stp	x10, x11, [x1, #CPU_XREG_OFFSET(10)]
	stp	x12, x13, [x1, #CPU_XREG_OFFSET(12)]
	stp	x14, x15, [x1, #CPU_XREG_OFFSET(14)]
	stp	x16, x17, [x1, #CPU_XREG_OFFSET(16)]

	// Store the guest regs x18-x29, lr
	save_callee_saved_regs x1

	// Store the guest's sp_el0
	save_sp_el0	x1, x2

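	// Load this CPU's host context pointer into x2, with x3 as scratch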
	get_host_ctxt	x2, x3

	// Macro ptrauth_switch_to_host format:
	// 	ptrauth_switch_to_host(guest ctxt, host ctxt, tmp1, tmp2, tmp3)
	// The macro below, which saves/restores keys, is not implemented
	// in C code as doing so could cause Pointer Authentication key
	// signing mismatch errors when this feature is enabled for
	// kernel code.
	ptrauth_switch_to_host x1, x2, x3, x4, x5

	// Restore the host's sp_el0
	restore_sp_el0 x2, x3

	// Now restore the host regs
	restore_callee_saved_regs x2

alternative_if ARM64_HAS_RAS_EXTN
	// If we have the RAS extensions we can consume a pending error
	// without unmasking SError and issuing an ISB: the ESB instruction
	// already consumed any pending guest SError when we took the
	// exception from the guest, recording its syndrome in DISR_EL1.
	mrs_s	x2, SYS_DISR_EL1
	str	x2, [x1, #(VCPU_FAULT_DISR - VCPU_CONTEXT)]
	cbz	x2, 1f
	msr_s	SYS_DISR_EL1, xzr
	orr	x0, x0, #(1<<ARM_EXIT_WITH_SERROR_BIT)
1:	ret
alternative_else
	dsb	sy		// Synchronize against in-flight ld/st
	isb			// Prevent an early read of side-effect free ISR
	mrs	x2, isr_el1
	tbnz	x2, #8, 2f	// ISR_EL1.A
	ret
	nop
2:
alternative_endif
	// We know we have a pending asynchronous abort, now is the
	// time to flush it out. From your VAXorcist book, page 666:
	// "Threaten me not, oh Evil one!  For I speak with
	// the power of DEC, and I command thee to show thyself!"
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, spsr_el2
	mov	x5, x0

	msr	daifclr, #4	// Unmask aborts

	// This is our single instruction exception window. A pending
	// SError is guaranteed to occur at the earliest when we unmask
	// it, and at the latest just after the ISB.
abort_guest_exit_start:

	isb

abort_guest_exit_end:

	msr	daifset, #4	// Mask aborts
	ret

	_kvm_extable	abort_guest_exit_start, 9997f
	_kvm_extable	abort_guest_exit_end, 9997f
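	// If the SError lands within the window above, the hyp vectors
	// look it up in the kvm exception table and resume execution at
	// the 9997 fixup label below.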
9997:
	msr	daifset, #4	// Mask aborts
	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

	// Restore the exception context (clobbered by the SError we just
	// took) so that we can report some information. Merge the
	// exception code with the SError pending bit.
	msr	elr_el2, x2
	msr	esr_el2, x3
	msr	spsr_el2, x4
	orr	x0, x0, x5
1:	ret
SYM_FUNC_END(__guest_enter)