/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM_KVM_ASM_H__
#define __ARM_KVM_ASM_H__

#include <asm/hyp_image.h>
#include <asm/insn.h>
#include <asm/virt.h>
#include <asm/sysreg.h>

#define ARM_EXIT_WITH_SERROR_BIT  31
#define ARM_EXCEPTION_CODE(x)	  ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
#define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
#define ARM_SERROR_PENDING(x)	  !!((x) & (1U << ARM_EXIT_WITH_SERROR_BIT))

#define ARM_EXCEPTION_IRQ	  0
#define ARM_EXCEPTION_EL1_SERROR  1
#define ARM_EXCEPTION_TRAP	  2
#define ARM_EXCEPTION_IL	  3
/* The hyp-stub will return this for any kvm_call_hyp() call */
#define ARM_EXCEPTION_HYP_GONE	  HVC_STUB_ERR
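
/*
 * Worked example of the packing above (illustrative): a return value of
 * (ARM_EXCEPTION_TRAP | (1U << ARM_EXIT_WITH_SERROR_BIT)) makes
 * ARM_SERROR_PENDING() return 1 while ARM_EXCEPTION_CODE() still yields
 * ARM_EXCEPTION_TRAP, so a single exit code carries both the exit
 * reason and a pending SError.
 */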

#define kvm_arm_exception_type					\
	{ARM_EXCEPTION_IRQ,		"IRQ"		},	\
	{ARM_EXCEPTION_EL1_SERROR,	"SERROR"	},	\
	{ARM_EXCEPTION_TRAP,		"TRAP"		},	\
	{ARM_EXCEPTION_HYP_GONE,	"HYP_GONE"	}

/*
 * Size of the HYP vectors preamble. kvm_patch_vector_branch() generates code
 * that jumps over this.
 */
#define KVM_VECTOR_PREAMBLE	(2 * AARCH64_INSN_SIZE)

#define KVM_HOST_SMCCC_ID(id)						\
	ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL,				\
			   ARM_SMCCC_SMC_64,				\
			   ARM_SMCCC_OWNER_VENDOR_HYP,			\
			   (id))

#define KVM_HOST_SMCCC_FUNC(name) KVM_HOST_SMCCC_ID(__KVM_HOST_SMCCC_FUNC_##name)
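
/*
 * For example (illustrative expansion), KVM_HOST_SMCCC_FUNC(__kvm_vcpu_run)
 * builds a fast SMC64 call owned by ARM_SMCCC_OWNER_VENDOR_HYP whose
 * function number is __KVM_HOST_SMCCC_FUNC___kvm_vcpu_run from the enum
 * below; this is the ID the host places in x0 when issuing the HVC.
 */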

#define __KVM_HOST_SMCCC_FUNC___kvm_hyp_init			0

#ifndef __ASSEMBLER__

#include <linux/mm.h>

#define MARKER(m)				\
	m, __after_##m = m - 1
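
/*
 * A marker labels a position in the enumeration without consuming an ID
 * of its own: in a made-up enum { A, MARKER(B), C }, B takes the next
 * value, __after_B backs up to B - 1, and C therefore gets the same
 * value as B.
 */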

enum __kvm_host_smccc_func {
	/* Hypercalls that are unavailable once pKVM has finalised. */
	/* __KVM_HOST_SMCCC_FUNC___kvm_hyp_init */
	__KVM_HOST_SMCCC_FUNC___pkvm_init = __KVM_HOST_SMCCC_FUNC___kvm_hyp_init + 1,
	__KVM_HOST_SMCCC_FUNC___pkvm_create_private_mapping,
	__KVM_HOST_SMCCC_FUNC___pkvm_cpu_set_vector,
	__KVM_HOST_SMCCC_FUNC___kvm_enable_ssbs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_init_lrs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_get_gic_config,

	MARKER(__KVM_HOST_SMCCC_FUNC_MIN_PKVM),

	__KVM_HOST_SMCCC_FUNC___pkvm_prot_finalize,

	/* Hypercalls that are always available and common to [nh]VHE/pKVM. */
	__KVM_HOST_SMCCC_FUNC___kvm_adjust_pc,
	__KVM_HOST_SMCCC_FUNC___kvm_vcpu_run,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_vm_context,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_ipa_nsh,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid,
	__KVM_HOST_SMCCC_FUNC___kvm_tlb_flush_vmid_range,
	__KVM_HOST_SMCCC_FUNC___kvm_flush_cpu_context,
	__KVM_HOST_SMCCC_FUNC___kvm_timer_set_cntvoff,
	__KVM_HOST_SMCCC_FUNC___tracing_load,
	__KVM_HOST_SMCCC_FUNC___tracing_unload,
	__KVM_HOST_SMCCC_FUNC___tracing_enable,
	__KVM_HOST_SMCCC_FUNC___tracing_swap_reader,
	__KVM_HOST_SMCCC_FUNC___tracing_update_clock,
	__KVM_HOST_SMCCC_FUNC___tracing_reset,
	__KVM_HOST_SMCCC_FUNC___tracing_enable_event,
	__KVM_HOST_SMCCC_FUNC___tracing_write_event,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_save_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v3_restore_vmcr_aprs,
	__KVM_HOST_SMCCC_FUNC___vgic_v5_save_apr,
	__KVM_HOST_SMCCC_FUNC___vgic_v5_restore_vmcr_apr,

	MARKER(__KVM_HOST_SMCCC_FUNC_PKVM_ONLY),

	/* Hypercalls that are available only when pKVM has finalised. */
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_hyp,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_donate_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_share_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_unshare_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_relax_perms_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_wrprotect_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_test_clear_young_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_host_mkyoung_guest,
	__KVM_HOST_SMCCC_FUNC___pkvm_reserve_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_unreserve_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_init_vcpu,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_in_poison_fault,
	__KVM_HOST_SMCCC_FUNC___pkvm_force_reclaim_guest_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_reclaim_dying_guest_page,
	__KVM_HOST_SMCCC_FUNC___pkvm_start_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_finalize_teardown_vm,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_load,
	__KVM_HOST_SMCCC_FUNC___pkvm_vcpu_put,
	__KVM_HOST_SMCCC_FUNC___pkvm_tlb_flush_vmid,

	MARKER(__KVM_HOST_SMCCC_FUNC_MAX)
};
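
/*
 * Host code does not use these IDs directly; the kvm_call_hyp_nvhe()
 * wrapper in asm/kvm_host.h hides the SMCCC plumbing. A minimal sketch:
 *
 *	kvm_call_hyp_nvhe(__kvm_flush_vm_context);
 *
 * issues an HVC with KVM_HOST_SMCCC_FUNC(__kvm_flush_vm_context) in x0.
 */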

#define DECLARE_KVM_VHE_SYM(sym)	extern char sym[]
#define DECLARE_KVM_NVHE_SYM(sym)	extern char kvm_nvhe_sym(sym)[]

/*
 * Define a pair of symbols sharing the same name, one defined in the
 * VHE and the other in the nVHE hyp implementation.
 */
#define DECLARE_KVM_HYP_SYM(sym)		\
	DECLARE_KVM_VHE_SYM(sym);		\
	DECLARE_KVM_NVHE_SYM(sym)
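
/*
 * For instance, DECLARE_KVM_HYP_SYM(__kvm_hyp_vector) (used below)
 * declares both __kvm_hyp_vector and its nVHE twin
 * kvm_nvhe_sym(__kvm_hyp_vector), i.e. __kvm_nvhe___kvm_hyp_vector with
 * the prefixing applied by asm/hyp_image.h.
 */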

#define DECLARE_KVM_VHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, sym)
#define DECLARE_KVM_NVHE_PER_CPU(type, sym)	\
	DECLARE_PER_CPU(type, kvm_nvhe_sym(sym))

#define DECLARE_KVM_HYP_PER_CPU(type, sym)	\
	DECLARE_KVM_VHE_PER_CPU(type, sym);	\
	DECLARE_KVM_NVHE_PER_CPU(type, sym)

/*
 * Compute a pointer to a symbol defined in the nVHE percpu region.
 * Returns NULL if the percpu memory has not been allocated yet.
 */
#define this_cpu_ptr_nvhe_sym(sym)	per_cpu_ptr_nvhe_sym(sym, smp_processor_id())
#define per_cpu_ptr_nvhe_sym(sym, cpu)						\
	({									\
		unsigned long base, off;					\
		base = kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[cpu];		\
		off = (unsigned long)&CHOOSE_NVHE_SYM(sym) -			\
		      (unsigned long)&CHOOSE_NVHE_SYM(__per_cpu_start);		\
		base ? (typeof(CHOOSE_NVHE_SYM(sym))*)(base + off) : NULL;	\
	})
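
/*
 * A minimal usage sketch from host code (kvm_host_data is one such nVHE
 * percpu symbol):
 *
 *	struct kvm_host_data *hd = per_cpu_ptr_nvhe_sym(kvm_host_data, cpu);
 *
 *	if (!hd)
 *		return;
 *
 * where a NULL result means the hyp percpu pages have not been
 * allocated yet.
 */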

#if defined(__KVM_NVHE_HYPERVISOR__)

#define CHOOSE_NVHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_NVHE_SYM(sym)

/* The nVHE hypervisor shouldn't even try to access VHE symbols */
extern void *__nvhe_undefined_symbol;
#define CHOOSE_VHE_SYM(sym)		__nvhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__nvhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__nvhe_undefined_symbol)

#elif defined(__KVM_VHE_HYPERVISOR__)

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_HYP_SYM(sym)	CHOOSE_VHE_SYM(sym)

/* The VHE hypervisor shouldn't even try to access nVHE symbols */
extern void *__vhe_undefined_symbol;
#define CHOOSE_NVHE_SYM(sym)		__vhe_undefined_symbol
#define this_cpu_ptr_hyp_sym(sym)	(&__vhe_undefined_symbol)
#define per_cpu_ptr_hyp_sym(sym, cpu)	(&__vhe_undefined_symbol)

#else

/*
 * BIG FAT WARNINGS:
 *
 * - Don't be tempted to change the following is_kernel_in_hyp_mode()
 *   to has_vhe(). has_vhe() is implemented as a *final* capability,
 *   while this is used early at boot time, when the capabilities are
 *   not final yet...
 *
 * - Don't let the nVHE hypervisor have access to this, as it will
 *   pick the *wrong* symbol (yes, it runs at EL2...).
 */
#define CHOOSE_HYP_SYM(sym)		(is_kernel_in_hyp_mode()	\
					   ? CHOOSE_VHE_SYM(sym)	\
					   : CHOOSE_NVHE_SYM(sym))

#define this_cpu_ptr_hyp_sym(sym)	(is_kernel_in_hyp_mode()	\
					   ? this_cpu_ptr(&sym)		\
					   : this_cpu_ptr_nvhe_sym(sym))

#define per_cpu_ptr_hyp_sym(sym, cpu)	(is_kernel_in_hyp_mode()	\
					   ? per_cpu_ptr(&sym, cpu)	\
					   : per_cpu_ptr_nvhe_sym(sym, cpu))

#define CHOOSE_VHE_SYM(sym)	sym
#define CHOOSE_NVHE_SYM(sym)	kvm_nvhe_sym(sym)

#endif
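
/*
 * Net effect (sketch): in the kernel proper,
 * this_cpu_ptr_hyp_sym(kvm_host_data) resolves to the VHE per-cpu copy
 * when the kernel runs at EL2 and to the nVHE copy otherwise, so
 * callers never have to spell out the mode themselves.
 */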

struct kvm_nvhe_init_params {
	unsigned long mair_el2;
	unsigned long tcr_el2;
	unsigned long tpidr_el2;
	unsigned long stack_hyp_va;
	unsigned long stack_pa;
	phys_addr_t pgd_pa;
	unsigned long hcr_el2;
	unsigned long vttbr;
	unsigned long vtcr;
	unsigned long tmp;
};

/*
 * Used by the host in EL1 to dump the nVHE hypervisor backtrace on
 * hyp_panic() in non-protected mode.
 *
 * @stack_base:                 hyp VA of the hyp_stack base.
 * @overflow_stack_base:        hyp VA of the hyp_overflow_stack base.
 * @fp:                         hyp FP where the backtrace begins.
 * @pc:                         hyp PC where the backtrace begins.
 */
struct kvm_nvhe_stacktrace_info {
	unsigned long stack_base;
	unsigned long overflow_stack_base;
	unsigned long fp;
	unsigned long pc;
};

/* Translate a kernel address @ptr into its equivalent linear mapping */
#define kvm_ksym_ref(ptr)						\
	({								\
		void *val = (ptr);					\
		if (!is_kernel_in_hyp_mode())				\
			val = lm_alias((ptr));				\
		val;							\
	})
#define kvm_ksym_ref_nvhe(sym)	kvm_ksym_ref(kvm_nvhe_sym(sym))
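
/*
 * For example (illustrative), kvm_ksym_ref_nvhe(__kvm_hyp_vector)
 * yields the linear-map alias of the nVHE vectors when the kernel runs
 * at EL1, and the unmodified kernel-image address under VHE, where no
 * alias is needed.
 */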

struct kvm;
struct kvm_vcpu;
struct kvm_s2_mmu;

DECLARE_KVM_NVHE_SYM(__kvm_hyp_init);
DECLARE_KVM_HYP_SYM(__kvm_hyp_vector);
#define __kvm_hyp_init		CHOOSE_NVHE_SYM(__kvm_hyp_init)
#define __kvm_hyp_vector	CHOOSE_HYP_SYM(__kvm_hyp_vector)

extern unsigned long kvm_nvhe_sym(kvm_arm_hyp_percpu_base)[];
DECLARE_KVM_NVHE_SYM(__per_cpu_start);
DECLARE_KVM_NVHE_SYM(__per_cpu_end);

DECLARE_KVM_HYP_SYM(__bp_harden_hyp_vecs);
#define __bp_harden_hyp_vecs	CHOOSE_HYP_SYM(__bp_harden_hyp_vecs)

extern void __kvm_flush_vm_context(void);
extern void __kvm_flush_cpu_context(struct kvm_s2_mmu *mmu);
extern void __kvm_tlb_flush_vmid_ipa(struct kvm_s2_mmu *mmu, phys_addr_t ipa,
				     int level);
extern void __kvm_tlb_flush_vmid_ipa_nsh(struct kvm_s2_mmu *mmu,
					 phys_addr_t ipa,
					 int level);
extern void __kvm_tlb_flush_vmid_range(struct kvm_s2_mmu *mmu,
					phys_addr_t start, unsigned long pages);
extern void __kvm_tlb_flush_vmid(struct kvm_s2_mmu *mmu);

extern int __kvm_tlbi_s1e2(struct kvm_s2_mmu *mmu, u64 va, u64 sys_encoding);

extern void __kvm_timer_set_cntvoff(u64 cntvoff);
extern int __kvm_at_s1e01(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern int __kvm_at_s1e2(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);
extern int __kvm_at_s12(struct kvm_vcpu *vcpu, u32 op, u64 vaddr);

extern int __kvm_vcpu_run(struct kvm_vcpu *vcpu);

extern void __kvm_adjust_pc(struct kvm_vcpu *vcpu);

extern u64 __vgic_v3_get_gic_config(void);
extern void __vgic_v3_init_lrs(void);

#define __KVM_EXTABLE(from, to)						\
	"	.pushsection	__kvm_ex_table, \"a\"\n"		\
	"	.align		3\n"					\
	"	.long		(" #from " - .), (" #to " - .)\n"	\
	"	.popsection\n"

#define __kvm_at(at_op, addr)						\
( {									\
	int __kvm_at_err = 0;						\
	u64 spsr, elr;							\
	asm volatile(							\
	"	mrs	%1, spsr_el2\n"					\
	"	mrs	%2, elr_el2\n"					\
	"1:	" __msr_s(at_op, "%3") "\n"				\
	"	isb\n"							\
	"	b	9f\n"						\
	"2:	msr	spsr_el2, %1\n"					\
	"	msr	elr_el2, %2\n"					\
	"	mov	%w0, %4\n"					\
	"9:\n"								\
	__KVM_EXTABLE(1b, 2b)						\
	: "+r" (__kvm_at_err), "=&r" (spsr), "=&r" (elr)		\
	: "r" (addr), "i" (-EFAULT));					\
	__kvm_at_err;							\
} )
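
/*
 * Illustrative use (a sketch mirroring the hyp fault-handling paths):
 *
 *	if (__kvm_at(OP_AT_S1E1R, vaddr))
 *		return false;	(AT faulted: -EFAULT, PAR_EL1 not valid)
 *	par = read_sysreg_par();
 *
 * On the fault path the extable fixup restores SPSR_EL2/ELR_EL2, which
 * the exception taken during the AT walk will have clobbered.
 */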

asmlinkage void __noreturn hyp_panic(void);
asmlinkage void __noreturn hyp_panic_bad_stack(void);
asmlinkage void kvm_unexpected_el2_exception(void);
struct kvm_cpu_context;
void handle_trap(struct kvm_cpu_context *host_ctxt);
asmlinkage void __noreturn __kvm_host_psci_cpu_on_entry(void);
asmlinkage void __noreturn __kvm_host_psci_cpu_resume_entry(void);
void __noreturn __pkvm_init_finalise(void);
void kvm_nvhe_prepare_backtrace(unsigned long fp, unsigned long pc);
void kvm_patch_vector_branch(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_get_kimage_voffset(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_final_ctr_el0(struct alt_instr *alt,
	__le32 *origptr, __le32 *updptr, int nr_inst);
void __noreturn __cold nvhe_hyp_panic_handler(u64 esr, u64 spsr, u64 elr_virt,
	u64 elr_phys, u64 par, uintptr_t vcpu, u64 far, u64 hpfar);

#else /* __ASSEMBLER__ */

.macro get_host_ctxt reg, tmp
	adr_this_cpu \reg, kvm_host_data, \tmp
	add	\reg, \reg, #HOST_DATA_CONTEXT
.endm

.macro get_vcpu_ptr vcpu, ctxt
	get_host_ctxt \ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm
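
/*
 * Illustrative use from an EL2 vector (sketch): the hyp entry code does
 *
 *	get_vcpu_ptr	x1, x0		// x1 = running vcpu, x0 clobbered
 *
 * before touching any guest register state.
 */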

.macro get_loaded_vcpu vcpu, ctxt
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \vcpu
	ldr	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

.macro set_loaded_vcpu vcpu, ctxt, tmp
	adr_this_cpu \ctxt, kvm_hyp_ctxt, \tmp
	str	\vcpu, [\ctxt, #HOST_CONTEXT_VCPU]
.endm

/*
 * KVM extable for unexpected exceptions.
 * Creates a struct kvm_exception_table_entry in a section that can be
 * mapped at EL2. The table is not sorted.
 *
 * The caller must ensure that x18 holds the hypervisor value (so that
 * any Shadow-Call-Stack instrumented code can write to it), and that
 * SPSR_EL2 and ELR_EL2 are restored by the fixup.
 */
.macro	_kvm_extable, from, to
	.pushsection	__kvm_ex_table, "a"
	.align		3
	.long		(\from - .), (\to - .)
	.popsection
.endm
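
/*
 * Illustrative pairing (sketch):
 *
 *	1:	ldr	x0, [x2]	// may take an unexpected exception
 *		_kvm_extable	1b, 2f
 *		...
 *	2:	// fixup runs here if 1: faults
 */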

#define CPU_XREG_OFFSET(x)	(CPU_USER_PT_REGS + 8*(x))
#define CPU_LR_OFFSET		CPU_XREG_OFFSET(30)
#define CPU_SP_EL0_OFFSET	(CPU_LR_OFFSET + 8)
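
/*
 * e.g. CPU_XREG_OFFSET(19) is CPU_USER_PT_REGS + 152; sp_el0 lives one
 * slot past x30 (the LR), hence the + 8 above.
 */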

/*
 * We treat x18 as callee-saved as the host may use it as a platform
 * register (e.g. for shadow call stack).
 */
.macro save_callee_saved_regs ctxt
	str	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	stp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	stp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	stp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	stp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	stp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	stp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm

.macro restore_callee_saved_regs ctxt
	// We require \ctxt is not x18-x28
	ldr	x18,      [\ctxt, #CPU_XREG_OFFSET(18)]
	ldp	x19, x20, [\ctxt, #CPU_XREG_OFFSET(19)]
	ldp	x21, x22, [\ctxt, #CPU_XREG_OFFSET(21)]
	ldp	x23, x24, [\ctxt, #CPU_XREG_OFFSET(23)]
	ldp	x25, x26, [\ctxt, #CPU_XREG_OFFSET(25)]
	ldp	x27, x28, [\ctxt, #CPU_XREG_OFFSET(27)]
	ldp	x29, lr,  [\ctxt, #CPU_XREG_OFFSET(29)]
.endm
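
/*
 * These run as a matched pair around the world switch (sketch): the
 * guest-entry path saves the host's callee-saved registers into the
 * host context and the exit path restores them from the same context,
 * so the host's EL1 state survives the excursion through the guest.
 */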

.macro save_sp_el0 ctxt, tmp
	mrs	\tmp,	sp_el0
	str	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
.endm

.macro restore_sp_el0 ctxt, tmp
	ldr	\tmp,	[\ctxt, #CPU_SP_EL0_OFFSET]
	msr	sp_el0, \tmp
.endm

#endif /* __ASSEMBLER__ */

#endif /* __ARM_KVM_ASM_H__ */