/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
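
/*
 * Worked example (illustrative, assuming VA_BITS == 48 and an idmap
 * page whose PA has bit 47 clear, i.e. the idmap lives in the bottom
 * half):
 *
 *	HYP_VA_MIN = 1UL << 47	= 0x0000800000000000
 *	HYP_VA_MAX		= 0x0000ffffffffffff
 *
 * so HYP's shadow of the kernel's linear map occupies the top half of
 * the 48-bit range while the idmap keeps the bottom half.
 */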

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>
#include <asm/kvm_nested.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)
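
/*
 * hyp_physvirt_offset is the difference (PA - hyp VA) for the hyp
 * mappings, so a plain addition converts a hyp VA to a PA, mirroring
 * the hyp_pa assembly macro above.
 */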

/*
 * Convert a kernel VA into a HYP VA.
 *
 * Can be called from hyp or non-hyp context.
 *
 * The actual code generation takes place in kvm_update_va_mask(), and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask() uses the
 * specific registers encoded in the instructions).
 */
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
/*
 * This #ifndef is an optimisation for when this is called from VHE hyp
 * context.  When called from a VHE non-hyp context, kvm_update_va_mask() will
 * replace the instructions with `nop`s.
 */
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"         /* mask with va_mask */
				    "ror %0, %0, #1\n"         /* rotate to the first tag bit */
				    "add %0, %0, #0\n"         /* insert the low 12 bits of the tag */
				    "add %0, %0, #0, lsl 12\n" /* insert the top 12 bits of the tag */
				    "ror %0, %0, #63\n",       /* rotate back */
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
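
/*
 * For illustration: a C sketch of what the patched sequence in
 * __kern_hyp_va() ends up computing. va_mask, tag_val and tag_lsb
 * stand in for the boot-time values derived by kvm_compute_layout();
 * the helper itself is purely expository.
 */
static inline unsigned long __kern_hyp_va_sketch(unsigned long v,
						 unsigned long va_mask,
						 unsigned long tag_val,
						 unsigned int tag_lsb)
{
	/* Keep the low bits of the kernel VA, then insert the HYP tag. */
	return (v & va_mask) | (tag_val << tag_lsb);
}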

extern u32 __hyp_va_bits;

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(mmu)		VTCR_EL2_IPA((mmu)->vtcr)
#define kvm_phys_size(mmu)		(_AC(1, ULL) << kvm_phys_shift(mmu))
#define kvm_phys_mask(mmu)		(kvm_phys_size(mmu) - _AC(1, ULL))
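
/*
 * For example, with the default 40-bit IPA space: kvm_phys_size() is
 * 1ULL << 40 (1TiB) and kvm_phys_mask() is 0xffffffffff.
 */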

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);

void kvm_stage2_unmap_range(struct kvm_s2_mmu *mmu, phys_addr_t start,
			    u64 size, bool may_block);
void kvm_stage2_flush_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);
void kvm_stage2_wp_range(struct kvm_s2_mmu *mmu, phys_addr_t addr, phys_addr_t end);

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}
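
/*
 * Note that HYP_VECTOR_DIRECT and HYP_VECTOR_SPECTRE_DIRECT both map
 * to idx 0 (hence the "slot - (slot != HYP_VECTOR_DIRECT)" above),
 * and each subsequent slot is one SZ_2K vector table away from base.
 */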

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
	int reg;

	if (vcpu_is_el2(vcpu))
		reg = SCTLR_EL2;
	else
		reg = SCTLR_EL1;

	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline size_t __invalidate_icache_max_range(void)
{
	u8 iminline;
	u64 ctr;

	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_compute_final_ctr_el0)
		     : "=r" (ctr));

	iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2;
	return MAX_DVM_OPS << iminline;
}
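
/*
 * CTR_EL0.IminLine encodes log2(words) of the smallest I-cache line,
 * so the "+ 2" above converts it to log2(bytes). For example,
 * IminLine == 4 means 64-byte lines, giving an invalidation budget of
 * MAX_DVM_OPS * 64 bytes.
 */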

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	/*
	 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
	 * invalidation range exceeds our arbitrary limit on invalidations by
	 * cache line.
	 */
	if (icache_is_aliasing() || size > __invalidate_icache_max_range())
		icache_inval_all_pou();
	else
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}
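
/*
 * Sketch of the resulting VTTBR_EL2 value: the stage-2 table base
 * address in the low bits (with bit 0 doubling as the CnP hint), and
 * the VMID at VTTBR_VMID_SHIFT (bits [63:48]), truncated to
 * kvm_arm_vmid_bits significant bits.
 */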

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(mmu->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}

static inline u64 get_vmid(u64 vttbr)
{
	return (vttbr & VTTBR_VMID_MASK(kvm_get_vmid_bits())) >>
		VTTBR_VMID_SHIFT;
}

static inline bool kvm_s2_mmu_valid(struct kvm_s2_mmu *mmu)
{
	return !(mmu->tlb_vttbr & VTTBR_CNP_BIT);
}

static inline bool kvm_is_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
{
	/*
	 * Be careful, mmu may not be fully initialised so do not look at
	 * *any* of its fields.
	 */
	return &kvm->arch.mmu != mmu;
}

static inline void kvm_fault_lock(struct kvm *kvm)
{
	if (is_protected_kvm_enabled())
		write_lock(&kvm->mmu_lock);
	else
		read_lock(&kvm->mmu_lock);
}

static inline void kvm_fault_unlock(struct kvm *kvm)
{
	if (is_protected_kvm_enabled())
		write_unlock(&kvm->mmu_lock);
	else
		read_unlock(&kvm->mmu_lock);
}
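
/*
 * Illustrative usage (handle_stage2_fault() is a hypothetical helper,
 * not a real caller): fault handlers bracket their stage-2 walk with
 * these helpers, so non-protected VMs can fault in parallel under the
 * read lock while protected mode serialises under the write lock:
 *
 *	kvm_fault_lock(kvm);
 *	ret = handle_stage2_fault(vcpu);
 *	kvm_fault_unlock(kvm);
 */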

#ifdef CONFIG_PTDUMP_STAGE2_DEBUGFS
void kvm_s2_ptdump_create_debugfs(struct kvm *kvm);
#else
static inline void kvm_s2_ptdump_create_debugfs(struct kvm *kvm) {}
#endif /* CONFIG_PTDUMP_STAGE2_DEBUGFS */

#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */