xref: /linux/arch/arm64/include/asm/kvm_mmu.h (revision d2a4a07190f42e4f82805daf58e708400b703f1c)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#ifndef __ARM64_KVM_MMU_H__
#define __ARM64_KVM_MMU_H__

#include <asm/page.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/cpufeature.h>

/*
 * As ARMv8.0 only has the TTBR0_EL2 register, we cannot express
 * "negative" addresses. This makes it impossible to directly share
 * mappings with the kernel.
 *
 * Instead, give the HYP mode its own VA region at a fixed offset from
 * the kernel by just masking the top bits (which are all ones for a
 * kernel address). We need to find out how many bits to mask.
 *
 * We want to build a set of page tables that cover both parts of the
 * idmap (the trampoline page used to initialize EL2), and our normal
 * runtime VA space, at the same time.
 *
 * Given that the kernel uses VA_BITS for its entire address space,
 * and that half of that space (VA_BITS - 1) is used for the linear
 * mapping, we can also limit the EL2 space to (VA_BITS - 1).
 *
 * The main question is "Within the VA_BITS space, does EL2 use the
 * top or the bottom half of that space to shadow the kernel's linear
 * mapping?". As we need to idmap the trampoline page, this is
 * determined by the range in which this page lives.
 *
 * If the page is in the bottom half, we have to use the top half. If
 * the page is in the top half, we have to use the bottom half:
 *
 * T = __pa_symbol(__hyp_idmap_text_start)
 * if (T & BIT(VA_BITS - 1))
 *	HYP_VA_MIN = 0  // idmap in upper half
 * else
 *	HYP_VA_MIN = 1 << (VA_BITS - 1)
 * HYP_VA_MAX = HYP_VA_MIN + (1 << (VA_BITS - 1)) - 1
 *
 * When using VHE, there are no separate hyp mappings and all KVM
 * functionality is already mapped as part of the main kernel
 * mappings, and none of this applies in that case.
 */
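
/*
 * A worked example (illustrative only, assuming a 48-bit VA kernel):
 * if bit 47 of __pa_symbol(__hyp_idmap_text_start) is clear, the idmap
 * sits in the bottom half, so HYP_VA_MIN = 1 << 47 and the hyp VA
 * space shadows the kernel's linear map from the top half; if bit 47
 * is set, HYP_VA_MIN = 0 and the bottom half is used instead.
 */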

#ifdef __ASSEMBLY__

#include <asm/alternative.h>

/*
 * Convert a hypervisor VA to a PA
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 */
.macro hyp_pa reg, tmp
	ldr_l	\tmp, hyp_physvirt_offset
	add	\reg, \reg, \tmp
.endm

/*
 * Convert a hypervisor VA to a kernel image address
 * reg: hypervisor address to be converted in place
 * tmp: temporary register
 *
 * The actual code generation takes place in kvm_get_kimage_voffset, and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_get_kimage_voffset uses the
 * specific registers encoded in the instructions).
 */
.macro hyp_kimg_va reg, tmp
	/* Convert hyp VA -> PA. */
	hyp_pa	\reg, \tmp

	/* Load kimage_voffset. */
alternative_cb ARM64_ALWAYS_SYSTEM, kvm_get_kimage_voffset
	movz	\tmp, #0
	movk	\tmp, #0, lsl #16
	movk	\tmp, #0, lsl #32
	movk	\tmp, #0, lsl #48
alternative_cb_end

	/* Convert PA -> kimg VA. */
	add	\reg, \reg, \tmp
.endm
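
/*
 * Illustrative use (a sketch, not taken from this file): given a hyp VA
 * in x0, recover the corresponding kernel image address, using x1 as a
 * scratch register. Both macros convert the address in place and
 * clobber the temporary register:
 *
 *	hyp_kimg_va x0, x1
 */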

#else

#include <linux/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/cache.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_host.h>

void kvm_update_va_mask(struct alt_instr *alt,
			__le32 *origptr, __le32 *updptr, int nr_inst);
void kvm_compute_layout(void);
void kvm_apply_hyp_relocations(void);

#define __hyp_pa(x) (((phys_addr_t)(x)) + hyp_physvirt_offset)

/*
 * Convert a kernel VA into a HYP VA.
 *
 * Can be called from hyp or non-hyp context.
 *
 * The actual code generation takes place in kvm_update_va_mask(), and
 * the instructions below are only there to reserve the space and
 * perform the register allocation (kvm_update_va_mask() uses the
 * specific registers encoded in the instructions).
 */
static __always_inline unsigned long __kern_hyp_va(unsigned long v)
{
/*
 * This #ifndef is an optimisation for when this is called from VHE hyp
 * context.  When called from a VHE non-hyp context, kvm_update_va_mask() will
 * replace the instructions with `nop`s.
 */
#ifndef __KVM_VHE_HYPERVISOR__
	asm volatile(ALTERNATIVE_CB("and %0, %0, #1\n"         /* mask with va_mask */
				    "ror %0, %0, #1\n"         /* rotate to the first tag bit */
				    "add %0, %0, #0\n"         /* insert the low 12 bits of the tag */
				    "add %0, %0, #0, lsl 12\n" /* insert the top 12 bits of the tag */
				    "ror %0, %0, #63\n",       /* rotate back */
				    ARM64_ALWAYS_SYSTEM,
				    kvm_update_va_mask)
		     : "+r" (v));
#endif
	return v;
}

#define kern_hyp_va(v)	((typeof(v))(__kern_hyp_va((unsigned long)(v))))
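
/*
 * Illustrative (hypothetical) use, sketching the usual pattern: a kernel
 * object is first made accessible to EL2 (e.g. via kvm_share_hyp() or
 * create_hyp_mappings()), and its EL2 alias is then derived with
 * kern_hyp_va():
 *
 *	static struct my_hyp_data data;		// hypothetical object
 *
 *	err = kvm_share_hyp(&data, &data + 1);
 *	if (!err)
 *		hyp_data_ptr = kern_hyp_va(&data);
 */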

/*
 * We currently support using a VM-specified IPA size. For backward
 * compatibility, the default IPA size is fixed to 40 bits.
 */
#define KVM_PHYS_SHIFT	(40)

#define kvm_phys_shift(mmu)		VTCR_EL2_IPA((mmu)->vtcr)
#define kvm_phys_size(mmu)		(_AC(1, ULL) << kvm_phys_shift(mmu))
#define kvm_phys_mask(mmu)		(kvm_phys_size(mmu) - _AC(1, ULL))
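
/*
 * For example (illustrative only): for a VM configured with the default
 * 40-bit IPA space, kvm_phys_shift() is 40, kvm_phys_size() is
 * 1ULL << 40 (1 TiB) and kvm_phys_mask() is 0xffffffffff.
 */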

#include <asm/kvm_pgtable.h>
#include <asm/stage2_pgtable.h>

int kvm_share_hyp(void *from, void *to);
void kvm_unshare_hyp(void *from, void *to);
int create_hyp_mappings(void *from, void *to, enum kvm_pgtable_prot prot);
int __create_hyp_mappings(unsigned long start, unsigned long size,
			  unsigned long phys, enum kvm_pgtable_prot prot);
int hyp_alloc_private_va_range(size_t size, unsigned long *haddr);
int create_hyp_io_mappings(phys_addr_t phys_addr, size_t size,
			   void __iomem **kaddr,
			   void __iomem **haddr);
int create_hyp_exec_mappings(phys_addr_t phys_addr, size_t size,
			     void **haddr);
int create_hyp_stack(phys_addr_t phys_addr, unsigned long *haddr);
void __init free_hyp_pgds(void);
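
/*
 * Illustrative (hypothetical) sketch: map a page of device MMIO so that
 * it is visible both to the kernel and to the hyp code, receiving a
 * kernel ioremap address and a hyp VA:
 *
 *	void __iomem *kaddr, *haddr;
 *
 *	err = create_hyp_io_mappings(mmio_base_phys, SZ_4K, &kaddr, &haddr);
 */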

void stage2_unmap_vm(struct kvm *kvm);
int kvm_init_stage2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu, unsigned long type);
void kvm_uninit_stage2_mmu(struct kvm *kvm);
void kvm_free_stage2_pgd(struct kvm_s2_mmu *mmu);
int kvm_phys_addr_ioremap(struct kvm *kvm, phys_addr_t guest_ipa,
			  phys_addr_t pa, unsigned long size, bool writable);

int kvm_handle_guest_abort(struct kvm_vcpu *vcpu);

phys_addr_t kvm_mmu_get_httbr(void);
phys_addr_t kvm_get_idmap_vector(void);
int __init kvm_mmu_init(u32 *hyp_va_bits);

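/*
 * Each vector variant occupies a 2K slot. HYP_VECTOR_DIRECT maps to
 * offset 0 from the given base; every other slot maps to offset
 * (slot - 1) * SZ_2K, i.e. the non-direct variants are packed into
 * consecutive 2K slots starting at their base.
 */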
static inline void *__kvm_vector_slot2addr(void *base,
					   enum arm64_hyp_spectre_vector slot)
{
	int idx = slot - (slot != HYP_VECTOR_DIRECT);

	return base + (idx * SZ_2K);
}

struct kvm;

#define kvm_flush_dcache_to_poc(a,l)	\
	dcache_clean_inval_poc((unsigned long)(a), (unsigned long)(a)+(l))

static inline bool vcpu_has_cache_enabled(struct kvm_vcpu *vcpu)
{
	u64 cache_bits = SCTLR_ELx_M | SCTLR_ELx_C;
	int reg;

	if (vcpu_is_el2(vcpu))
		reg = SCTLR_EL2;
	else
		reg = SCTLR_EL1;

	return (vcpu_read_sys_reg(vcpu, reg) & cache_bits) == cache_bits;
}

static inline void __clean_dcache_guest_page(void *va, size_t size)
{
	/*
	 * With FWB, we ensure that the guest always accesses memory using
	 * cacheable attributes, and we don't have to clean to PoC when
	 * faulting in pages. Furthermore, FWB implies IDC, so cleaning to
	 * PoU is not required either in this case.
	 */
	if (cpus_have_final_cap(ARM64_HAS_STAGE2_FWB))
		return;

	kvm_flush_dcache_to_poc(va, size);
}

static inline size_t __invalidate_icache_max_range(void)
{
	u8 iminline;
	u64 ctr;

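	/*
	 * As with kern_hyp_va(), the movz/movk sequence below only reserves
	 * space; kvm_compute_final_ctr_el0 patches it to load the
	 * system-wide sanitised CTR_EL0 value.
	 */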
	asm volatile(ALTERNATIVE_CB("movz %0, #0\n"
				    "movk %0, #0, lsl #16\n"
				    "movk %0, #0, lsl #32\n"
				    "movk %0, #0, lsl #48\n",
				    ARM64_ALWAYS_SYSTEM,
				    kvm_compute_final_ctr_el0)
		     : "=r" (ctr));

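	/*
	 * CTR_EL0.IminLine is the log2 of the number of 4-byte words in the
	 * smallest I-cache line, so adding 2 yields the log2 of the line
	 * size in bytes. The limit is MAX_DVM_OPS cache lines.
	 */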
	iminline = SYS_FIELD_GET(CTR_EL0, IminLine, ctr) + 2;
	return MAX_DVM_OPS << iminline;
}

static inline void __invalidate_icache_guest_page(void *va, size_t size)
{
	/*
	 * Blow the whole I-cache if it is aliasing (i.e. VIPT) or the
	 * invalidation range exceeds our arbitrary limit on invalidations by
	 * cache line.
	 */
	if (icache_is_aliasing() || size > __invalidate_icache_max_range())
		icache_inval_all_pou();
	else
		icache_inval_pou((unsigned long)va, (unsigned long)va + size);
}

void kvm_set_way_flush(struct kvm_vcpu *vcpu);
void kvm_toggle_cache(struct kvm_vcpu *vcpu, bool was_enabled);

static inline unsigned int kvm_get_vmid_bits(void)
{
	int reg = read_sanitised_ftr_reg(SYS_ID_AA64MMFR1_EL1);

	return get_vmid_bits(reg);
}

/*
 * We are not in the kvm->srcu critical section most of the time, so we take
 * the SRCU read lock here. Since we copy the data from the user page, we
 * can immediately drop the lock again.
 */
static inline int kvm_read_guest_lock(struct kvm *kvm,
				      gpa_t gpa, void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_read_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

static inline int kvm_write_guest_lock(struct kvm *kvm, gpa_t gpa,
				       const void *data, unsigned long len)
{
	int srcu_idx = srcu_read_lock(&kvm->srcu);
	int ret = kvm_write_guest(kvm, gpa, data, len);

	srcu_read_unlock(&kvm->srcu, srcu_idx);

	return ret;
}

#define kvm_phys_to_vttbr(addr)		phys_to_ttbr(addr)

/*
 * When this is (directly or indirectly) used on the TLB invalidation
 * path, we rely on a previously issued DSB so that page table updates
 * and VMID reads are correctly ordered.
 */
static __always_inline u64 kvm_get_vttbr(struct kvm_s2_mmu *mmu)
{
	struct kvm_vmid *vmid = &mmu->vmid;
	u64 vmid_field, baddr;
	u64 cnp = system_supports_cnp() ? VTTBR_CNP_BIT : 0;

	baddr = mmu->pgd_phys;
	vmid_field = atomic64_read(&vmid->id) << VTTBR_VMID_SHIFT;
	vmid_field &= VTTBR_VMID_MASK(kvm_arm_vmid_bits);
	return kvm_phys_to_vttbr(baddr) | vmid_field | cnp;
}

/*
 * Must be called from hyp code running at EL2 with an updated VTTBR
 * and interrupts disabled.
 */
static __always_inline void __load_stage2(struct kvm_s2_mmu *mmu,
					  struct kvm_arch *arch)
{
	write_sysreg(mmu->vtcr, vtcr_el2);
	write_sysreg(kvm_get_vttbr(mmu), vttbr_el2);

	/*
	 * ARM errata 1165522 and 1530923 require the actual execution of the
	 * above before we can switch to the EL1/EL0 translation regime used by
	 * the guest.
	 */
	asm(ALTERNATIVE("nop", "isb", ARM64_WORKAROUND_SPECULATIVE_AT));
}

static inline struct kvm *kvm_s2_mmu_to_kvm(struct kvm_s2_mmu *mmu)
{
	return container_of(mmu->arch, struct kvm, arch);
}
#endif /* __ASSEMBLY__ */
#endif /* __ARM64_KVM_MMU_H__ */