/* SPDX-License-Identifier: GPL-2.0 */
#ifndef __ARM64_KVM_NESTED_H
#define __ARM64_KVM_NESTED_H

#include <linux/bitfield.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_pgtable.h>

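/*
 * A vcpu has a virtual EL2 only when the host implements nested
 * virtualisation and userspace has set the KVM_ARM_VCPU_HAS_EL2
 * feature. This is statically false in the nVHE hypervisor object,
 * which never runs NV guests.
 */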
static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
{
	return (!__is_defined(__KVM_NVHE_HYPERVISOR__) &&
		cpus_have_final_cap(ARM64_HAS_NESTED_VIRT) &&
		vcpu_has_feature(vcpu, KVM_ARM_VCPU_HAS_EL2));
}

/* Translation helpers from non-VHE EL2 to EL1 */
static inline u64 tcr_el2_ps_to_tcr_el1_ips(u64 tcr_el2)
{
	return (u64)FIELD_GET(TCR_EL2_PS_MASK, tcr_el2) << TCR_IPS_SHIFT;
}

static inline u64 translate_tcr_el2_to_tcr_el1(u64 tcr)
{
	return TCR_EPD1_MASK |				/* disable TTBR1_EL1 */
	       ((tcr & TCR_EL2_DS) ? TCR_DS : 0) |
	       ((tcr & TCR_EL2_TBI) ? TCR_TBI0 : 0) |
	       tcr_el2_ps_to_tcr_el1_ips(tcr) |
	       (tcr & TCR_EL2_TG0_MASK) |
	       (tcr & TCR_EL2_ORGN0_MASK) |
	       (tcr & TCR_EL2_IRGN0_MASK) |
	       (tcr & TCR_EL2_T0SZ_MASK);
}

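/*
 * nVHE CPTR_EL2 uses negative-polarity trap bits (set means trap),
 * while the VHE/CPACR_EL1 format uses positive enable fields for
 * FP/SIMD and SVE, hence the inversion. TCPAC and TAM sit at the same
 * bit positions in both formats and are copied through unchanged.
 */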
static inline u64 translate_cptr_el2_to_cpacr_el1(u64 cptr_el2)
{
	u64 cpacr_el1 = CPACR_EL1_RES1;

	if (cptr_el2 & CPTR_EL2_TTA)
		cpacr_el1 |= CPACR_EL1_TTA;
	if (!(cptr_el2 & CPTR_EL2_TFP))
		cpacr_el1 |= CPACR_EL1_FPEN;
	if (!(cptr_el2 & CPTR_EL2_TZ))
		cpacr_el1 |= CPACR_EL1_ZEN;

	cpacr_el1 |= cptr_el2 & (CPTR_EL2_TCPAC | CPTR_EL2_TAM);

	return cpacr_el1;
}

static inline u64 translate_sctlr_el2_to_sctlr_el1(u64 val)
{
	/* Only preserve the minimal set of bits we support */
	val &= (SCTLR_ELx_M | SCTLR_ELx_A | SCTLR_ELx_C | SCTLR_ELx_SA |
		SCTLR_ELx_I | SCTLR_ELx_IESB | SCTLR_ELx_WXN | SCTLR_ELx_EE);
	val |= SCTLR_EL1_RES1;

	return val;
}

static inline u64 translate_ttbr0_el2_to_ttbr0_el1(u64 ttbr0)
{
	/* Clear the ASID field */
	return ttbr0 & ~GENMASK_ULL(63, 48);
}

extern bool forward_smc_trap(struct kvm_vcpu *vcpu);
extern bool forward_debug_exception(struct kvm_vcpu *vcpu);
extern void kvm_init_nested(struct kvm *kvm);
extern int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu);
extern void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu);
extern struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu);

union tlbi_info;

extern void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
				       const union tlbi_info *info,
				       void (*)(struct kvm_s2_mmu *,
						const union tlbi_info *));
extern void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu);
extern void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu);

extern void check_nested_vcpu_requests(struct kvm_vcpu *vcpu);
extern void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu);
extern void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu);

extern void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu);

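/*
 * Result of walking the guest's own ("virtual") stage-2 page tables,
 * as filled in by kvm_walk_nested_s2().
 */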
struct kvm_s2_trans {
	phys_addr_t output;
	unsigned long block_size;
	bool writable;
	bool readable;
	int level;
	u32 esr;
	u64 desc;
};

static inline phys_addr_t kvm_s2_trans_output(struct kvm_s2_trans *trans)
{
	return trans->output;
}

static inline unsigned long kvm_s2_trans_size(struct kvm_s2_trans *trans)
{
	return trans->block_size;
}

static inline u32 kvm_s2_trans_esr(struct kvm_s2_trans *trans)
{
	return trans->esr;
}

static inline bool kvm_s2_trans_readable(struct kvm_s2_trans *trans)
{
	return trans->readable;
}

static inline bool kvm_s2_trans_writable(struct kvm_s2_trans *trans)
{
	return trans->writable;
}

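/*
 * FEAT_XNX turns the stage-2 XN attribute into a two-bit field with
 * separate EL0/EL1 execute-never controls. Only usable when the CPUs
 * implement it and it is exposed to the guest via ID_AA64MMFR1_EL1.XNX.
 */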
static inline bool kvm_has_xnx(struct kvm *kvm)
{
	return cpus_have_final_cap(ARM64_HAS_XNX) &&
		kvm_has_feat(kvm, ID_AA64MMFR1_EL1, XNX, IMP);
}

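/*
 * Stage-2 XN[1:0] encoding with FEAT_XNX:
 *
 *   0b00: executable at EL1 and EL0
 *   0b01: executable at EL0 only
 *   0b10: not executable at either
 *   0b11: executable at EL1 only
 *
 * Without FEAT_XNX, XN[0] is RES0 and only XN[1] is considered.
 */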
static inline bool kvm_s2_trans_exec_el0(struct kvm *kvm, struct kvm_s2_trans *trans)
{
	u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);

	if (!kvm_has_xnx(kvm))
		xn &= 0b10;	/* xn is already field-extracted; keep XN[1] only */

	switch (xn) {
	case 0b00:
	case 0b01:
		return true;
	default:
		return false;
	}
}

static inline bool kvm_s2_trans_exec_el1(struct kvm *kvm, struct kvm_s2_trans *trans)
{
	u8 xn = FIELD_GET(KVM_PTE_LEAF_ATTR_HI_S2_XN, trans->desc);

	if (!kvm_has_xnx(kvm))
		xn &= 0b10;	/* xn is already field-extracted; keep XN[1] only */

	switch (xn) {
	case 0b00:
	case 0b11:
		return true;
	default:
		return false;
	}
}

extern int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
			      struct kvm_s2_trans *result);
extern int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu,
				    struct kvm_s2_trans *trans);
extern int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2);
extern void kvm_nested_s2_wp(struct kvm *kvm);
extern void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block);
extern void kvm_nested_s2_flush(struct kvm *kvm);

unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val);

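/*
 * Check that a trapped TLBI targeting the EL1 translation regime is
 * one we can handle, given the TLB-related features exposed to the
 * guest (nXS qualifier, outer-shareable and range variants).
 */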
static inline bool kvm_supported_tlbi_s1e1_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL1))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

static inline bool kvm_supported_tlbi_s1e2_op(struct kvm_vcpu *vcpu, u32 instr)
{
	struct kvm *kvm = vcpu->kvm;
	u8 CRm = sys_reg_CRm(instr);

	if (!(sys_reg_Op0(instr) == TLBI_Op0 &&
	      sys_reg_Op1(instr) == TLBI_Op1_EL2))
		return false;

	if (!(sys_reg_CRn(instr) == TLBI_CRn_XS ||
	      (sys_reg_CRn(instr) == TLBI_CRn_nXS &&
	       kvm_has_feat(kvm, ID_AA64ISAR1_EL1, XS, IMP))))
		return false;

	if (CRm == TLBI_CRm_IPAIS || CRm == TLBI_CRm_IPAONS)
		return false;

	if (CRm == TLBI_CRm_nROS &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, OS))
		return false;

	if ((CRm == TLBI_CRm_RIS || CRm == TLBI_CRm_ROS ||
	     CRm == TLBI_CRm_RNS) &&
	    !kvm_has_feat(kvm, ID_AA64ISAR0_EL1, TLB, RANGE))
		return false;

	return true;
}

int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu);
u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val);

#ifdef CONFIG_ARM64_PTR_AUTH
bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr);
#else
static inline bool kvm_auth_eretax(struct kvm_vcpu *vcpu, u64 *elr)
{
	/* We really should never execute this... */
	WARN_ON_ONCE(1);
	*elr = 0xbad9acc0debadbad;
	return false;
}
#endif

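/*
 * The level of the guest's stage-2 mapping is stashed in the two
 * software bits of the shadow stage-2 PTE, allowing the size of the
 * guest's mapping to be derived without another walk.
 */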
#define KVM_NV_GUEST_MAP_SZ	(KVM_PGTABLE_PROT_SW1 | KVM_PGTABLE_PROT_SW0)

static inline u64 kvm_encode_nested_level(struct kvm_s2_trans *trans)
{
	return FIELD_PREP(KVM_NV_GUEST_MAP_SZ, trans->level);
}

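/*
 * The contiguous bit makes an entry part of a naturally aligned block
 * of entries: 16 for 4KB granules, 32 or 128 for 16KB granules (at
 * level 2 and 3 respectively), and 32 for 64KB granules. The shift
 * below is log2 of that count.
 */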
/* Adjust alignment for the contiguous bit as per StageOA() */
#define contiguous_bit_shift(d, wi, l)					\
	({								\
		u8 shift = 0;						\
									\
		if ((d) & PTE_CONT) {					\
			switch (BIT((wi)->pgshift)) {			\
			case SZ_4K:					\
				shift = 4;				\
				break;					\
			case SZ_16K:					\
				shift = (l) == 2 ? 5 : 7;		\
				break;					\
			case SZ_64K:					\
				shift = 5;				\
				break;					\
			}						\
		}							\
									\
		shift;							\
	})

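/*
 * Range-based TLBI operand layout:
 *
 *   ASID[63:48] | TG[47:46] | SCALE[45:44] | NUM[43:39] | TTL[38:37] |
 *   BaseADDR[36:0]
 *
 * The range covers (NUM + 1) << (5 * SCALE + 1) pages of the granule
 * selected by TG, starting at BaseADDR shifted by the granule size.
 */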
static inline u64 decode_range_tlbi(u64 val, u64 *range, u16 *asid)
{
	u64 base, tg, num, scale;
	int shift;

	tg	= FIELD_GET(GENMASK(47, 46), val);

	switch (tg) {
	case 1:
		shift = 12;
		break;
	case 2:
		shift = 14;
		break;
	case 3:
	default:		/* IMPDEF: handle tg==0 as 64k */
		shift = 16;
		break;
	}

	base	= (val & GENMASK(36, 0)) << shift;

	if (asid)
		*asid = FIELD_GET(TLBIR_ASID_MASK, val);

	scale	= FIELD_GET(GENMASK(45, 44), val);
	num	= FIELD_GET(GENMASK(43, 39), val);
	*range	= __TLBI_RANGE_PAGES(num, scale) << shift;

	return base;
}

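/*
 * Map a PS/IPS field encoding to an output address size in bits. The
 * 52-bit encoding is only valid when 52-bit PAs are in use; anything
 * out of range degrades to 48 bits.
 */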
static inline unsigned int ps_to_output_size(unsigned int ps, bool pa52bit)
{
	switch (ps) {
	case 0: return 32;
	case 1: return 36;
	case 2: return 40;
	case 3: return 42;
	case 4: return 44;
	case 5: return 48;
	case 6: if (pa52bit)
			return 52;
		fallthrough;
	default:
		return 48;
	}
}

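/*
 * Translation regimes a software stage-1 walk can target: EL1&0,
 * EL2&0 (VHE), and EL2.
 */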
enum trans_regime {
	TR_EL10,
	TR_EL20,
	TR_EL2,
};

struct s1_walk_info;

struct s1_walk_context {
	struct s1_walk_info	*wi;
	u64			table_ipa;
	int			level;
};

struct s1_walk_filter {
	int	(*fn)(struct s1_walk_context *, void *);
	void	*priv;
};

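/*
 * Parameters of a software stage-1 walk: base address, regime, granule,
 * and feature-dependent behaviour (hierarchical permissions, POE, PAN,
 * 52-bit PAs, hardware access flag updates, ...).
 */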
struct s1_walk_info {
	struct s1_walk_filter	*filter;
	u64			baddr;
	enum trans_regime	regime;
	unsigned int		max_oa_bits;
	unsigned int		pgshift;
	unsigned int		txsz;
	int			sl;
	u8			sh;
	bool			as_el0;
	bool			hpd;
	bool			e0poe;
	bool			poe;
	bool			pan;
	bool			be;
	bool			s2;
	bool			pa52bit;
	bool			ha;
};

struct s1_walk_result {
	union {
		struct {
			u64	desc;
			u64	pa;
			s8	level;
			u8	APTable;
			bool	nG;
			u16	asid;
			bool	UXNTable;
			bool	PXNTable;
			bool	uwxn;
			bool	uov;
			bool	ur;
			bool	uw;
			bool	ux;
			bool	pwxn;
			bool	pov;
			bool	pr;
			bool	pw;
			bool	px;
		};
		struct {
			u8	fst;
			bool	ptw;
			bool	s2;
		};
	};
	bool	failed;
};

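/*
 * Software stage-1 walker, plus a helper to find the level of the
 * descriptor mapping a given VA. Both operate on the structures above.
 */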
int __kvm_translate_va(struct kvm_vcpu *vcpu, struct s1_walk_info *wi,
		       struct s1_walk_result *wr, u64 va);
int __kvm_find_s1_desc_level(struct kvm_vcpu *vcpu, u64 va, u64 ipa,
			     int *level);

/* VNCR management */
int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu);
int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu);
void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val);

u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime);

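/*
 * Per-CPU fixmap slot used to map the guest's VNCR page; the BUG_ON
 * guards against indexing past the slots reserved for NR_CPUS.
 */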
#define vncr_fixmap(c)						\
	({							\
		u32 __c = (c);					\
		BUG_ON(__c >= NR_CPUS);				\
		(FIX_VNCR - __c);				\
	})

int __kvm_at_swap_desc(struct kvm *kvm, gpa_t ipa, u64 old, u64 new);

#endif /* __ARM64_KVM_NESTED_H */