xref: /linux/arch/arm64/kvm/nested.c (revision dde63797055cf3615bdac744d641e19e165467bb)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 - Columbia University and Linaro Ltd.
4  * Author: Jintack Lim <jintack.lim@linaro.org>
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 
11 #include <asm/fixmap.h>
12 #include <asm/kvm_arm.h>
13 #include <asm/kvm_emulate.h>
14 #include <asm/kvm_mmu.h>
15 #include <asm/kvm_nested.h>
16 #include <asm/sysreg.h>
17 
18 #include "sys_regs.h"
19 
20 struct vncr_tlb {
21 	/* The guest's VNCR_EL2 */
22 	u64			gva;
23 	struct s1_walk_info	wi;
24 	struct s1_walk_result	wr;
25 
26 	u64			hpa;
27 
28 	/* -1 when not mapped on a CPU */
29 	int			cpu;
30 
31 	/*
32 	 * true if the TLB is valid. Can only be changed with the
33 	 * mmu_lock held.
34 	 */
35 	bool			valid;
36 };
37 
38 /*
39  * Ratio of live shadow S2 MMUs per vcpu. This is a trade-off between
40  * memory usage and potential number of different sets of S2 PTs in
41  * the guests. Running out of S2 MMUs only affects performance (we
42  * will invalidate them more often).
43  */
44 #define S2_MMU_PER_VCPU		2
45 
46 void kvm_init_nested(struct kvm *kvm)
47 {
48 	kvm->arch.nested_mmus = NULL;
49 	kvm->arch.nested_mmus_size = 0;
50 	atomic_set(&kvm->arch.vncr_map_count, 0);
51 }
52 
53 static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
54 {
55 	/*
56 	 * We only initialise the IPA range on the canonical MMU, which
57 	 * defines the contract between KVM and userspace on where the
58 	 * "hardware" is in the IPA space. This affects the validity of MMIO
59 	 * exits forwarded to userspace, for example.
60 	 *
61 	 * For nested S2s, we use the PARange as exposed to the guest, as it
62 	 * is allowed to use it at will to expose whatever memory map it
63 	 * wants to its own guests as it would be on real HW.
64 	 */
65 	return kvm_init_stage2_mmu(kvm, mmu, kvm_get_pa_bits(kvm));
66 }
67 
68 int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
69 {
70 	struct kvm *kvm = vcpu->kvm;
71 	struct kvm_s2_mmu *tmp;
72 	int num_mmus, ret = 0;
73 
74 	if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
75 	    !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
76 		return -EINVAL;
77 
78 	if (!vcpu->arch.ctxt.vncr_array)
79 		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT |
80 								    __GFP_ZERO);
81 
82 	if (!vcpu->arch.ctxt.vncr_array)
83 		return -ENOMEM;
84 
85 	/*
86 	 * Let's treat memory allocation failures as benign: If we fail to
87 	 * allocate anything, return an error and keep the allocated array
88 	 * alive. Userspace may try to recover by initializing the vcpu
89 	 * again, and there is no reason to affect the whole VM for this.
90 	 */
91 	num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
92 	tmp = kvrealloc(kvm->arch.nested_mmus,
93 			size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
94 			GFP_KERNEL_ACCOUNT | __GFP_ZERO);
95 	if (!tmp)
96 		return -ENOMEM;
97 
98 	swap(kvm->arch.nested_mmus, tmp);
99 
100 	/*
101 	 * If we went through a reallocation, adjust the MMU back-pointers in
102 	 * the previously initialised kvm_pgtable structures.
103 	 */
104 	if (kvm->arch.nested_mmus != tmp)
105 		for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
106 			kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
107 
108 	for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
109 		ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
110 
111 	if (ret) {
112 		for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
113 			kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
114 
115 		free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
116 		vcpu->arch.ctxt.vncr_array = NULL;
117 
118 		return ret;
119 	}
120 
121 	kvm->arch.nested_mmus_size = num_mmus;
122 
123 	return 0;
124 }
125 
126 struct s2_walk_info {
127 	int	     (*read_desc)(phys_addr_t pa, u64 *desc, void *data);
128 	void	     *data;
129 	u64	     baddr;
130 	unsigned int max_oa_bits;
131 	unsigned int pgshift;
132 	unsigned int sl;
133 	unsigned int t0sz;
134 	bool	     be;
135 };
136 
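/* Fold the faulting level into the Fault Status Code, as encoded in ESR_ELx */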
137 static u32 compute_fsc(int level, u32 fsc)
138 {
139 	return fsc | (level & 0x3);
140 }
141 
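/*
 * Build an ESR value for injecting a stage-2 fault at the given level,
 * reusing everything but the FSC from the current exit's ESR.
 */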
142 static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
143 {
144 	u32 esr;
145 
146 	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
147 	esr |= compute_fsc(level, fsc);
148 	return esr;
149 }
150 
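/* Input address size, in bits, of the stage-2 walk (64 - T0SZ) */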
151 static int get_ia_size(struct s2_walk_info *wi)
152 {
153 	return 64 - wi->t0sz;
154 }
155 
156 static int check_base_s2_limits(struct s2_walk_info *wi,
157 				int level, int input_size, int stride)
158 {
159 	int start_size, ia_size;
160 
161 	ia_size = get_ia_size(wi);
162 
163 	/* Check translation limits */
164 	switch (BIT(wi->pgshift)) {
165 	case SZ_64K:
166 		if (level == 0 || (level == 1 && ia_size <= 42))
167 			return -EFAULT;
168 		break;
169 	case SZ_16K:
170 		if (level == 0 || (level == 1 && ia_size <= 40))
171 			return -EFAULT;
172 		break;
173 	case SZ_4K:
174 		if (level < 0 || (level == 0 && ia_size <= 42))
175 			return -EFAULT;
176 		break;
177 	}
178 
179 	/* Check input size limits */
180 	if (input_size > ia_size)
181 		return -EFAULT;
182 
183 	/* Check number of entries in starting level table */
184 	start_size = input_size - ((3 - level) * stride + wi->pgshift);
185 	if (start_size < 1 || start_size > stride + 4)
186 		return -EFAULT;
187 
188 	return 0;
189 }
190 
191 /* Check if output is within boundaries */
192 static int check_output_size(struct s2_walk_info *wi, phys_addr_t output)
193 {
194 	unsigned int output_size = wi->max_oa_bits;
195 
196 	if (output_size != 48 && (output & GENMASK_ULL(47, output_size)))
197 		return -1;
198 
199 	return 0;
200 }
201 
202 /*
203  * This is essentially a C version of the pseudocode from the ARM ARM
204  * AArch64.TranslationTableWalk function. I strongly recommend looking at
205  * that pseudocode when trying to understand this.
206  *
207  * Must be called with the kvm->srcu read lock held
208  */
209 static int walk_nested_s2_pgd(phys_addr_t ipa,
210 			      struct s2_walk_info *wi, struct kvm_s2_trans *out)
211 {
212 	int first_block_level, level, stride, input_size, base_lower_bound;
213 	phys_addr_t base_addr;
214 	unsigned int addr_top, addr_bottom;
215 	u64 desc;  /* page table entry */
216 	int ret;
217 	phys_addr_t paddr;
218 
219 	switch (BIT(wi->pgshift)) {
220 	default:
221 	case SZ_64K:
222 	case SZ_16K:
223 		level = 3 - wi->sl;
224 		first_block_level = 2;
225 		break;
226 	case SZ_4K:
227 		level = 2 - wi->sl;
228 		first_block_level = 1;
229 		break;
230 	}
231 
232 	stride = wi->pgshift - 3;
233 	input_size = get_ia_size(wi);
234 	if (input_size > 48 || input_size < 25)
235 		return -EFAULT;
236 
237 	ret = check_base_s2_limits(wi, level, input_size, stride);
238 	if (WARN_ON(ret))
239 		return ret;
240 
241 	base_lower_bound = 3 + input_size - ((3 - level) * stride +
242 			   wi->pgshift);
243 	base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound);
244 
245 	if (check_output_size(wi, base_addr)) {
246 		out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
247 		return 1;
248 	}
249 
250 	addr_top = input_size - 1;
251 
252 	while (1) {
253 		phys_addr_t index;
254 
255 		addr_bottom = (3 - level) * stride + wi->pgshift;
256 		index = (ipa & GENMASK_ULL(addr_top, addr_bottom))
257 			>> (addr_bottom - 3);
258 
259 		paddr = base_addr | index;
260 		ret = wi->read_desc(paddr, &desc, wi->data);
261 		if (ret < 0)
262 			return ret;
263 
264 		/*
265 		 * Handle reversed descriptors if the endianness differs between the
266 		 * host and the guest hypervisor.
267 		 */
268 		if (wi->be)
269 			desc = be64_to_cpu((__force __be64)desc);
270 		else
271 			desc = le64_to_cpu((__force __le64)desc);
272 
273 		/* Check for valid descriptor at this point */
274 		if (!(desc & 1) || ((desc & 3) == 1 && level == 3)) {
275 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
276 			out->desc = desc;
277 			return 1;
278 		}
279 
280 		/* We're at the final level or block translation level */
281 		if ((desc & 3) == 1 || level == 3)
282 			break;
283 
284 		if (check_output_size(wi, desc)) {
285 			out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
286 			out->desc = desc;
287 			return 1;
288 		}
289 
290 		base_addr = desc & GENMASK_ULL(47, wi->pgshift);
291 
292 		level += 1;
293 		addr_top = addr_bottom - 1;
294 	}
295 
296 	if (level < first_block_level) {
297 		out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
298 		out->desc = desc;
299 		return 1;
300 	}
301 
302 	if (check_output_size(wi, desc)) {
303 		out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
304 		out->desc = desc;
305 		return 1;
306 	}
307 
308 	if (!(desc & BIT(10))) {
309 		out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
310 		out->desc = desc;
311 		return 1;
312 	}
313 
314 	addr_bottom += contiguous_bit_shift(desc, wi, level);
315 
316 	/* Calculate and return the result */
317 	paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
318 		(ipa & GENMASK_ULL(addr_bottom - 1, 0));
319 	out->output = paddr;
320 	out->block_size = 1UL << ((3 - level) * stride + wi->pgshift);
321 	out->readable = desc & (0b01 << 6);
322 	out->writable = desc & (0b10 << 6);
323 	out->level = level;
324 	out->desc = desc;
325 	return 0;
326 }
327 
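/* Read a single stage-2 descriptor from the L1 guest's memory */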
328 static int read_guest_s2_desc(phys_addr_t pa, u64 *desc, void *data)
329 {
330 	struct kvm_vcpu *vcpu = data;
331 
332 	return kvm_read_guest(vcpu->kvm, pa, desc, sizeof(*desc));
333 }
334 
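/*
 * Derive the stage-2 walk parameters (granule, T0SZ, start level, maximum
 * output size) from the guest hypervisor's VTCR_EL2.
 */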
335 static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
336 {
337 	wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
338 
339 	switch (vtcr & VTCR_EL2_TG0_MASK) {
340 	case VTCR_EL2_TG0_4K:
341 		wi->pgshift = 12;	 break;
342 	case VTCR_EL2_TG0_16K:
343 		wi->pgshift = 14;	 break;
344 	case VTCR_EL2_TG0_64K:
345 	default:	    /* IMPDEF: treat any other value as 64k */
346 		wi->pgshift = 16;	 break;
347 	}
348 
349 	wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
350 	/* Global limit for now, should eventually be per-VM */
351 	wi->max_oa_bits = min(get_kvm_ipa_limit(),
352 			      ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr)));
353 }
354 
355 int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
356 		       struct kvm_s2_trans *result)
357 {
358 	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
359 	struct s2_walk_info wi;
360 	int ret;
361 
362 	result->esr = 0;
363 
364 	if (!vcpu_has_nv(vcpu))
365 		return 0;
366 
367 	wi.read_desc = read_guest_s2_desc;
368 	wi.data = vcpu;
369 	wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
370 
371 	vtcr_to_walk_info(vtcr, &wi);
372 
373 	wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
374 
375 	ret = walk_nested_s2_pgd(gipa, &wi, result);
376 	if (ret)
377 		result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC);
378 
379 	return ret;
380 }
381 
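/*
 * Convert a TTL hint (granule in bits [3:2], level in bits [1:0]) into the
 * size of the block or page it describes, or 0 if nothing can be inferred.
 */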
382 static unsigned int ttl_to_size(u8 ttl)
383 {
384 	int level = ttl & 3;
385 	int gran = (ttl >> 2) & 3;
386 	unsigned int max_size = 0;
387 
388 	switch (gran) {
389 	case TLBI_TTL_TG_4K:
390 		switch (level) {
391 		case 0:
392 			break;
393 		case 1:
394 			max_size = SZ_1G;
395 			break;
396 		case 2:
397 			max_size = SZ_2M;
398 			break;
399 		case 3:
400 			max_size = SZ_4K;
401 			break;
402 		}
403 		break;
404 	case TLBI_TTL_TG_16K:
405 		switch (level) {
406 		case 0:
407 		case 1:
408 			break;
409 		case 2:
410 			max_size = SZ_32M;
411 			break;
412 		case 3:
413 			max_size = SZ_16K;
414 			break;
415 		}
416 		break;
417 	case TLBI_TTL_TG_64K:
418 		switch (level) {
419 		case 0:
420 		case 1:
421 			/* No 52bit IPA support */
422 			break;
423 		case 2:
424 			max_size = SZ_512M;
425 			break;
426 		case 3:
427 			max_size = SZ_64K;
428 			break;
429 		}
430 		break;
431 	default:			/* No size information */
432 		break;
433 	}
434 
435 	return max_size;
436 }
437 
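/* Encode a page shift and level into the TTL format used by ttl_to_size() */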
438 static u8 pgshift_level_to_ttl(u16 shift, u8 level)
439 {
440 	u8 ttl;
441 
442 	switch (shift) {
443 	case 12:
444 		ttl = TLBI_TTL_TG_4K;
445 		break;
446 	case 14:
447 		ttl = TLBI_TTL_TG_16K;
448 		break;
449 	case 16:
450 		ttl = TLBI_TTL_TG_64K;
451 		break;
452 	default:
453 		BUG();
454 	}
455 
456 	ttl <<= 2;
457 	ttl |= level & 3;
458 
459 	return ttl;
460 }
461 
462 /*
463  * Compute the equivalent of the TTL field by parsing the shadow PT.  The
464  * granule size is extracted from the cached VTCR_EL2.TG0 while the level is
465  * retrieved from first entry carrying the level as a tag.
466  */
467 static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
468 {
469 	u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
470 	kvm_pte_t pte;
471 	u8 ttl, level;
472 
473 	lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
474 
475 	switch (vtcr & VTCR_EL2_TG0_MASK) {
476 	case VTCR_EL2_TG0_4K:
477 		ttl = (TLBI_TTL_TG_4K << 2);
478 		break;
479 	case VTCR_EL2_TG0_16K:
480 		ttl = (TLBI_TTL_TG_16K << 2);
481 		break;
482 	case VTCR_EL2_TG0_64K:
483 	default:	    /* IMPDEF: treat any other value as 64k */
484 		ttl = (TLBI_TTL_TG_64K << 2);
485 		break;
486 	}
487 
488 	tmp = addr;
489 
490 again:
491 	/* Iteratively compute the block sizes for a particular granule size */
492 	switch (vtcr & VTCR_EL2_TG0_MASK) {
493 	case VTCR_EL2_TG0_4K:
494 		if	(sz < SZ_4K)	sz = SZ_4K;
495 		else if (sz < SZ_2M)	sz = SZ_2M;
496 		else if (sz < SZ_1G)	sz = SZ_1G;
497 		else			sz = 0;
498 		break;
499 	case VTCR_EL2_TG0_16K:
500 		if	(sz < SZ_16K)	sz = SZ_16K;
501 		else if (sz < SZ_32M)	sz = SZ_32M;
502 		else			sz = 0;
503 		break;
504 	case VTCR_EL2_TG0_64K:
505 	default:	    /* IMPDEF: treat any other value as 64k */
506 		if	(sz < SZ_64K)	sz = SZ_64K;
507 		else if (sz < SZ_512M)	sz = SZ_512M;
508 		else			sz = 0;
509 		break;
510 	}
511 
512 	if (sz == 0)
513 		return 0;
514 
515 	tmp &= ~(sz - 1);
516 	if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))
517 		goto again;
518 	if (!(pte & PTE_VALID))
519 		goto again;
520 	level = FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte);
521 	if (!level)
522 		goto again;
523 
524 	ttl |= level;
525 
526 	/*
527 	 * We now have found some level information in the shadow S2. Check
528 	 * that the resulting range is actually including the original IPA.
529 	 */
530 	sz = ttl_to_size(ttl);
531 	if (addr < (tmp + sz))
532 		return ttl;
533 
534 	return 0;
535 }
536 
537 unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
538 {
539 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
540 	unsigned long max_size;
541 	u8 ttl;
542 
543 	ttl = FIELD_GET(TLBI_TTL_MASK, val);
544 
545 	if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
546 		/* No TTL, check the shadow S2 for a hint */
547 		u64 addr = (val & GENMASK_ULL(35, 0)) << 12;
548 		ttl = get_guest_mapping_ttl(mmu, addr);
549 	}
550 
551 	max_size = ttl_to_size(ttl);
552 
553 	if (!max_size) {
554 		/* Compute the maximum extent of the invalidation */
555 		switch (mmu->tlb_vtcr & VTCR_EL2_TG0_MASK) {
556 		case VTCR_EL2_TG0_4K:
557 			max_size = SZ_1G;
558 			break;
559 		case VTCR_EL2_TG0_16K:
560 			max_size = SZ_32M;
561 			break;
562 		case VTCR_EL2_TG0_64K:
563 		default:    /* IMPDEF: treat any other value as 64k */
564 			/*
565 			 * No, we do not support 52bit IPA in nested yet. Once
566 			 * we do, this should be 4TB.
567 			 */
568 			max_size = SZ_512M;
569 			break;
570 		}
571 	}
572 
573 	WARN_ON(!max_size);
574 	return max_size;
575 }
576 
577 /*
578  * We can have multiple *different* MMU contexts with the same VMID:
579  *
580  * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
581  *
582  * - Multiple vcpus using private S2s (huh huh...), hence differing by the
583  *   VTTBR_EL2.BADDR address
584  *
585  * - A combination of the above...
586  *
587  * We can always identify which MMU context to pick at run-time.  However,
588  * TLB invalidation involving a VMID must take action on all the TLBs using
589  * this particular VMID. This translates into applying the same invalidation
590  * operation to all the contexts that are using this VMID. Moar phun!
591  */
592 void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
593 				const union tlbi_info *info,
594 				void (*tlbi_callback)(struct kvm_s2_mmu *,
595 						      const union tlbi_info *))
596 {
597 	write_lock(&kvm->mmu_lock);
598 
599 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
600 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
601 
602 		if (!kvm_s2_mmu_valid(mmu))
603 			continue;
604 
605 		if (vmid == get_vmid(mmu->tlb_vttbr))
606 			tlbi_callback(mmu, info);
607 	}
608 
609 	write_unlock(&kvm->mmu_lock);
610 }
611 
612 struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu)
613 {
614 	struct kvm *kvm = vcpu->kvm;
615 	bool nested_stage2_enabled;
616 	u64 vttbr, vtcr, hcr;
617 
618 	lockdep_assert_held_write(&kvm->mmu_lock);
619 
620 	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
621 	vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
622 	hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
623 
624 	nested_stage2_enabled = hcr & HCR_VM;
625 
626 	/* Don't consider the CnP bit for the vttbr match */
627 	vttbr &= ~VTTBR_CNP_BIT;
628 
629 	/*
630 	 * Two possibilities when looking up a S2 MMU context:
631 	 *
632 	 * - either S2 is enabled in the guest, and we need a context that is
633 	 *   S2-enabled and matches the full VTTBR (VMID+BADDR) and VTCR,
634 	 *   which makes it safe from a TLB conflict perspective (a broken
635 	 *   guest won't be able to generate them),
636 	 *
637 	 * - or S2 is disabled, and we need a context that is S2-disabled
638 	 *   and matches the VMID only, as all TLBs are tagged by VMID even
639 	 *   if S2 translation is disabled.
640 	 */
641 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
642 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
643 
644 		if (!kvm_s2_mmu_valid(mmu))
645 			continue;
646 
647 		if (nested_stage2_enabled &&
648 		    mmu->nested_stage2_enabled &&
649 		    vttbr == mmu->tlb_vttbr &&
650 		    vtcr == mmu->tlb_vtcr)
651 			return mmu;
652 
653 		if (!nested_stage2_enabled &&
654 		    !mmu->nested_stage2_enabled &&
655 		    get_vmid(vttbr) == get_vmid(mmu->tlb_vttbr))
656 			return mmu;
657 	}
658 	return NULL;
659 }
660 
661 static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
662 {
663 	struct kvm *kvm = vcpu->kvm;
664 	struct kvm_s2_mmu *s2_mmu;
665 	int i;
666 
667 	lockdep_assert_held_write(&vcpu->kvm->mmu_lock);
668 
669 	s2_mmu = lookup_s2_mmu(vcpu);
670 	if (s2_mmu)
671 		goto out;
672 
673 	/*
674 	 * Make sure we don't always search from the same point, or we
675 	 * will always reuse a potentially active context, leaving
676 	 * free contexts unused.
677 	 */
678 	for (i = kvm->arch.nested_mmus_next;
679 	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
680 	     i++) {
681 		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
682 
683 		if (atomic_read(&s2_mmu->refcnt) == 0)
684 			break;
685 	}
686 	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
687 
688 	/* Set the scene for the next search */
689 	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
690 
691 	/* Make sure we don't forget to do the laundry */
692 	if (kvm_s2_mmu_valid(s2_mmu))
693 		s2_mmu->pending_unmap = true;
694 
695 	/*
696 	 * The virtual VMID (modulo CnP) will be used as a key when matching
697 	 * an existing kvm_s2_mmu.
698 	 *
699 	 * We cache VTCR at allocation time, once and for all. It'd be great
700 	 * if the guest didn't screw that one up, as this is not very
701 	 * forgiving...
702 	 */
703 	s2_mmu->tlb_vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT;
704 	s2_mmu->tlb_vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
705 	s2_mmu->nested_stage2_enabled = vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM;
706 
707 out:
708 	atomic_inc(&s2_mmu->refcnt);
709 
710 	/*
711 	 * Set the vCPU request to perform an unmap, even if the pending unmap
712 	 * originates from another vCPU. This guarantees that the MMU has been
713 	 * completely unmapped before any vCPU actually uses it, and allows
714 	 * multiple vCPUs to lend a hand with completing the unmap.
715 	 */
716 	if (s2_mmu->pending_unmap)
717 		kvm_make_request(KVM_REQ_NESTED_S2_UNMAP, vcpu);
718 
719 	return s2_mmu;
720 }
721 
722 void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
723 {
724 	/* CnP being set denotes an invalid entry */
725 	mmu->tlb_vttbr = VTTBR_CNP_BIT;
726 	mmu->nested_stage2_enabled = false;
727 	atomic_set(&mmu->refcnt, 0);
728 }
729 
730 void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
731 {
732 	/*
733 	 * If the vCPU kept its reference on the MMU after the last put,
734 	 * keep rolling with it.
735 	 */
736 	if (is_hyp_ctxt(vcpu)) {
737 		if (!vcpu->arch.hw_mmu)
738 			vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
739 	} else {
740 		if (!vcpu->arch.hw_mmu) {
741 			scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
742 				vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
743 		}
744 
745 		if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
746 			kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
747 	}
748 }
749 
750 void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
751 {
752 	/* Unconditionally drop the VNCR mapping if we have one */
753 	if (host_data_test_flag(L1_VNCR_MAPPED)) {
754 		BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
755 		BUG_ON(is_hyp_ctxt(vcpu));
756 
757 		clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
758 		vcpu->arch.vncr_tlb->cpu = -1;
759 		host_data_clear_flag(L1_VNCR_MAPPED);
760 		atomic_dec(&vcpu->kvm->arch.vncr_map_count);
761 	}
762 
763 	/*
764 	 * Keep a reference on the associated stage-2 MMU if the vCPU is
765 	 * scheduling out and not in WFI emulation, suggesting it is likely to
766 	 * reuse the MMU sometime soon.
767 	 */
768 	if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI))
769 		return;
770 
771 	if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu))
772 		atomic_dec(&vcpu->arch.hw_mmu->refcnt);
773 
774 	vcpu->arch.hw_mmu = NULL;
775 }
776 
777 /*
778  * Returns non-zero if permission fault is handled by injecting it to the next
779  * level hypervisor.
780  */
781 int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
782 {
783 	bool forward_fault = false;
784 
785 	trans->esr = 0;
786 
787 	if (!kvm_vcpu_trap_is_permission_fault(vcpu))
788 		return 0;
789 
790 	if (kvm_vcpu_trap_is_iabt(vcpu)) {
791 		forward_fault = !kvm_s2_trans_executable(trans);
792 	} else {
793 		bool write_fault = kvm_is_write_fault(vcpu);
794 
795 		forward_fault = ((write_fault && !trans->writable) ||
796 				 (!write_fault && !trans->readable));
797 	}
798 
799 	if (forward_fault)
800 		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
801 
802 	return forward_fault;
803 }
804 
805 int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
806 {
807 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
808 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
809 
810 	return kvm_inject_nested_sync(vcpu, esr_el2);
811 }
812 
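/* Mark the vcpu-private VNCR pseudo-TLB invalid and drop its fixmap mapping */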
813 static void invalidate_vncr(struct vncr_tlb *vt)
814 {
815 	vt->valid = false;
816 	if (vt->cpu != -1)
817 		clear_fixmap(vncr_fixmap(vt->cpu));
818 }
819 
820 static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
821 {
822 	struct kvm_vcpu *vcpu;
823 	unsigned long i;
824 
825 	lockdep_assert_held_write(&kvm->mmu_lock);
826 
827 	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
828 		return;
829 
830 	kvm_for_each_vcpu(i, vcpu, kvm) {
831 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
832 		u64 ipa_start, ipa_end, ipa_size;
833 
834 		/*
835 		 * Careful here: We end up here from an MMU notifier,
836 		 * and this can race against a vcpu not being onlined
837 		 * yet, without the pseudo-TLB being allocated.
838 		 *
839 		 * Skip those, as they obviously don't participate in
840 		 * the invalidation at this stage.
841 		 */
842 		if (!vt)
843 			continue;
844 
845 		if (!vt->valid)
846 			continue;
847 
848 		ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
849 							    vt->wr.level));
850 		ipa_start = vt->wr.pa & ~(ipa_size - 1);
851 		ipa_end = ipa_start + ipa_size;
852 
853 		if (ipa_end <= start || ipa_start >= end)
854 			continue;
855 
856 		invalidate_vncr(vt);
857 	}
858 }
859 
860 struct s1e2_tlbi_scope {
861 	enum {
862 		TLBI_ALL,
863 		TLBI_VA,
864 		TLBI_VAA,
865 		TLBI_ASID,
866 	} type;
867 
868 	u16 asid;
869 	u64 va;
870 	u64 size;
871 };
872 
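/*
 * Invalidate any vcpu-private VNCR pseudo-TLB that intersects the given
 * EL2&0 stage-1 TLBI scope.
 */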
873 static void invalidate_vncr_va(struct kvm *kvm,
874 			       struct s1e2_tlbi_scope *scope)
875 {
876 	struct kvm_vcpu *vcpu;
877 	unsigned long i;
878 
879 	lockdep_assert_held_write(&kvm->mmu_lock);
880 
881 	kvm_for_each_vcpu(i, vcpu, kvm) {
882 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
883 		u64 va_start, va_end, va_size;
884 
885 		if (!vt->valid)
886 			continue;
887 
888 		va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
889 							   vt->wr.level));
890 		va_start = vt->gva & ~(va_size - 1);
891 		va_end = va_start + va_size;
892 
893 		switch (scope->type) {
894 		case TLBI_ALL:
895 			break;
896 
897 		case TLBI_VA:
898 			if (va_end <= scope->va ||
899 			    va_start >= (scope->va + scope->size))
900 				continue;
901 			if (vt->wr.nG && vt->wr.asid != scope->asid)
902 				continue;
903 			break;
904 
905 		case TLBI_VAA:
906 			if (va_end <= scope->va ||
907 			    va_start >= (scope->va + scope->size))
908 				continue;
909 			break;
910 
911 		case TLBI_ASID:
912 			if (!vt->wr.nG || vt->wr.asid != scope->asid)
913 				continue;
914 			break;
915 		}
916 
917 		invalidate_vncr(vt);
918 	}
919 }
920 
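/* Rebuild a sign-extended VA from the VA field of a TLBI operand */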
921 #define tlbi_va_s1_to_va(v)	(u64)sign_extend64((v) << 12, 48)
922 
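/*
 * Decode a trapped EL2 stage-1 TLBI instruction into an invalidation scope
 * (type, VA range and ASID, as applicable).
 */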
923 static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
924 				  struct s1e2_tlbi_scope *scope)
925 {
926 	switch (inst) {
927 	case OP_TLBI_ALLE2:
928 	case OP_TLBI_ALLE2IS:
929 	case OP_TLBI_ALLE2OS:
930 	case OP_TLBI_VMALLE1:
931 	case OP_TLBI_VMALLE1IS:
932 	case OP_TLBI_VMALLE1OS:
933 	case OP_TLBI_ALLE2NXS:
934 	case OP_TLBI_ALLE2ISNXS:
935 	case OP_TLBI_ALLE2OSNXS:
936 	case OP_TLBI_VMALLE1NXS:
937 	case OP_TLBI_VMALLE1ISNXS:
938 	case OP_TLBI_VMALLE1OSNXS:
939 		scope->type = TLBI_ALL;
940 		break;
941 	case OP_TLBI_VAE2:
942 	case OP_TLBI_VAE2IS:
943 	case OP_TLBI_VAE2OS:
944 	case OP_TLBI_VAE1:
945 	case OP_TLBI_VAE1IS:
946 	case OP_TLBI_VAE1OS:
947 	case OP_TLBI_VAE2NXS:
948 	case OP_TLBI_VAE2ISNXS:
949 	case OP_TLBI_VAE2OSNXS:
950 	case OP_TLBI_VAE1NXS:
951 	case OP_TLBI_VAE1ISNXS:
952 	case OP_TLBI_VAE1OSNXS:
953 	case OP_TLBI_VALE2:
954 	case OP_TLBI_VALE2IS:
955 	case OP_TLBI_VALE2OS:
956 	case OP_TLBI_VALE1:
957 	case OP_TLBI_VALE1IS:
958 	case OP_TLBI_VALE1OS:
959 	case OP_TLBI_VALE2NXS:
960 	case OP_TLBI_VALE2ISNXS:
961 	case OP_TLBI_VALE2OSNXS:
962 	case OP_TLBI_VALE1NXS:
963 	case OP_TLBI_VALE1ISNXS:
964 	case OP_TLBI_VALE1OSNXS:
965 		scope->type = TLBI_VA;
966 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
967 		if (!scope->size)
968 			scope->size = SZ_1G;
969 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
970 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
971 		break;
972 	case OP_TLBI_ASIDE1:
973 	case OP_TLBI_ASIDE1IS:
974 	case OP_TLBI_ASIDE1OS:
975 	case OP_TLBI_ASIDE1NXS:
976 	case OP_TLBI_ASIDE1ISNXS:
977 	case OP_TLBI_ASIDE1OSNXS:
978 		scope->type = TLBI_ASID;
979 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
980 		break;
981 	case OP_TLBI_VAAE1:
982 	case OP_TLBI_VAAE1IS:
983 	case OP_TLBI_VAAE1OS:
984 	case OP_TLBI_VAAE1NXS:
985 	case OP_TLBI_VAAE1ISNXS:
986 	case OP_TLBI_VAAE1OSNXS:
987 	case OP_TLBI_VAALE1:
988 	case OP_TLBI_VAALE1IS:
989 	case OP_TLBI_VAALE1OS:
990 	case OP_TLBI_VAALE1NXS:
991 	case OP_TLBI_VAALE1ISNXS:
992 	case OP_TLBI_VAALE1OSNXS:
993 		scope->type = TLBI_VAA;
994 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
995 		if (!scope->size)
996 			scope->size = SZ_1G;
997 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
998 		break;
999 	case OP_TLBI_RVAE2:
1000 	case OP_TLBI_RVAE2IS:
1001 	case OP_TLBI_RVAE2OS:
1002 	case OP_TLBI_RVAE1:
1003 	case OP_TLBI_RVAE1IS:
1004 	case OP_TLBI_RVAE1OS:
1005 	case OP_TLBI_RVAE2NXS:
1006 	case OP_TLBI_RVAE2ISNXS:
1007 	case OP_TLBI_RVAE2OSNXS:
1008 	case OP_TLBI_RVAE1NXS:
1009 	case OP_TLBI_RVAE1ISNXS:
1010 	case OP_TLBI_RVAE1OSNXS:
1011 	case OP_TLBI_RVALE2:
1012 	case OP_TLBI_RVALE2IS:
1013 	case OP_TLBI_RVALE2OS:
1014 	case OP_TLBI_RVALE1:
1015 	case OP_TLBI_RVALE1IS:
1016 	case OP_TLBI_RVALE1OS:
1017 	case OP_TLBI_RVALE2NXS:
1018 	case OP_TLBI_RVALE2ISNXS:
1019 	case OP_TLBI_RVALE2OSNXS:
1020 	case OP_TLBI_RVALE1NXS:
1021 	case OP_TLBI_RVALE1ISNXS:
1022 	case OP_TLBI_RVALE1OSNXS:
1023 		scope->type = TLBI_VA;
1024 		scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
1025 		break;
1026 	case OP_TLBI_RVAAE1:
1027 	case OP_TLBI_RVAAE1IS:
1028 	case OP_TLBI_RVAAE1OS:
1029 	case OP_TLBI_RVAAE1NXS:
1030 	case OP_TLBI_RVAAE1ISNXS:
1031 	case OP_TLBI_RVAAE1OSNXS:
1032 	case OP_TLBI_RVAALE1:
1033 	case OP_TLBI_RVAALE1IS:
1034 	case OP_TLBI_RVAALE1OS:
1035 	case OP_TLBI_RVAALE1NXS:
1036 	case OP_TLBI_RVAALE1ISNXS:
1037 	case OP_TLBI_RVAALE1OSNXS:
1038 		scope->type = TLBI_VAA;
1039 		scope->va = decode_range_tlbi(val, &scope->size, NULL);
1040 		break;
1041 	}
1042 }
1043 
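/* Apply a trapped EL2 stage-1 TLBI to the vcpu-private VNCR pseudo-TLBs */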
1044 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
1045 {
1046 	struct s1e2_tlbi_scope scope = {};
1047 
1048 	compute_s1_tlbi_range(vcpu, inst, val, &scope);
1049 
1050 	guard(write_lock)(&vcpu->kvm->mmu_lock);
1051 	invalidate_vncr_va(vcpu->kvm, &scope);
1052 }
1053 
1054 void kvm_nested_s2_wp(struct kvm *kvm)
1055 {
1056 	int i;
1057 
1058 	lockdep_assert_held_write(&kvm->mmu_lock);
1059 
1060 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1061 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1062 
1063 		if (kvm_s2_mmu_valid(mmu))
1064 			kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
1065 	}
1066 
1067 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1068 }
1069 
1070 void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
1071 {
1072 	int i;
1073 
1074 	lockdep_assert_held_write(&kvm->mmu_lock);
1075 
1076 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1077 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1078 
1079 		if (kvm_s2_mmu_valid(mmu))
1080 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
1081 	}
1082 
1083 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1084 }
1085 
1086 void kvm_nested_s2_flush(struct kvm *kvm)
1087 {
1088 	int i;
1089 
1090 	lockdep_assert_held_write(&kvm->mmu_lock);
1091 
1092 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1093 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1094 
1095 		if (kvm_s2_mmu_valid(mmu))
1096 			kvm_stage2_flush_range(mmu, 0, kvm_phys_size(mmu));
1097 	}
1098 }
1099 
1100 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1101 {
1102 	int i;
1103 
1104 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1105 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1106 
1107 		if (!WARN_ON(atomic_read(&mmu->refcnt)))
1108 			kvm_free_stage2_pgd(mmu);
1109 	}
1110 	kvfree(kvm->arch.nested_mmus);
1111 	kvm->arch.nested_mmus = NULL;
1112 	kvm->arch.nested_mmus_size = 0;
1113 	kvm_uninit_stage2_mmu(kvm);
1114 }
1115 
1116 /*
1117  * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter:
1118  *
1119  * - We introduce an internal representation of a vcpu-private TLB,
1120  *   representing the mapping between the guest VA contained in VNCR_EL2,
1121  *   the IPA the guest's EL2 PTs point to, and the actual PA this lives at.
1122  *
1123  * - On translation fault from a nested VNCR access, we create such a TLB.
1124  *   If there is no mapping to describe, the guest inherits the fault.
1125  *   Crucially, no actual mapping is done at this stage.
1126  *
1127  * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
1128  *   TLB exists, we map it in the fixmap for this CPU, and run with it. We
1129  *   have to respect the permissions dictated by the guest, but not the
1130  *   memory type (FWB is a must).
1131  *
1132  * - Note that we usually don't do a vcpu_load() on the back of a fault
1133  *   (unless we are preempted), so the resolution of a translation fault
1134  *   must go via a request that will map the VNCR page in the fixmap.
1135  *   vcpu_load() might as well use the same mechanism.
1136  *
1137  * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
1138  *   mapped, we unmap it. Yes it is that simple. The TLB still exists
1139  *   though, and may be reused at a later load.
1140  *
1141  * - On permission fault, we simply forward the fault to the guest's EL2.
1142  *   Get out of my way.
1143  *
1144  * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
1145  *   intersects with the TLBI request, invalidate it, and unmap the page
1146  *   from the fixmap. Because we need to look at all the vcpu-private TLBs,
1147  *   this requires some wide-ranging locking to ensure that nothing races
1148  *   against it. This may require some refcounting to avoid the search when
1149  *   no such TLB is present.
1150  *
1151  * - On MMU notifiers, we must invalidate our TLB in a similar way, but
1152  *   looking at the IPA instead. The funny part is that there may not be a
1153  *   stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
1154  *   instructions.
1155  */
1156 
1157 int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
1158 {
1159 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
1160 		return 0;
1161 
1162 	vcpu->arch.vncr_tlb = kzalloc(sizeof(*vcpu->arch.vncr_tlb),
1163 				      GFP_KERNEL_ACCOUNT);
1164 	if (!vcpu->arch.vncr_tlb)
1165 		return -ENOMEM;
1166 
1167 	return 0;
1168 }
1169 
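/* Return the guest's VNCR_EL2 as a virtual address, sign-extended from bit 48 */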
1170 static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
1171 {
1172 	return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
1173 }
1174 
1175 static int kvm_translate_vncr(struct kvm_vcpu *vcpu)
1176 {
1177 	bool write_fault, writable;
1178 	unsigned long mmu_seq;
1179 	struct vncr_tlb *vt;
1180 	struct page *page;
1181 	u64 va, pfn, gfn;
1182 	int ret;
1183 
1184 	vt = vcpu->arch.vncr_tlb;
1185 
1186 	/*
1187 	 * If we're about to walk the EL2 S1 PTs, we must invalidate the
1188 	 * current TLB, as it could be sampled from another vcpu doing a
1189 	 * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
1190 	 * translation, so not much of a choice.
1191 	 *
1192 	 * We also prepare the next walk whilst we're at it.
1193 	 */
1194 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1195 		invalidate_vncr(vt);
1196 
1197 		vt->wi = (struct s1_walk_info) {
1198 			.regime	= TR_EL20,
1199 			.as_el0	= false,
1200 			.pan	= false,
1201 		};
1202 		vt->wr = (struct s1_walk_result){};
1203 	}
1204 
1205 	guard(srcu)(&vcpu->kvm->srcu);
1206 
1207 	va =  read_vncr_el2(vcpu);
1208 
1209 	ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
1210 	if (ret)
1211 		return ret;
1212 
1213 	write_fault = kvm_is_write_fault(vcpu);
1214 
1215 	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1216 	smp_rmb();
1217 
1218 	gfn = vt->wr.pa >> PAGE_SHIFT;
1219 	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writable, &page);
1220 	if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
1221 		return -EFAULT;
1222 
1223 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1224 		if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
1225 			return -EAGAIN;
1226 
1227 		vt->gva = va;
1228 		vt->hpa = pfn << PAGE_SHIFT;
1229 		vt->valid = true;
1230 		vt->cpu = -1;
1231 
1232 		kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
1233 		kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
1234 	}
1235 
1236 	if (vt->wr.pw)
1237 		mark_page_dirty(vcpu->kvm, gfn);
1238 
1239 	return 0;
1240 }
1241 
1242 static void inject_vncr_perm(struct kvm_vcpu *vcpu)
1243 {
1244 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1245 	u64 esr = kvm_vcpu_get_esr(vcpu);
1246 
1247 	/* Adjust the fault level to reflect that of the guest's */
1248 	esr &= ~ESR_ELx_FSC;
1249 	esr |= FIELD_PREP(ESR_ELx_FSC,
1250 			  ESR_ELx_FSC_PERM_L(vt->wr.level));
1251 
1252 	kvm_inject_nested_sync(vcpu, esr);
1253 }
1254 
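/*
 * Check whether the cached VNCR translation still matches the current
 * VNCR_EL2 value and, for non-global mappings, the active ASID.
 */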
1255 static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
1256 {
1257 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1258 
1259 	lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
1260 
1261 	if (!vt->valid)
1262 		return false;
1263 
1264 	if (read_vncr_el2(vcpu) != vt->gva)
1265 		return false;
1266 
1267 	if (vt->wr.nG) {
1268 		u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1269 		u64 ttbr = ((tcr & TCR_A1) ?
1270 			    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1271 			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1272 		u16 asid;
1273 
1274 		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1275 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1276 		    !(tcr & TCR_ASID16))
1277 			asid &= GENMASK(7, 0);
1278 
1279 		return asid == vt->wr.asid;
1280 	}
1281 
1282 	return true;
1283 }
1284 
1285 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
1286 {
1287 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1288 	u64 esr = kvm_vcpu_get_esr(vcpu);
1289 
1290 	BUG_ON(!(esr & ESR_ELx_VNCR));
1291 
1292 	if (esr_fsc_is_permission_fault(esr)) {
1293 		inject_vncr_perm(vcpu);
1294 	} else if (esr_fsc_is_translation_fault(esr)) {
1295 		bool valid;
1296 		int ret;
1297 
1298 		scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
1299 			valid = kvm_vncr_tlb_lookup(vcpu);
1300 
1301 		if (!valid)
1302 			ret = kvm_translate_vncr(vcpu);
1303 		else
1304 			ret = -EPERM;
1305 
1306 		switch (ret) {
1307 		case -EAGAIN:
1308 		case -ENOMEM:
1309 			/* Let's try again... */
1310 			break;
1311 		case -EFAULT:
1312 		case -EINVAL:
1313 		case -ENOENT:
1314 		case -EACCES:
1315 			/*
1316 			 * Translation failed, inject the corresponding
1317 			 * exception back to EL2.
1318 			 */
1319 			BUG_ON(!vt->wr.failed);
1320 
1321 			esr &= ~ESR_ELx_FSC;
1322 			esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
1323 
1324 			kvm_inject_nested_sync(vcpu, esr);
1325 			break;
1326 		case -EPERM:
1327 			/* Hack to deal with POE until we get kernel support */
1328 			inject_vncr_perm(vcpu);
1329 			break;
1330 		case 0:
1331 			break;
1332 		}
1333 	} else {
1334 		WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
1335 	}
1336 
1337 	return 1;
1338 }
1339 
1340 static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
1341 {
1342 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1343 	pgprot_t prot;
1344 
1345 	guard(preempt)();
1346 	guard(read_lock)(&vcpu->kvm->mmu_lock);
1347 
1348 	/*
1349 	 * The request to map VNCR may have raced against some other
1350 	 * event, such as an interrupt, and may not be valid anymore.
1351 	 */
1352 	if (is_hyp_ctxt(vcpu))
1353 		return;
1354 
1355 	/*
1356 	 * Check that the pseudo-TLB is valid and that VNCR_EL2 still
1357 	 * contains the expected value. If it doesn't, we simply bail out
1358 	 * without a mapping -- a transformed MSR/MRS will generate the
1359 	 * fault and allows us to populate the pseudo-TLB.
1360 	 * fault and allow us to populate the pseudo-TLB.
1361 	if (!vt->valid)
1362 		return;
1363 
1364 	if (read_vncr_el2(vcpu) != vt->gva)
1365 		return;
1366 
1367 	if (vt->wr.nG) {
1368 		u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1369 		u64 ttbr = ((tcr & TCR_A1) ?
1370 			    vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1371 			    vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1372 		u16 asid;
1373 
1374 		asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1375 		if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1376 		    !(tcr & TCR_ASID16))
1377 			asid &= GENMASK(7, 0);
1378 
1379 		if (asid != vt->wr.asid)
1380 			return;
1381 	}
1382 
1383 	vt->cpu = smp_processor_id();
1384 
1385 	if (vt->wr.pw && vt->wr.pr)
1386 		prot = PAGE_KERNEL;
1387 	else if (vt->wr.pr)
1388 		prot = PAGE_KERNEL_RO;
1389 	else
1390 		prot = PAGE_NONE;
1391 
1392 	/*
1393 	 * We can't map write-only (or no permission at all) in the kernel,
1394 	 * but the guest can do it if using POE, so we'll have to turn a
1395 	 * translation fault into a permission fault at runtime.
1396 	 * FIXME: WO doesn't work at all, need POE support in the kernel.
1397 	 */
1398 	if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
1399 		__set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
1400 		host_data_set_flag(L1_VNCR_MAPPED);
1401 		atomic_inc(&vcpu->kvm->arch.vncr_map_count);
1402 	}
1403 }
1404 
1405 /*
1406  * Our emulated CPU doesn't support all the possible features. For the
1407  * sake of simplicity (and probably mental sanity), wipe out a number
1408  * of feature bits we don't intend to support for the time being.
1409  * This list should get updated as new features get added to the NV
1410  * support, and as new extensions are added to the architecture.
1411  */
1412 u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
1413 {
1414 	switch (reg) {
1415 	case SYS_ID_AA64ISAR0_EL1:
1416 		/* Support everything but TME */
1417 		val &= ~ID_AA64ISAR0_EL1_TME;
1418 		break;
1419 
1420 	case SYS_ID_AA64ISAR1_EL1:
1421 		/* Support everything but LS64 and Spec Invalidation */
1422 		val &= ~(ID_AA64ISAR1_EL1_LS64	|
1423 			 ID_AA64ISAR1_EL1_SPECRES);
1424 		break;
1425 
1426 	case SYS_ID_AA64PFR0_EL1:
1427 		/* No RME, AMU, MPAM, S-EL2, or RAS */
1428 		val &= ~(ID_AA64PFR0_EL1_RME	|
1429 			 ID_AA64PFR0_EL1_AMU	|
1430 			 ID_AA64PFR0_EL1_MPAM	|
1431 			 ID_AA64PFR0_EL1_SEL2	|
1432 			 ID_AA64PFR0_EL1_RAS	|
1433 			 ID_AA64PFR0_EL1_EL3	|
1434 			 ID_AA64PFR0_EL1_EL2	|
1435 			 ID_AA64PFR0_EL1_EL1	|
1436 			 ID_AA64PFR0_EL1_EL0);
1437 		/* 64bit only at any EL */
1438 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
1439 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
1440 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
1441 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
1442 		break;
1443 
1444 	case SYS_ID_AA64PFR1_EL1:
1445 		/* Only support BTI, SSBS, CSV2_frac */
1446 		val &= (ID_AA64PFR1_EL1_BT	|
1447 			ID_AA64PFR1_EL1_SSBS	|
1448 			ID_AA64PFR1_EL1_CSV2_frac);
1449 		break;
1450 
1451 	case SYS_ID_AA64MMFR0_EL1:
1452 		/* Hide ExS, Secure Memory */
1453 		val &= ~(ID_AA64MMFR0_EL1_EXS		|
1454 			 ID_AA64MMFR0_EL1_TGRAN4_2	|
1455 			 ID_AA64MMFR0_EL1_TGRAN16_2	|
1456 			 ID_AA64MMFR0_EL1_TGRAN64_2	|
1457 			 ID_AA64MMFR0_EL1_SNSMEM);
1458 
1459 		/* Hide CNTPOFF if present */
1460 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
1461 
1462 		/* Disallow unsupported S2 page sizes */
1463 		switch (PAGE_SIZE) {
1464 		case SZ_64K:
1465 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
1466 			fallthrough;
1467 		case SZ_16K:
1468 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
1469 			fallthrough;
1470 		case SZ_4K:
1471 			/* Support everything */
1472 			break;
1473 		}
1474 
1475 		/*
1476 		 * Since we can't support a guest S2 page size smaller
1477 		 * than the host's own page size (due to KVM only
1478 		 * populating its own S2 using the kernel's page
1479 		 * size), advertise the limitation using FEAT_GTG.
1480 		 */
1481 		switch (PAGE_SIZE) {
1482 		case SZ_4K:
1483 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
1484 			fallthrough;
1485 		case SZ_16K:
1486 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
1487 			fallthrough;
1488 		case SZ_64K:
1489 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
1490 			break;
1491 		}
1492 
1493 		/* Cap PARange to 48bits */
1494 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
1495 		break;
1496 
1497 	case SYS_ID_AA64MMFR1_EL1:
1498 		val &= (ID_AA64MMFR1_EL1_HCX	|
1499 			ID_AA64MMFR1_EL1_PAN	|
1500 			ID_AA64MMFR1_EL1_LO	|
1501 			ID_AA64MMFR1_EL1_HPDS	|
1502 			ID_AA64MMFR1_EL1_VH	|
1503 			ID_AA64MMFR1_EL1_VMIDBits);
1504 		/* FEAT_E2H0 implies no VHE */
1505 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
1506 			val &= ~ID_AA64MMFR1_EL1_VH;
1507 		break;
1508 
1509 	case SYS_ID_AA64MMFR2_EL1:
1510 		val &= ~(ID_AA64MMFR2_EL1_BBM	|
1511 			 ID_AA64MMFR2_EL1_TTL	|
1512 			 GENMASK_ULL(47, 44)	|
1513 			 ID_AA64MMFR2_EL1_ST	|
1514 			 ID_AA64MMFR2_EL1_CCIDX	|
1515 			 ID_AA64MMFR2_EL1_VARange);
1516 
1517 		/* Force TTL support */
1518 		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
1519 		break;
1520 
1521 	case SYS_ID_AA64MMFR4_EL1:
1522 		/*
1523 		 * You get EITHER
1524 		 *
1525 		 * - FEAT_VHE without FEAT_E2H0
1526 		 * - FEAT_NV limited to FEAT_NV2
1527 		 * - HCR_EL2.NV1 being RES0
1528 		 *
1529 		 * OR
1530 		 *
1531 		 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
1532 	 * - FEAT_E2H0 without FEAT_VHE or FEAT_NV
1533 		 * Life is too short for anything else.
1534 		 */
1535 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
1536 			val = 0;
1537 		} else {
1538 			val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
1539 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
1540 		}
1541 		break;
1542 
1543 	case SYS_ID_AA64DFR0_EL1:
1544 		/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
1545 		val &= (ID_AA64DFR0_EL1_PMUVer	|
1546 			ID_AA64DFR0_EL1_WRPs	|
1547 			ID_AA64DFR0_EL1_BRPs	|
1548 			ID_AA64DFR0_EL1_DebugVer|
1549 			ID_AA64DFR0_EL1_HPMN0);
1550 
1551 		/* Cap Debug to ARMv8.1 */
1552 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, VHE);
1553 		break;
1554 	}
1555 
1556 	return val;
1557 }
1558 
1559 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
1560 			     enum vcpu_sysreg sr, u64 v)
1561 {
1562 	struct kvm_sysreg_masks *masks;
1563 
1564 	masks = vcpu->kvm->arch.sysreg_masks;
1565 
1566 	if (masks) {
1567 		sr -= __SANITISED_REG_START__;
1568 
1569 		v &= ~masks->mask[sr].res0;
1570 		v |= masks->mask[sr].res1;
1571 	}
1572 
1573 	return v;
1574 }
1575 
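/* Record the RES0/RES1 masks for a sanitised register in the per-VM array */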
1576 static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
1577 {
1578 	int i = sr - __SANITISED_REG_START__;
1579 
1580 	BUILD_BUG_ON(!__builtin_constant_p(sr));
1581 	BUILD_BUG_ON(sr < __SANITISED_REG_START__);
1582 	BUILD_BUG_ON(sr >= NR_SYS_REGS);
1583 
1584 	kvm->arch.sysreg_masks->mask[i].res0 = res0;
1585 	kvm->arch.sysreg_masks->mask[i].res1 = res1;
1586 }
1587 
1588 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
1589 {
1590 	struct kvm *kvm = vcpu->kvm;
1591 	u64 res0, res1;
1592 
1593 	lockdep_assert_held(&kvm->arch.config_lock);
1594 
1595 	if (kvm->arch.sysreg_masks)
1596 		goto out;
1597 
1598 	kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
1599 					 GFP_KERNEL_ACCOUNT);
1600 	if (!kvm->arch.sysreg_masks)
1601 		return -ENOMEM;
1602 
1603 	/* VTTBR_EL2 */
1604 	res0 = res1 = 0;
1605 	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
1606 		res0 |= GENMASK(63, 56);
1607 	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
1608 		res0 |= VTTBR_CNP_BIT;
1609 	set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
1610 
1611 	/* VTCR_EL2 */
1612 	res0 = GENMASK(63, 32) | GENMASK(30, 20);
1613 	res1 = BIT(31);
1614 	set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
1615 
1616 	/* VMPIDR_EL2 */
1617 	res0 = GENMASK(63, 40) | GENMASK(30, 24);
1618 	res1 = BIT(31);
1619 	set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
1620 
1621 	/* HCR_EL2 */
1622 	get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1);
1623 	set_sysreg_masks(kvm, HCR_EL2, res0, res1);
1624 
1625 	/* HCRX_EL2 */
1626 	get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1);
1627 	set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
1628 
1629 	/* HFG[RW]TR_EL2 */
1630 	get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1);
1631 	set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1);
1632 	get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1);
1633 	set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1);
1634 
1635 	/* HDFG[RW]TR_EL2 */
1636 	get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1);
1637 	set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1);
1638 	get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1);
1639 	set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1);
1640 
1641 	/* HFGITR_EL2 */
1642 	get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1);
1643 	set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
1644 
1645 	/* HAFGRTR_EL2 - not a lot to see here */
1646 	get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1);
1647 	set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
1648 
1649 	/* HFG[RW]TR2_EL2 */
1650 	get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1);
1651 	set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1);
1652 	get_reg_fixed_bits(kvm, HFGWTR2_EL2, &res0, &res1);
1653 	set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1);
1654 
1655 	/* HDFG[RW]TR2_EL2 */
1656 	get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1);
1657 	set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1);
1658 	get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1);
1659 	set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1);
1660 
1661 	/* HFGITR2_EL2 */
1662 	get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1);
1663 	set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1);
1664 
1665 	/* TCR2_EL2 */
1666 	res0 = TCR2_EL2_RES0;
1667 	res1 = TCR2_EL2_RES1;
1668 	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, D128, IMP))
1669 		res0 |= (TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1 | TCR2_EL2_D128);
1670 	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, MEC, IMP))
1671 		res0 |= TCR2_EL2_AMEC1 | TCR2_EL2_AMEC0;
1672 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, HAFDBS, HAFT))
1673 		res0 |= TCR2_EL2_HAFT;
1674 	if (!kvm_has_feat(kvm, ID_AA64PFR1_EL1, THE, IMP))
1675 		res0 |= TCR2_EL2_PTTWI | TCR2_EL2_PnCH;
1676 	if (!kvm_has_feat(kvm, ID_AA64MMFR3_EL1, AIE, IMP))
1677 		res0 |= TCR2_EL2_AIE;
1678 	if (!kvm_has_s1poe(kvm))
1679 		res0 |= TCR2_EL2_POE | TCR2_EL2_E0POE;
1680 	if (!kvm_has_s1pie(kvm))
1681 		res0 |= TCR2_EL2_PIE;
1682 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
1683 		res0 |= (TCR2_EL2_E0POE | TCR2_EL2_D128 |
1684 			 TCR2_EL2_AMEC1 | TCR2_EL2_DisCH0 | TCR2_EL2_DisCH1);
1685 	set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
1686 
1687 	/* SCTLR_EL1 */
1688 	res0 = SCTLR_EL1_RES0;
1689 	res1 = SCTLR_EL1_RES1;
1690 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, PAN, PAN3))
1691 		res0 |= SCTLR_EL1_EPAN;
1692 	set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
1693 
1694 	/* MDCR_EL2 */
1695 	res0 = MDCR_EL2_RES0;
1696 	res1 = MDCR_EL2_RES1;
1697 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, IMP))
1698 		res0 |= (MDCR_EL2_HPMN | MDCR_EL2_TPMCR |
1699 			 MDCR_EL2_TPM | MDCR_EL2_HPME);
1700 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, IMP))
1701 		res0 |= MDCR_EL2_E2PB | MDCR_EL2_TPMS;
1702 	if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, SPMU, IMP))
1703 		res0 |= MDCR_EL2_EnSPM;
1704 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P1))
1705 		res0 |= MDCR_EL2_HPMD;
1706 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceFilt, IMP))
1707 		res0 |= MDCR_EL2_TTRF;
1708 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P5))
1709 		res0 |= MDCR_EL2_HCCD | MDCR_EL2_HLP;
1710 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, TraceBuffer, IMP))
1711 		res0 |= MDCR_EL2_E2TB;
1712 	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, FGT, IMP))
1713 		res0 |= MDCR_EL2_TDCC;
1714 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, MTPMU, IMP) ||
1715 	    kvm_has_feat(kvm, ID_AA64PFR0_EL1, EL3, IMP))
1716 		res0 |= MDCR_EL2_MTPME;
1717 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMUVer, V3P7))
1718 		res0 |= MDCR_EL2_HPMFZO;
1719 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSS, IMP))
1720 		res0 |= MDCR_EL2_PMSSE;
1721 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, PMSVer, V1P2))
1722 		res0 |= MDCR_EL2_HPMFZS;
1723 	if (!kvm_has_feat(kvm, ID_AA64DFR1_EL1, EBEP, IMP))
1724 		res0 |= MDCR_EL2_PMEE;
1725 	if (!kvm_has_feat(kvm, ID_AA64DFR0_EL1, DebugVer, V8P9))
1726 		res0 |= MDCR_EL2_EBWE;
1727 	if (!kvm_has_feat(kvm, ID_AA64DFR2_EL1, STEP, IMP))
1728 		res0 |= MDCR_EL2_EnSTEPOP;
1729 	set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
1730 
1731 	/* CNTHCTL_EL2 */
1732 	res0 = GENMASK(63, 20);
1733 	res1 = 0;
1734 	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
1735 		res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
1736 	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
1737 		res0 |= CNTHCTL_ECV;
1738 		if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
1739 			res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
1740 				 CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
1741 	}
1742 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
1743 		res0 |= GENMASK(11, 8);
1744 	set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1);
1745 
1746 	/* ICH_HCR_EL2 */
1747 	res0 = ICH_HCR_EL2_RES0;
1748 	res1 = ICH_HCR_EL2_RES1;
1749 	if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
1750 		res0 |= ICH_HCR_EL2_TDIR;
1751 	/* No GICv4 is presented to the guest */
1752 	res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
1753 	set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1);
1754 
1755 	/* VNCR_EL2 */
1756 	set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1);
1757 
1758 out:
1759 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
1760 		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
1761 
1762 	return 0;
1763 }
1764 
1765 void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
1766 {
1767 	if (kvm_check_request(KVM_REQ_NESTED_S2_UNMAP, vcpu)) {
1768 		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
1769 
1770 		write_lock(&vcpu->kvm->mmu_lock);
1771 		if (mmu->pending_unmap) {
1772 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), true);
1773 			mmu->pending_unmap = false;
1774 		}
1775 		write_unlock(&vcpu->kvm->mmu_lock);
1776 	}
1777 
1778 	if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
1779 		kvm_map_l1_vncr(vcpu);
1780 
1781 	/* Must be last, as may switch context! */
1782 	if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
1783 		kvm_inject_nested_irq(vcpu);
1784 }
1785