xref: /linux/arch/arm64/kvm/nested.c (revision c715f13bb30f9f4d1bd8888667ef32e43b6fedc1)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 - Columbia University and Linaro Ltd.
4  * Author: Jintack Lim <jintack.lim@linaro.org>
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 
11 #include <asm/fixmap.h>
12 #include <asm/kvm_arm.h>
13 #include <asm/kvm_emulate.h>
14 #include <asm/kvm_mmu.h>
15 #include <asm/kvm_nested.h>
16 #include <asm/sysreg.h>
17 
18 #include "sys_regs.h"
19 
20 struct vncr_tlb {
21 	/* The guest's VNCR_EL2 */
22 	u64			gva;
23 	struct s1_walk_info	wi;
24 	struct s1_walk_result	wr;
25 
26 	u64			hpa;
27 
28 	/* -1 when not mapped on a CPU */
29 	int			cpu;
30 
31 	/*
32 	 * true if the TLB is valid. Can only be changed with the
33 	 * mmu_lock held.
34 	 */
35 	bool			valid;
36 };
37 
38 /*
39  * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
40  * memory usage and potential number of different sets of S2 PTs in
41  * the guests. Running out of S2 MMUs only affects performance (we
42  * will invalidate them more often).
43  */
44 #define S2_MMU_PER_VCPU		2
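/*
 * Rough sizing example (illustrative only): kvm_vcpu_init_nested() allocates
 * online_vcpus * S2_MMU_PER_VCPU shadow MMUs, so a 4-vcpu VM ends up with 8
 * shadow stage-2 contexts shared between whatever VMID/VTTBR combinations
 * its guest hypervisor happens to use.
 */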
45 
46 void kvm_init_nested(struct kvm *kvm)
47 {
48 	kvm->arch.nested_mmus = NULL;
49 	kvm->arch.nested_mmus_size = 0;
50 	atomic_set(&kvm->arch.vncr_map_count, 0);
51 }
52 
53 static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
54 {
55 	/*
56 	 * We only initialise the IPA range on the canonical MMU, which
57 	 * defines the contract between KVM and userspace on where the
58 	 * "hardware" is in the IPA space. This affects the validity of MMIO
59 	 * exits forwarded to userspace, for example.
60 	 *
61 	 * For nested S2s, we use the PARange as exposed to the guest, as it
62 	 * is allowed to use it at will to expose whatever memory map it
63 	 * wants to its own guests as it would be on real HW.
64 	 */
65 	return kvm_init_stage2_mmu(kvm, mmu, kvm_get_pa_bits(kvm));
66 }
67 
68 int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
69 {
70 	struct kvm *kvm = vcpu->kvm;
71 	struct kvm_s2_mmu *tmp;
72 	int num_mmus, ret = 0;
73 
74 	if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
75 	    !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
76 		return -EINVAL;
77 
78 	if (!vcpu->arch.ctxt.vncr_array)
79 		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT |
80 								    __GFP_ZERO);
81 
82 	if (!vcpu->arch.ctxt.vncr_array)
83 		return -ENOMEM;
84 
85 	/*
86 	 * Let's treat memory allocation failures as benign: If we fail to
87 	 * allocate anything, return an error and keep the allocated array
88 	 * alive. Userspace may try to recover by initializing the vcpu
89 	 * again, and there is no reason to affect the whole VM for this.
90 	 */
91 	num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
92 	tmp = kvrealloc(kvm->arch.nested_mmus,
93 			size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
94 			GFP_KERNEL_ACCOUNT | __GFP_ZERO);
95 	if (!tmp)
96 		return -ENOMEM;
97 
98 	swap(kvm->arch.nested_mmus, tmp);
99 
100 	/*
101 	 * If we went through a reallocation, adjust the MMU back-pointers in
102 	 * the previously initialised kvm_pgtable structures.
103 	 */
104 	if (kvm->arch.nested_mmus != tmp)
105 		for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
106 			kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
107 
108 	for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
109 		ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
110 
111 	if (ret) {
112 		for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
113 			kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
114 
115 		free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
116 		vcpu->arch.ctxt.vncr_array = NULL;
117 
118 		return ret;
119 	}
120 
121 	kvm->arch.nested_mmus_size = num_mmus;
122 
123 	return 0;
124 }
125 
126 struct s2_walk_info {
127 	u64		baddr;
128 	unsigned int	max_oa_bits;
129 	unsigned int	pgshift;
130 	unsigned int	sl;
131 	unsigned int	t0sz;
132 	bool		be;
133 	bool		ha;
134 };
135 
136 static u32 compute_fsc(int level, u32 fsc)
137 {
138 	return fsc | (level & 0x3);
139 }
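/*
 * Worked example: compute_fsc(2, ESR_ELx_FSC_FAULT) yields 0b000110, i.e.
 * "translation fault, level 2", which is the architected FSC encoding that
 * esr_s2_fault() below merges into the outgoing ESR.
 */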
140 
141 static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
142 {
143 	u32 esr;
144 
145 	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
146 	esr |= compute_fsc(level, fsc);
147 	return esr;
148 }
149 
150 static int get_ia_size(struct s2_walk_info *wi)
151 {
152 	return 64 - wi->t0sz;
153 }
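/*
 * For instance, a guest VTCR_EL2.T0SZ of 24 gives a 64 - 24 = 40 bit IPA
 * space, i.e. a 1TB stage-2 input range.
 */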
154 
155 static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi,
156 				int level, int input_size, int stride)
157 {
158 	int start_size, pa_max;
159 
160 	pa_max = kvm_get_pa_bits(vcpu->kvm);
161 
162 	/* Check translation limits */
163 	switch (BIT(wi->pgshift)) {
164 	case SZ_64K:
165 		if (level == 0 || (level == 1 && pa_max <= 42))
166 			return -EFAULT;
167 		break;
168 	case SZ_16K:
169 		if (level == 0 || (level == 1 && pa_max <= 40))
170 			return -EFAULT;
171 		break;
172 	case SZ_4K:
173 		if (level < 0 || (level == 0 && pa_max <= 42))
174 			return -EFAULT;
175 		break;
176 	}
177 
178 	/* Check input size limits */
179 	if (input_size > pa_max)
180 		return -EFAULT;
181 
182 	/* Check number of entries in starting level table */
183 	start_size = input_size - ((3 - level) * stride + wi->pgshift);
184 	if (start_size < 1 || start_size > stride + 4)
185 		return -EFAULT;
186 
187 	return 0;
188 }
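/*
 * Worked example for the start_size check: with a 4K granule (pgshift = 12,
 * stride = 9), a 40-bit input size and a starting level of 1,
 * start_size = 40 - ((3 - 1) * 9 + 12) = 10, i.e. a 1024-entry concatenated
 * level-1 table, which fits in the allowed [1, stride + 4] range.
 */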
189 
190 /* Check if output is within boundaries */
191 static int check_output_size(struct s2_walk_info *wi, phys_addr_t output)
192 {
193 	unsigned int output_size = wi->max_oa_bits;
194 
195 	if (output_size != 48 && (output & GENMASK_ULL(47, output_size)))
196 		return -1;
197 
198 	return 0;
199 }
200 
201 static int read_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 *desc,
202 			      struct s2_walk_info *wi)
203 {
204 	u64 val;
205 	int r;
206 
207 	r = kvm_read_guest(vcpu->kvm, pa, &val, sizeof(val));
208 	if (r)
209 		return r;
210 
211 	/*
212 	 * Byte-swap the descriptor if endianness differs between the
213 	 * host and the guest hypervisor.
214 	 */
215 	if (wi->be)
216 		*desc = be64_to_cpu((__force __be64)val);
217 	else
218 		*desc = le64_to_cpu((__force __le64)val);
219 
220 	return 0;
221 }
222 
223 static int swap_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 old, u64 new,
224 			      struct s2_walk_info *wi)
225 {
226 	if (wi->be) {
227 		old = (__force u64)cpu_to_be64(old);
228 		new = (__force u64)cpu_to_be64(new);
229 	} else {
230 		old = (__force u64)cpu_to_le64(old);
231 		new = (__force u64)cpu_to_le64(new);
232 	}
233 
234 	return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
235 }
236 
237 /*
238  * This is essentially a C version of the pseudocode from the ARM ARM
239  * AArch64.TranslationTableWalk() function. I strongly recommend looking at
240  * that pseudocode when trying to understand this.
241  *
242  * Must be called with the kvm->srcu read lock held
243  */
244 static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
245 			      struct s2_walk_info *wi, struct kvm_s2_trans *out)
246 {
247 	int first_block_level, level, stride, input_size, base_lower_bound;
248 	phys_addr_t base_addr;
249 	unsigned int addr_top, addr_bottom;
250 	u64 desc, new_desc;  /* page table entry */
251 	int ret;
252 	phys_addr_t paddr;
253 
254 	switch (BIT(wi->pgshift)) {
255 	default:
256 	case SZ_64K:
257 	case SZ_16K:
258 		level = 3 - wi->sl;
259 		first_block_level = 2;
260 		break;
261 	case SZ_4K:
262 		level = 2 - wi->sl;
263 		first_block_level = 1;
264 		break;
265 	}
266 
267 	stride = wi->pgshift - 3;
268 	input_size = get_ia_size(wi);
269 	if (input_size > 48 || input_size < 25)
270 		return -EFAULT;
271 
272 	ret = check_base_s2_limits(vcpu, wi, level, input_size, stride);
273 	if (WARN_ON(ret)) {
274 		out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT);
275 		return ret;
276 	}
277 
278 	base_lower_bound = 3 + input_size - ((3 - level) * stride +
279 			   wi->pgshift);
280 	base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound);
281 
282 	if (check_output_size(wi, base_addr)) {
283 		/* R_BFHQH */
284 		out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ);
285 		return 1;
286 	}
287 
288 	addr_top = input_size - 1;
289 
290 	while (1) {
291 		phys_addr_t index;
292 
293 		addr_bottom = (3 - level) * stride + wi->pgshift;
294 		index = (ipa & GENMASK_ULL(addr_top, addr_bottom))
295 			>> (addr_bottom - 3);
296 
297 		paddr = base_addr | index;
298 		ret = read_guest_s2_desc(vcpu, paddr, &desc, wi);
299 		if (ret < 0) {
300 			out->esr = ESR_ELx_FSC_SEA_TTW(level);
301 			return ret;
302 		}
303 
304 		new_desc = desc;
305 
306 		/* Check for valid descriptor at this point */
307 		if (!(desc & KVM_PTE_VALID)) {
308 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
309 			out->desc = desc;
310 			return 1;
311 		}
312 
313 		if (FIELD_GET(KVM_PTE_TYPE, desc) == KVM_PTE_TYPE_BLOCK) {
314 			if (level < 3)
315 				break;
316 
317 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
318 			out->desc = desc;
319 			return 1;
320 		}
321 
322 		/* We're at the final level */
323 		if (level == 3)
324 			break;
325 
326 		if (check_output_size(wi, desc)) {
327 			out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
328 			out->desc = desc;
329 			return 1;
330 		}
331 
332 		base_addr = desc & GENMASK_ULL(47, wi->pgshift);
333 
334 		level += 1;
335 		addr_top = addr_bottom - 1;
336 	}
337 
338 	if (level < first_block_level) {
339 		out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
340 		out->desc = desc;
341 		return 1;
342 	}
343 
344 	if (check_output_size(wi, desc)) {
345 		out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
346 		out->desc = desc;
347 		return 1;
348 	}
349 
350 	if (wi->ha)
351 		new_desc |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
352 
353 	if (new_desc != desc) {
354 		ret = swap_guest_s2_desc(vcpu, paddr, desc, new_desc, wi);
355 		if (ret)
356 			return ret;
357 
358 		desc = new_desc;
359 	}
360 
361 	if (!(desc & KVM_PTE_LEAF_ATTR_LO_S2_AF)) {
362 		out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
363 		out->desc = desc;
364 		return 1;
365 	}
366 
367 	addr_bottom += contiguous_bit_shift(desc, wi, level);
368 
369 	/* Calculate and return the result */
370 	paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
371 		(ipa & GENMASK_ULL(addr_bottom - 1, 0));
372 	out->output = paddr;
373 	out->block_size = 1UL << ((3 - level) * stride + wi->pgshift);
374 	out->readable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
375 	out->writable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
376 	out->level = level;
377 	out->desc = desc;
378 	return 0;
379 }
380 
381 static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
382 {
383 	wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
384 
385 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
386 	case VTCR_EL2_TG0_4K:
387 		wi->pgshift = 12;	 break;
388 	case VTCR_EL2_TG0_16K:
389 		wi->pgshift = 14;	 break;
390 	case VTCR_EL2_TG0_64K:
391 	default:	    /* IMPDEF: treat any other value as 64k */
392 		wi->pgshift = 16;	 break;
393 	}
394 
395 	wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
396 	/* Global limit for now, should eventually be per-VM */
397 	wi->max_oa_bits = min(get_kvm_ipa_limit(),
398 			      ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr), false));
399 
400 	wi->ha = vtcr & VTCR_EL2_HA;
401 }
402 
403 int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
404 		       struct kvm_s2_trans *result)
405 {
406 	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
407 	struct s2_walk_info wi;
408 	int ret;
409 
410 	result->esr = 0;
411 
412 	if (!vcpu_has_nv(vcpu))
413 		return 0;
414 
415 	wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
416 
417 	vtcr_to_walk_info(vtcr, &wi);
418 
419 	wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
420 
421 	ret = walk_nested_s2_pgd(vcpu, gipa, &wi, result);
422 	if (ret)
423 		result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC);
424 
425 	return ret;
426 }
427 
428 static unsigned int ttl_to_size(u8 ttl)
429 {
430 	int level = ttl & 3;
431 	int gran = (ttl >> 2) & 3;
432 	unsigned int max_size = 0;
433 
434 	switch (gran) {
435 	case TLBI_TTL_TG_4K:
436 		switch (level) {
437 		case 0:
438 			break;
439 		case 1:
440 			max_size = SZ_1G;
441 			break;
442 		case 2:
443 			max_size = SZ_2M;
444 			break;
445 		case 3:
446 			max_size = SZ_4K;
447 			break;
448 		}
449 		break;
450 	case TLBI_TTL_TG_16K:
451 		switch (level) {
452 		case 0:
453 		case 1:
454 			break;
455 		case 2:
456 			max_size = SZ_32M;
457 			break;
458 		case 3:
459 			max_size = SZ_16K;
460 			break;
461 		}
462 		break;
463 	case TLBI_TTL_TG_64K:
464 		switch (level) {
465 		case 0:
466 		case 1:
467 			/* No 52bit IPA support */
468 			break;
469 		case 2:
470 			max_size = SZ_512M;
471 			break;
472 		case 3:
473 			max_size = SZ_64K;
474 			break;
475 		}
476 		break;
477 	default:			/* No size information */
478 		break;
479 	}
480 
481 	return max_size;
482 }
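/*
 * The TTL value packs the granule in bits [3:2] and the level in bits [1:0].
 * For example, (TLBI_TTL_TG_4K << 2) | 2 describes a level-2 mapping with 4K
 * pages, for which ttl_to_size() returns SZ_2M; a TTL of 0 carries no usable
 * size information and yields 0.
 */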
483 
484 static u8 pgshift_level_to_ttl(u16 shift, u8 level)
485 {
486 	u8 ttl;
487 
488 	switch (shift) {
489 	case 12:
490 		ttl = TLBI_TTL_TG_4K;
491 		break;
492 	case 14:
493 		ttl = TLBI_TTL_TG_16K;
494 		break;
495 	case 16:
496 		ttl = TLBI_TTL_TG_64K;
497 		break;
498 	default:
499 		BUG();
500 	}
501 
502 	ttl <<= 2;
503 	ttl |= level & 3;
504 
505 	return ttl;
506 }
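/*
 * pgshift_level_to_ttl() is the inverse operation: for instance,
 * pgshift_level_to_ttl(16, 3) encodes a level-3 64K mapping, and feeding
 * that back into ttl_to_size() gives SZ_64K. This round-trip is how the
 * VNCR pseudo-TLB code below derives the span of a cached translation from
 * its S1 walk result.
 */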
507 
508 /*
509  * Compute the equivalent of the TTL field by parsing the shadow PT.  The
510  * granule size is extracted from the cached VTCR_EL2.TG0 while the level is
511  * retrieved from the first entry carrying the level as a tag.
512  */
513 static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
514 {
515 	u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
516 	kvm_pte_t pte;
517 	u8 ttl, level;
518 
519 	lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
520 
521 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
522 	case VTCR_EL2_TG0_4K:
523 		ttl = (TLBI_TTL_TG_4K << 2);
524 		break;
525 	case VTCR_EL2_TG0_16K:
526 		ttl = (TLBI_TTL_TG_16K << 2);
527 		break;
528 	case VTCR_EL2_TG0_64K:
529 	default:	    /* IMPDEF: treat any other value as 64k */
530 		ttl = (TLBI_TTL_TG_64K << 2);
531 		break;
532 	}
533 
534 	tmp = addr;
535 
536 again:
537 	/* Iteratively compute the block sizes for a particular granule size */
538 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
539 	case VTCR_EL2_TG0_4K:
540 		if	(sz < SZ_4K)	sz = SZ_4K;
541 		else if (sz < SZ_2M)	sz = SZ_2M;
542 		else if (sz < SZ_1G)	sz = SZ_1G;
543 		else			sz = 0;
544 		break;
545 	case VTCR_EL2_TG0_16K:
546 		if	(sz < SZ_16K)	sz = SZ_16K;
547 		else if (sz < SZ_32M)	sz = SZ_32M;
548 		else			sz = 0;
549 		break;
550 	case VTCR_EL2_TG0_64K:
551 	default:	    /* IMPDEF: treat any other value as 64k */
552 		if	(sz < SZ_64K)	sz = SZ_64K;
553 		else if (sz < SZ_512M)	sz = SZ_512M;
554 		else			sz = 0;
555 		break;
556 	}
557 
558 	if (sz == 0)
559 		return 0;
560 
561 	tmp &= ~(sz - 1);
562 	if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))
563 		goto again;
564 	if (!(pte & PTE_VALID))
565 		goto again;
566 	level = FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte);
567 	if (!level)
568 		goto again;
569 
570 	ttl |= level;
571 
572 	/*
573 	 * We have now found some level information in the shadow S2. Check
574 	 * that the resulting range actually includes the original IPA.
575 	 */
576 	sz = ttl_to_size(ttl);
577 	if (addr < (tmp + sz))
578 		return ttl;
579 
580 	return 0;
581 }
582 
583 unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
584 {
585 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
586 	unsigned long max_size;
587 	u8 ttl;
588 
589 	ttl = FIELD_GET(TLBI_TTL_MASK, val);
590 
591 	if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
592 		/* No TTL, check the shadow S2 for a hint */
593 		u64 addr = (val & GENMASK_ULL(35, 0)) << 12;
594 		ttl = get_guest_mapping_ttl(mmu, addr);
595 	}
596 
597 	max_size = ttl_to_size(ttl);
598 
599 	if (!max_size) {
600 		/* Compute the maximum extent of the invalidation */
601 		switch (FIELD_GET(VTCR_EL2_TG0_MASK, mmu->tlb_vtcr)) {
602 		case VTCR_EL2_TG0_4K:
603 			max_size = SZ_1G;
604 			break;
605 		case VTCR_EL2_TG0_16K:
606 			max_size = SZ_32M;
607 			break;
608 		case VTCR_EL2_TG0_64K:
609 		default:    /* IMPDEF: treat any other value as 64k */
610 			/*
611 			 * No, we do not support 52bit IPA in nested yet. Once
612 			 * we do, this should be 4TB.
613 			 */
614 			max_size = SZ_512M;
615 			break;
616 		}
617 	}
618 
619 	WARN_ON(!max_size);
620 	return max_size;
621 }
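/*
 * Example of the fallback path above: a TLBI with no TTL hint, no usable
 * level tag in the shadow S2 and a 4K guest granule ends up invalidating a
 * SZ_1G range around the target address, the largest block a 4K-granule
 * stage-2 can map.
 */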
622 
623 /*
624  * We can have multiple *different* MMU contexts with the same VMID:
625  *
626  * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
627  *
628  * - Multiple vcpus using private S2s (huh huh...), hence differing by the
629  *   VTTBR_EL2.BADDR address
630  *
631  * - A combination of the above...
632  *
633  * We can always identify which MMU context to pick at run-time.  However,
634  * TLB invalidation involving a VMID must take action on all the TLBs using
635  * this particular VMID. This translates into applying the same invalidation
636  * operation to all the contexts that are using this VMID. Moar phun!
637  */
638 void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
639 				const union tlbi_info *info,
640 				void (*tlbi_callback)(struct kvm_s2_mmu *,
641 						      const union tlbi_info *))
642 {
643 	write_lock(&kvm->mmu_lock);
644 
645 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
646 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
647 
648 		if (!kvm_s2_mmu_valid(mmu))
649 			continue;
650 
651 		if (vmid == get_vmid(mmu->tlb_vttbr))
652 			tlbi_callback(mmu, info);
653 	}
654 
655 	write_unlock(&kvm->mmu_lock);
656 }
657 
658 struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu)
659 {
660 	struct kvm *kvm = vcpu->kvm;
661 	bool nested_stage2_enabled;
662 	u64 vttbr, vtcr, hcr;
663 
664 	lockdep_assert_held_write(&kvm->mmu_lock);
665 
666 	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
667 	vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
668 	hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
669 
670 	nested_stage2_enabled = hcr & HCR_VM;
671 
672 	/* Don't consider the CnP bit for the vttbr match */
673 	vttbr &= ~VTTBR_CNP_BIT;
674 
675 	/*
676 	 * Two possibilities when looking up a S2 MMU context:
677 	 *
678 	 * - either S2 is enabled in the guest, and we need a context that is
679 	 *   S2-enabled and matches the full VTTBR (VMID+BADDR) and VTCR,
680 	 *   which makes it safe from a TLB conflict perspective (a broken
681 	 *   guest won't be able to generate them),
682 	 *
683 	 * - or S2 is disabled, and we need a context that is S2-disabled
684 	 *   and matches the VMID only, as all TLBs are tagged by VMID even
685 	 *   if S2 translation is disabled.
686 	 */
687 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
688 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
689 
690 		if (!kvm_s2_mmu_valid(mmu))
691 			continue;
692 
693 		if (nested_stage2_enabled &&
694 		    mmu->nested_stage2_enabled &&
695 		    vttbr == mmu->tlb_vttbr &&
696 		    vtcr == mmu->tlb_vtcr)
697 			return mmu;
698 
699 		if (!nested_stage2_enabled &&
700 		    !mmu->nested_stage2_enabled &&
701 		    get_vmid(vttbr) == get_vmid(mmu->tlb_vttbr))
702 			return mmu;
703 	}
704 	return NULL;
705 }
706 
707 static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
708 {
709 	struct kvm *kvm = vcpu->kvm;
710 	struct kvm_s2_mmu *s2_mmu;
711 	int i;
712 
713 	lockdep_assert_held_write(&vcpu->kvm->mmu_lock);
714 
715 	s2_mmu = lookup_s2_mmu(vcpu);
716 	if (s2_mmu)
717 		goto out;
718 
719 	/*
720 	 * Make sure we don't always search from the same point, or we
721 	 * will always reuse a potentially active context, leaving
722 	 * free contexts unused.
723 	 */
724 	for (i = kvm->arch.nested_mmus_next;
725 	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
726 	     i++) {
727 		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
728 
729 		if (atomic_read(&s2_mmu->refcnt) == 0)
730 			break;
731 	}
732 	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
733 
734 	/* Set the scene for the next search */
735 	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
736 
737 	/* Make sure we don't forget to do the laundry */
738 	if (kvm_s2_mmu_valid(s2_mmu))
739 		s2_mmu->pending_unmap = true;
740 
741 	/*
742 	 * The virtual VMID (modulo CnP) will be used as a key when matching
743 	 * an existing kvm_s2_mmu.
744 	 *
745 	 * We cache VTCR at allocation time, once and for all. It'd be great
746 	 * if the guest didn't screw that one up, as this is not very
747 	 * forgiving...
748 	 */
749 	s2_mmu->tlb_vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT;
750 	s2_mmu->tlb_vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
751 	s2_mmu->nested_stage2_enabled = vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM;
752 
753 out:
754 	atomic_inc(&s2_mmu->refcnt);
755 
756 	/*
757 	 * Set the vCPU request to perform an unmap, even if the pending unmap
758 	 * originates from another vCPU. This guarantees that the MMU has been
759 	 * completely unmapped before any vCPU actually uses it, and allows
760 	 * multiple vCPUs to lend a hand with completing the unmap.
761 	 */
762 	if (s2_mmu->pending_unmap)
763 		kvm_make_request(KVM_REQ_NESTED_S2_UNMAP, vcpu);
764 
765 	return s2_mmu;
766 }
767 
768 void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
769 {
770 	/* CnP being set denotes an invalid entry */
771 	mmu->tlb_vttbr = VTTBR_CNP_BIT;
772 	mmu->nested_stage2_enabled = false;
773 	atomic_set(&mmu->refcnt, 0);
774 }
775 
776 void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
777 {
778 	/*
779 	 * If the vCPU kept its reference on the MMU after the last put,
780 	 * keep rolling with it.
781 	 */
782 	if (is_hyp_ctxt(vcpu)) {
783 		if (!vcpu->arch.hw_mmu)
784 			vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
785 	} else {
786 		if (!vcpu->arch.hw_mmu) {
787 			scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
788 				vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
789 		}
790 
791 		if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
792 			kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
793 	}
794 }
795 
796 void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
797 {
798 	/* Unconditionally drop the VNCR mapping if we have one */
799 	if (host_data_test_flag(L1_VNCR_MAPPED)) {
800 		BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
801 		BUG_ON(is_hyp_ctxt(vcpu));
802 
803 		clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
804 		vcpu->arch.vncr_tlb->cpu = -1;
805 		host_data_clear_flag(L1_VNCR_MAPPED);
806 		atomic_dec(&vcpu->kvm->arch.vncr_map_count);
807 	}
808 
809 	/*
810 	 * Keep a reference on the associated stage-2 MMU if the vCPU is
811 	 * scheduling out and not in WFI emulation, suggesting it is likely to
812 	 * reuse the MMU sometime soon.
813 	 */
814 	if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI))
815 		return;
816 
817 	if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu))
818 		atomic_dec(&vcpu->arch.hw_mmu->refcnt);
819 
820 	vcpu->arch.hw_mmu = NULL;
821 }
822 
823 /*
824  * Returns non-zero if permission fault is handled by injecting it to the next
825  * level hypervisor.
826  */
827 int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
828 {
829 	bool forward_fault = false;
830 
831 	trans->esr = 0;
832 
833 	if (!kvm_vcpu_trap_is_permission_fault(vcpu))
834 		return 0;
835 
836 	if (kvm_vcpu_trap_is_iabt(vcpu)) {
837 		if (vcpu_mode_priv(vcpu))
838 			forward_fault = !kvm_s2_trans_exec_el1(vcpu->kvm, trans);
839 		else
840 			forward_fault = !kvm_s2_trans_exec_el0(vcpu->kvm, trans);
841 	} else {
842 		bool write_fault = kvm_is_write_fault(vcpu);
843 
844 		forward_fault = ((write_fault && !trans->writable) ||
845 				 (!write_fault && !trans->readable));
846 	}
847 
848 	if (forward_fault)
849 		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
850 
851 	return forward_fault;
852 }
853 
854 int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
855 {
856 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
857 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
858 
859 	return kvm_inject_nested_sync(vcpu, esr_el2);
860 }
861 
862 u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
863 {
864 	enum vcpu_sysreg ttbr_elx;
865 	u64 tcr;
866 	u16 asid;
867 
868 	switch (regime) {
869 	case TR_EL10:
870 		tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
871 		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
872 		break;
873 	case TR_EL20:
874 		tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
875 		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
876 		break;
877 	default:
878 		BUG();
879 	}
880 
881 	asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
882 	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
883 	    !(tcr & TCR_ASID16))
884 		asid &= GENMASK(7, 0);
885 
886 	return asid;
887 }
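/*
 * For example, for the EL2&0 regime with TCR_EL2.A1 == 1 the ASID is sampled
 * from TTBR1_EL2[63:48]; it is then truncated to 8 bits unless both the VM
 * advertises 16-bit ASIDs and TCR_EL2.AS is set.
 */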
888 
889 static void invalidate_vncr(struct vncr_tlb *vt)
890 {
891 	vt->valid = false;
892 	if (vt->cpu != -1)
893 		clear_fixmap(vncr_fixmap(vt->cpu));
894 }
895 
896 static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
897 {
898 	struct kvm_vcpu *vcpu;
899 	unsigned long i;
900 
901 	lockdep_assert_held_write(&kvm->mmu_lock);
902 
903 	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
904 		return;
905 
906 	kvm_for_each_vcpu(i, vcpu, kvm) {
907 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
908 		u64 ipa_start, ipa_end, ipa_size;
909 
910 		/*
911 		 * Careful here: we end up here from an MMU notifier,
912 		 * and this can race against a vcpu not being onlined
913 		 * yet, without the pseudo-TLB being allocated.
914 		 *
915 		 * Skip those, as they obviously don't participate in
916 		 * the invalidation at this stage.
917 		 */
918 		if (!vt)
919 			continue;
920 
921 		if (!vt->valid)
922 			continue;
923 
924 		ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
925 							    vt->wr.level));
926 		ipa_start = vt->wr.pa & ~(ipa_size - 1);
927 		ipa_end = ipa_start + ipa_size;
928 
929 		if (ipa_end <= start || ipa_start >= end)
930 			continue;
931 
932 		invalidate_vncr(vt);
933 	}
934 }
935 
936 struct s1e2_tlbi_scope {
937 	enum {
938 		TLBI_ALL,
939 		TLBI_VA,
940 		TLBI_VAA,
941 		TLBI_ASID,
942 	} type;
943 
944 	u16 asid;
945 	u64 va;
946 	u64 size;
947 };
948 
949 static void invalidate_vncr_va(struct kvm *kvm,
950 			       struct s1e2_tlbi_scope *scope)
951 {
952 	struct kvm_vcpu *vcpu;
953 	unsigned long i;
954 
955 	lockdep_assert_held_write(&kvm->mmu_lock);
956 
957 	kvm_for_each_vcpu(i, vcpu, kvm) {
958 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
959 		u64 va_start, va_end, va_size;
960 
961 		if (!vt->valid)
962 			continue;
963 
964 		va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
965 							   vt->wr.level));
966 		va_start = vt->gva & ~(va_size - 1);
967 		va_end = va_start + va_size;
968 
969 		switch (scope->type) {
970 		case TLBI_ALL:
971 			break;
972 
973 		case TLBI_VA:
974 			if (va_end <= scope->va ||
975 			    va_start >= (scope->va + scope->size))
976 				continue;
977 			if (vt->wr.nG && vt->wr.asid != scope->asid)
978 				continue;
979 			break;
980 
981 		case TLBI_VAA:
982 			if (va_end <= scope->va ||
983 			    va_start >= (scope->va + scope->size))
984 				continue;
985 			break;
986 
987 		case TLBI_ASID:
988 			if (!vt->wr.nG || vt->wr.asid != scope->asid)
989 				continue;
990 			break;
991 		}
992 
993 		invalidate_vncr(vt);
994 	}
995 }
996 
997 #define tlbi_va_s1_to_va(v)	(u64)sign_extend64((v) << 12, 48)
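/*
 * TLBI VA arguments carry VA[55:12] in Xt[43:0]. For example,
 * tlbi_va_s1_to_va(0x123456) yields 0x123456000; for a 48-bit kernel VA the
 * sign extension from bit 48 restores the canonical upper-half form, which
 * is what gets compared against the pseudo-TLB's gva.
 */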
998 
999 static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
1000 				  struct s1e2_tlbi_scope *scope)
1001 {
1002 	switch (inst) {
1003 	case OP_TLBI_ALLE2:
1004 	case OP_TLBI_ALLE2IS:
1005 	case OP_TLBI_ALLE2OS:
1006 	case OP_TLBI_VMALLE1:
1007 	case OP_TLBI_VMALLE1IS:
1008 	case OP_TLBI_VMALLE1OS:
1009 	case OP_TLBI_ALLE2NXS:
1010 	case OP_TLBI_ALLE2ISNXS:
1011 	case OP_TLBI_ALLE2OSNXS:
1012 	case OP_TLBI_VMALLE1NXS:
1013 	case OP_TLBI_VMALLE1ISNXS:
1014 	case OP_TLBI_VMALLE1OSNXS:
1015 		scope->type = TLBI_ALL;
1016 		break;
1017 	case OP_TLBI_VAE2:
1018 	case OP_TLBI_VAE2IS:
1019 	case OP_TLBI_VAE2OS:
1020 	case OP_TLBI_VAE1:
1021 	case OP_TLBI_VAE1IS:
1022 	case OP_TLBI_VAE1OS:
1023 	case OP_TLBI_VAE2NXS:
1024 	case OP_TLBI_VAE2ISNXS:
1025 	case OP_TLBI_VAE2OSNXS:
1026 	case OP_TLBI_VAE1NXS:
1027 	case OP_TLBI_VAE1ISNXS:
1028 	case OP_TLBI_VAE1OSNXS:
1029 	case OP_TLBI_VALE2:
1030 	case OP_TLBI_VALE2IS:
1031 	case OP_TLBI_VALE2OS:
1032 	case OP_TLBI_VALE1:
1033 	case OP_TLBI_VALE1IS:
1034 	case OP_TLBI_VALE1OS:
1035 	case OP_TLBI_VALE2NXS:
1036 	case OP_TLBI_VALE2ISNXS:
1037 	case OP_TLBI_VALE2OSNXS:
1038 	case OP_TLBI_VALE1NXS:
1039 	case OP_TLBI_VALE1ISNXS:
1040 	case OP_TLBI_VALE1OSNXS:
1041 		scope->type = TLBI_VA;
1042 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
1043 		if (!scope->size)
1044 			scope->size = SZ_1G;
1045 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
1046 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
1047 		break;
1048 	case OP_TLBI_ASIDE1:
1049 	case OP_TLBI_ASIDE1IS:
1050 	case OP_TLBI_ASIDE1OS:
1051 	case OP_TLBI_ASIDE1NXS:
1052 	case OP_TLBI_ASIDE1ISNXS:
1053 	case OP_TLBI_ASIDE1OSNXS:
1054 		scope->type = TLBI_ASID;
1055 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
1056 		break;
1057 	case OP_TLBI_VAAE1:
1058 	case OP_TLBI_VAAE1IS:
1059 	case OP_TLBI_VAAE1OS:
1060 	case OP_TLBI_VAAE1NXS:
1061 	case OP_TLBI_VAAE1ISNXS:
1062 	case OP_TLBI_VAAE1OSNXS:
1063 	case OP_TLBI_VAALE1:
1064 	case OP_TLBI_VAALE1IS:
1065 	case OP_TLBI_VAALE1OS:
1066 	case OP_TLBI_VAALE1NXS:
1067 	case OP_TLBI_VAALE1ISNXS:
1068 	case OP_TLBI_VAALE1OSNXS:
1069 		scope->type = TLBI_VAA;
1070 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
1071 		if (!scope->size)
1072 			scope->size = SZ_1G;
1073 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
1074 		break;
1075 	case OP_TLBI_RVAE2:
1076 	case OP_TLBI_RVAE2IS:
1077 	case OP_TLBI_RVAE2OS:
1078 	case OP_TLBI_RVAE1:
1079 	case OP_TLBI_RVAE1IS:
1080 	case OP_TLBI_RVAE1OS:
1081 	case OP_TLBI_RVAE2NXS:
1082 	case OP_TLBI_RVAE2ISNXS:
1083 	case OP_TLBI_RVAE2OSNXS:
1084 	case OP_TLBI_RVAE1NXS:
1085 	case OP_TLBI_RVAE1ISNXS:
1086 	case OP_TLBI_RVAE1OSNXS:
1087 	case OP_TLBI_RVALE2:
1088 	case OP_TLBI_RVALE2IS:
1089 	case OP_TLBI_RVALE2OS:
1090 	case OP_TLBI_RVALE1:
1091 	case OP_TLBI_RVALE1IS:
1092 	case OP_TLBI_RVALE1OS:
1093 	case OP_TLBI_RVALE2NXS:
1094 	case OP_TLBI_RVALE2ISNXS:
1095 	case OP_TLBI_RVALE2OSNXS:
1096 	case OP_TLBI_RVALE1NXS:
1097 	case OP_TLBI_RVALE1ISNXS:
1098 	case OP_TLBI_RVALE1OSNXS:
1099 		scope->type = TLBI_VA;
1100 		scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
1101 		break;
1102 	case OP_TLBI_RVAAE1:
1103 	case OP_TLBI_RVAAE1IS:
1104 	case OP_TLBI_RVAAE1OS:
1105 	case OP_TLBI_RVAAE1NXS:
1106 	case OP_TLBI_RVAAE1ISNXS:
1107 	case OP_TLBI_RVAAE1OSNXS:
1108 	case OP_TLBI_RVAALE1:
1109 	case OP_TLBI_RVAALE1IS:
1110 	case OP_TLBI_RVAALE1OS:
1111 	case OP_TLBI_RVAALE1NXS:
1112 	case OP_TLBI_RVAALE1ISNXS:
1113 	case OP_TLBI_RVAALE1OSNXS:
1114 		scope->type = TLBI_VAA;
1115 		scope->va = decode_range_tlbi(val, &scope->size, NULL);
1116 		break;
1117 	}
1118 }
1119 
1120 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
1121 {
1122 	struct s1e2_tlbi_scope scope = {};
1123 
1124 	compute_s1_tlbi_range(vcpu, inst, val, &scope);
1125 
1126 	guard(write_lock)(&vcpu->kvm->mmu_lock);
1127 	invalidate_vncr_va(vcpu->kvm, &scope);
1128 }
1129 
1130 void kvm_nested_s2_wp(struct kvm *kvm)
1131 {
1132 	int i;
1133 
1134 	lockdep_assert_held_write(&kvm->mmu_lock);
1135 
1136 	if (!kvm->arch.nested_mmus_size)
1137 		return;
1138 
1139 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1140 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1141 
1142 		if (kvm_s2_mmu_valid(mmu))
1143 			kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
1144 	}
1145 
1146 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1147 }
1148 
1149 void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
1150 {
1151 	int i;
1152 
1153 	lockdep_assert_held_write(&kvm->mmu_lock);
1154 
1155 	if (!kvm->arch.nested_mmus_size)
1156 		return;
1157 
1158 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1159 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1160 
1161 		if (kvm_s2_mmu_valid(mmu))
1162 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
1163 	}
1164 
1165 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1166 }
1167 
1168 void kvm_nested_s2_flush(struct kvm *kvm)
1169 {
1170 	int i;
1171 
1172 	lockdep_assert_held_write(&kvm->mmu_lock);
1173 
1174 	if (!kvm->arch.nested_mmus_size)
1175 		return;
1176 
1177 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1178 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1179 
1180 		if (kvm_s2_mmu_valid(mmu))
1181 			kvm_stage2_flush_range(mmu, 0, kvm_phys_size(mmu));
1182 	}
1183 }
1184 
1185 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1186 {
1187 	int i;
1188 
1189 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1190 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1191 
1192 		if (!WARN_ON(atomic_read(&mmu->refcnt)))
1193 			kvm_free_stage2_pgd(mmu);
1194 	}
1195 	kvfree(kvm->arch.nested_mmus);
1196 	kvm->arch.nested_mmus = NULL;
1197 	kvm->arch.nested_mmus_size = 0;
1198 	kvm_uninit_stage2_mmu(kvm);
1199 }
1200 
1201 /*
1202  * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter:
1203  *
1204  * - We introduce an internal representation of a vcpu-private TLB,
1205  *   representing the mapping between the guest VA contained in VNCR_EL2,
1206  *   the IPA the guest's EL2 PTs point to, and the actual PA this lives at.
1207  *
1208  * - On translation fault from a nested VNCR access, we create such a TLB.
1209  *   If there is no mapping to describe, the guest inherits the fault.
1210  *   Crucially, no actual mapping is done at this stage.
1211  *
1212  * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
1213  *   TLB exists, we map it in the fixmap for this CPU, and run with it. We
1214  *   have to respect the permissions dictated by the guest, but not the
1215  *   memory type (FWB is a must).
1216  *
1217  * - Note that we usually don't do a vcpu_load() on the back of a fault
1218  *   (unless we are preempted), so the resolution of a translation fault
1219  *   must go via a request that will map the VNCR page in the fixmap.
1220  *   vcpu_load() might as well use the same mechanism.
1221  *
1222  * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
1223  *   mapped, we unmap it. Yes it is that simple. The TLB still exists
1224  *   though, and may be reused at a later load.
1225  *
1226  * - On permission fault, we simply forward the fault to the guest's EL2.
1227  *   Get out of my way.
1228  *
1229  * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
1230  *   intersects with the TLBI request, invalidate it, and unmap the page
1231  *   from the fixmap. Because we need to look at all the vcpu-private TLBs,
1232  *   this requires some wide-ranging locking to ensure that nothing races
1233  *   against it. This may require some refcounting to avoid the search when
1234  *   no such TLB is present.
1235  *
1236  * - On MMU notifiers, we must invalidate our TLB in a similar way, but
1237  *   looking at the IPA instead. The funny part is that there may not be a
1238  *   stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
1239  *   instructions.
1240  */
1241 
1242 int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
1243 {
1244 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
1245 		return 0;
1246 
1247 	vcpu->arch.vncr_tlb = kzalloc_obj(*vcpu->arch.vncr_tlb,
1248 					  GFP_KERNEL_ACCOUNT);
1249 	if (!vcpu->arch.vncr_tlb)
1250 		return -ENOMEM;
1251 
1252 	return 0;
1253 }
1254 
1255 static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
1256 {
1257 	return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
1258 }
1259 
1260 static int kvm_translate_vncr(struct kvm_vcpu *vcpu, bool *is_gmem)
1261 {
1262 	struct kvm_memory_slot *memslot;
1263 	bool write_fault, writable;
1264 	unsigned long mmu_seq;
1265 	struct vncr_tlb *vt;
1266 	struct page *page;
1267 	u64 va, pfn, gfn;
1268 	int ret;
1269 
1270 	vt = vcpu->arch.vncr_tlb;
1271 
1272 	/*
1273 	 * If we're about to walk the EL2 S1 PTs, we must invalidate the
1274 	 * current TLB, as it could be sampled from another vcpu doing a
1275 	 * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
1276 	 * translation, so not much of a choice.
1277 	 *
1278 	 * We also prepare the next walk whilst we're at it.
1279 	 */
1280 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1281 		invalidate_vncr(vt);
1282 
1283 		vt->wi = (struct s1_walk_info) {
1284 			.regime	= TR_EL20,
1285 			.as_el0	= false,
1286 			.pan	= false,
1287 		};
1288 		vt->wr = (struct s1_walk_result){};
1289 	}
1290 
1291 	guard(srcu)(&vcpu->kvm->srcu);
1292 
1293 	va =  read_vncr_el2(vcpu);
1294 
1295 	ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
1296 	if (ret)
1297 		return ret;
1298 
1299 	write_fault = kvm_is_write_fault(vcpu);
1300 
1301 	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1302 	smp_rmb();
1303 
1304 	gfn = vt->wr.pa >> PAGE_SHIFT;
1305 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
1306 	if (!memslot)
1307 		return -EFAULT;
1308 
1309 	*is_gmem = kvm_slot_has_gmem(memslot);
1310 	if (!*is_gmem) {
1311 		pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
1312 					&writable, &page);
1313 		if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
1314 			return -EFAULT;
1315 	} else {
1316 		ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL);
1317 		if (ret) {
1318 			kvm_prepare_memory_fault_exit(vcpu, vt->wr.pa, PAGE_SIZE,
1319 					      write_fault, false, false);
1320 			return ret;
1321 		}
1322 	}
1323 
1324 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1325 		if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
1326 			return -EAGAIN;
1327 
1328 		vt->gva = va;
1329 		vt->hpa = pfn << PAGE_SHIFT;
1330 		vt->valid = true;
1331 		vt->cpu = -1;
1332 
1333 		kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
1334 		kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
1335 	}
1336 
1337 	if (vt->wr.pw)
1338 		mark_page_dirty(vcpu->kvm, gfn);
1339 
1340 	return 0;
1341 }
1342 
1343 static void inject_vncr_perm(struct kvm_vcpu *vcpu)
1344 {
1345 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1346 	u64 esr = kvm_vcpu_get_esr(vcpu);
1347 
1348 	/* Adjust the fault level to reflect that of the guest's S1 walk */
1349 	esr &= ~ESR_ELx_FSC;
1350 	esr |= FIELD_PREP(ESR_ELx_FSC,
1351 			  ESR_ELx_FSC_PERM_L(vt->wr.level));
1352 
1353 	kvm_inject_nested_sync(vcpu, esr);
1354 }
1355 
1356 static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
1357 {
1358 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1359 
1360 	lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
1361 
1362 	if (!vt->valid)
1363 		return false;
1364 
1365 	if (read_vncr_el2(vcpu) != vt->gva)
1366 		return false;
1367 
1368 	if (vt->wr.nG)
1369 		return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;
1370 
1371 	return true;
1372 }
1373 
1374 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
1375 {
1376 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1377 	u64 esr = kvm_vcpu_get_esr(vcpu);
1378 
1379 	WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
1380 
1381 	if (kvm_vcpu_abt_issea(vcpu))
1382 		return kvm_handle_guest_sea(vcpu);
1383 
1384 	if (esr_fsc_is_permission_fault(esr)) {
1385 		inject_vncr_perm(vcpu);
1386 	} else if (esr_fsc_is_translation_fault(esr)) {
1387 		bool valid, is_gmem = false;
1388 		int ret;
1389 
1390 		scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
1391 			valid = kvm_vncr_tlb_lookup(vcpu);
1392 
1393 		if (!valid)
1394 			ret = kvm_translate_vncr(vcpu, &is_gmem);
1395 		else
1396 			ret = -EPERM;
1397 
1398 		switch (ret) {
1399 		case -EAGAIN:
1400 			/* Let's try again... */
1401 			break;
1402 		case -ENOMEM:
1403 			/*
1404 			 * For guest_memfd, this indicates that it failed to
1405 			 * create a folio to back the memory. Inform userspace.
1406 			 */
1407 			if (is_gmem)
1408 				return 0;
1409 			/* Otherwise, let's try again... */
1410 			break;
1411 		case -EFAULT:
1412 		case -EIO:
1413 		case -EHWPOISON:
1414 			if (is_gmem)
1415 				return 0;
1416 			fallthrough;
1417 		case -EINVAL:
1418 		case -ENOENT:
1419 		case -EACCES:
1420 			/*
1421 			 * Translation failed, inject the corresponding
1422 			 * exception back to EL2.
1423 			 */
1424 			BUG_ON(!vt->wr.failed);
1425 
1426 			esr &= ~ESR_ELx_FSC;
1427 			esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
1428 
1429 			kvm_inject_nested_sync(vcpu, esr);
1430 			break;
1431 		case -EPERM:
1432 			/* Hack to deal with POE until we get kernel support */
1433 			inject_vncr_perm(vcpu);
1434 			break;
1435 		case 0:
1436 			break;
1437 		}
1438 	} else {
1439 		WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
1440 	}
1441 
1442 	return 1;
1443 }
1444 
1445 static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
1446 {
1447 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1448 	pgprot_t prot;
1449 
1450 	guard(preempt)();
1451 	guard(read_lock)(&vcpu->kvm->mmu_lock);
1452 
1453 	/*
1454 	 * The request to map VNCR may have raced against some other
1455 	 * event, such as an interrupt, and may not be valid anymore.
1456 	 */
1457 	if (is_hyp_ctxt(vcpu))
1458 		return;
1459 
1460 	/*
1461 	 * Check that the pseudo-TLB is valid and that VNCR_EL2 still
1462 	 * contains the expected value. If it doesn't, we simply bail out
1463 	 * without a mapping -- a transformed MSR/MRS will generate the
1464 	 * fault and allow us to populate the pseudo-TLB.
1465 	 */
1466 	if (!vt->valid)
1467 		return;
1468 
1469 	if (read_vncr_el2(vcpu) != vt->gva)
1470 		return;
1471 
1472 	if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
1473 		return;
1474 
1475 	vt->cpu = smp_processor_id();
1476 
1477 	if (vt->wr.pw && vt->wr.pr)
1478 		prot = PAGE_KERNEL;
1479 	else if (vt->wr.pr)
1480 		prot = PAGE_KERNEL_RO;
1481 	else
1482 		prot = PAGE_NONE;
1483 
1484 	/*
1485 	 * We can't map write-only (or no permission at all) in the kernel,
1486 	 * but the guest can do it if using POE, so we'll have to turn a
1487 	 * translation fault into a permission fault at runtime.
1488 	 * FIXME: WO doesn't work at all, need POE support in the kernel.
1489 	 */
1490 	if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
1491 		__set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
1492 		host_data_set_flag(L1_VNCR_MAPPED);
1493 		atomic_inc(&vcpu->kvm->arch.vncr_map_count);
1494 	}
1495 }
1496 
1497 #define has_tgran_2(__r, __sz)						\
1498 	({								\
1499 		u64 _s1, _s2, _mmfr0 = __r;				\
1500 									\
1501 		_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
1502 				    TGRAN##__sz##_2, _mmfr0);		\
1503 									\
1504 		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
1505 				    TGRAN##__sz, _mmfr0);		\
1506 									\
1507 		((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI &&		\
1508 		  _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
1509 		 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
1510 		  _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI));		\
1511 	})
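/*
 * has_tgran_2() decodes the FEAT_GTG scheme: a TGRANx_2 field of "NI" means
 * no stage-2 support for that granule, a value of "TGRANx" defers to the
 * stage-1 field, and anything else advertises stage-2 support explicitly.
 * For example, TGRAN4_2 == TGRAN4 with TGRAN4 != NI still counts as 4K
 * stage-2 support.
 */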
1512 /*
1513  * Our emulated CPU doesn't support all the possible features. For the
1514  * sake of simplicity (and probably mental sanity), wipe out a number
1515  * of feature bits we don't intend to support for the time being.
1516  * This list should get updated as new features get added to the NV
1517  * support, and new extensions to the architecture.
1518  */
1519 u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
1520 {
1521 	u64 orig_val = val;
1522 
1523 	switch (reg) {
1524 	case SYS_ID_AA64ISAR1_EL1:
1525 		/* Support everything but LS64 and Spec Invalidation */
1526 		val &= ~(ID_AA64ISAR1_EL1_LS64	|
1527 			 ID_AA64ISAR1_EL1_SPECRES);
1528 		break;
1529 
1530 	case SYS_ID_AA64PFR0_EL1:
1531 		/* No RME, AMU, MPAM, or S-EL2 */
1532 		val &= ~(ID_AA64PFR0_EL1_RME	|
1533 			 ID_AA64PFR0_EL1_AMU	|
1534 			 ID_AA64PFR0_EL1_MPAM	|
1535 			 ID_AA64PFR0_EL1_SEL2	|
1536 			 ID_AA64PFR0_EL1_EL3	|
1537 			 ID_AA64PFR0_EL1_EL2	|
1538 			 ID_AA64PFR0_EL1_EL1	|
1539 			 ID_AA64PFR0_EL1_EL0);
1540 		/* 64bit only at any EL */
1541 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
1542 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
1543 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
1544 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
1545 		break;
1546 
1547 	case SYS_ID_AA64PFR1_EL1:
1548 		/* Only support BTI, SSBS, CSV2_frac */
1549 		val &= ~(ID_AA64PFR1_EL1_PFAR		|
1550 			 ID_AA64PFR1_EL1_MTEX		|
1551 			 ID_AA64PFR1_EL1_THE		|
1552 			 ID_AA64PFR1_EL1_GCS		|
1553 			 ID_AA64PFR1_EL1_MTE_frac	|
1554 			 ID_AA64PFR1_EL1_NMI		|
1555 			 ID_AA64PFR1_EL1_SME		|
1556 			 ID_AA64PFR1_EL1_RES0		|
1557 			 ID_AA64PFR1_EL1_MPAM_frac	|
1558 			 ID_AA64PFR1_EL1_MTE);
1559 		break;
1560 
1561 	case SYS_ID_AA64MMFR0_EL1:
1562 		/* Hide ExS, Secure Memory */
1563 		val &= ~(ID_AA64MMFR0_EL1_EXS		|
1564 			 ID_AA64MMFR0_EL1_TGRAN4_2	|
1565 			 ID_AA64MMFR0_EL1_TGRAN16_2	|
1566 			 ID_AA64MMFR0_EL1_TGRAN64_2	|
1567 			 ID_AA64MMFR0_EL1_SNSMEM);
1568 
1569 		/* Hide CNTPOFF if present */
1570 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
1571 
1572 		/* Disallow unsupported S2 page sizes */
1573 		switch (PAGE_SIZE) {
1574 		case SZ_64K:
1575 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
1576 			fallthrough;
1577 		case SZ_16K:
1578 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
1579 			fallthrough;
1580 		case SZ_4K:
1581 			/* Support everything */
1582 			break;
1583 		}
1584 
1585 		/*
1586 		 * Since we can't support a guest S2 page size smaller
1587 		 * than the host's own page size (due to KVM only
1588 		 * populating its own S2 using the kernel's page
1589 		 * size), advertise the limitation using FEAT_GTG.
1590 		 */
1591 		switch (PAGE_SIZE) {
1592 		case SZ_4K:
1593 			if (has_tgran_2(orig_val, 4))
1594 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
1595 			fallthrough;
1596 		case SZ_16K:
1597 			if (has_tgran_2(orig_val, 16))
1598 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
1599 			fallthrough;
1600 		case SZ_64K:
1601 			if (has_tgran_2(orig_val, 64))
1602 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
1603 			break;
1604 		}
1605 
1606 		/* Cap PARange to 48bits */
1607 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
1608 		break;
1609 
1610 	case SYS_ID_AA64MMFR1_EL1:
1611 		val &= ~(ID_AA64MMFR1_EL1_CMOW		|
1612 			 ID_AA64MMFR1_EL1_nTLBPA	|
1613 			 ID_AA64MMFR1_EL1_ETS);
1614 
1615 		/* FEAT_E2H0 implies no VHE */
1616 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
1617 			val &= ~ID_AA64MMFR1_EL1_VH;
1618 
1619 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR1_EL1, HAFDBS, AF);
1620 		break;
1621 
1622 	case SYS_ID_AA64MMFR2_EL1:
1623 		val &= ~(ID_AA64MMFR2_EL1_BBM	|
1624 			 ID_AA64MMFR2_EL1_TTL	|
1625 			 GENMASK_ULL(47, 44)	|
1626 			 ID_AA64MMFR2_EL1_ST	|
1627 			 ID_AA64MMFR2_EL1_CCIDX	|
1628 			 ID_AA64MMFR2_EL1_VARange);
1629 
1630 		/* Force TTL support */
1631 		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
1632 		break;
1633 
1634 	case SYS_ID_AA64MMFR4_EL1:
1635 		/*
1636 		 * You get EITHER
1637 		 *
1638 		 * - FEAT_VHE without FEAT_E2H0
1639 		 * - FEAT_NV limited to FEAT_NV2
1640 		 * - HCR_EL2.NV1 being RES0
1641 		 *
1642 		 * OR
1643 		 *
1644 		 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
1645 		 *
1646 		 * Life is too short for anything else.
1647 		 */
1648 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
1649 			val = 0;
1650 		} else {
1651 			val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
1652 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
1653 		}
1654 		break;
1655 
1656 	case SYS_ID_AA64DFR0_EL1:
1657 		/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
1658 		val &= ~(ID_AA64DFR0_EL1_ExtTrcBuff	|
1659 			 ID_AA64DFR0_EL1_BRBE		|
1660 			 ID_AA64DFR0_EL1_MTPMU		|
1661 			 ID_AA64DFR0_EL1_TraceBuffer	|
1662 			 ID_AA64DFR0_EL1_TraceFilt	|
1663 			 ID_AA64DFR0_EL1_PMSVer		|
1664 			 ID_AA64DFR0_EL1_CTX_CMPs	|
1665 			 ID_AA64DFR0_EL1_SEBEP		|
1666 			 ID_AA64DFR0_EL1_PMSS		|
1667 			 ID_AA64DFR0_EL1_TraceVer);
1668 
1669 		/*
1670 		 * FEAT_Debugv8p9 requires support for extended breakpoints /
1671 		 * watchpoints.
1672 		 */
1673 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1674 		break;
1675 	}
1676 
1677 	return val;
1678 }
1679 
1680 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
1681 			     enum vcpu_sysreg sr, u64 v)
1682 {
1683 	struct resx resx;
1684 
1685 	resx = kvm_get_sysreg_resx(vcpu->kvm, sr);
1686 	v &= ~resx.res0;
1687 	v |= resx.res1;
1688 
1689 	return v;
1690 }
1691 
1692 static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, struct resx resx)
1693 {
1694 	BUILD_BUG_ON(!__builtin_constant_p(sr));
1695 	BUILD_BUG_ON(sr < __SANITISED_REG_START__);
1696 	BUILD_BUG_ON(sr >= NR_SYS_REGS);
1697 
1698 	kvm_set_sysreg_resx(kvm, sr, resx);
1699 }
1700 
1701 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
1702 {
1703 	struct kvm *kvm = vcpu->kvm;
1704 	struct resx resx;
1705 
1706 	lockdep_assert_held(&kvm->arch.config_lock);
1707 
1708 	if (kvm->arch.sysreg_masks)
1709 		goto out;
1710 
1711 	kvm->arch.sysreg_masks = kzalloc_obj(*(kvm->arch.sysreg_masks),
1712 					     GFP_KERNEL_ACCOUNT);
1713 	if (!kvm->arch.sysreg_masks)
1714 		return -ENOMEM;
1715 
1716 	/* VTTBR_EL2 */
1717 	resx = (typeof(resx)){};
1718 	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
1719 		resx.res0 |= GENMASK(63, 56);
1720 	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
1721 		resx.res0 |= VTTBR_CNP_BIT;
1722 	set_sysreg_masks(kvm, VTTBR_EL2, resx);
1723 
1724 	/* VTCR_EL2 */
1725 	resx = get_reg_fixed_bits(kvm, VTCR_EL2);
1726 	set_sysreg_masks(kvm, VTCR_EL2, resx);
1727 
1728 	/* VMPIDR_EL2 */
1729 	resx.res0 = GENMASK(63, 40) | GENMASK(30, 24);
1730 	resx.res1 = BIT(31);
1731 	set_sysreg_masks(kvm, VMPIDR_EL2, resx);
1732 
1733 	/* HCR_EL2 */
1734 	resx = get_reg_fixed_bits(kvm, HCR_EL2);
1735 	set_sysreg_masks(kvm, HCR_EL2, resx);
1736 
1737 	/* HCRX_EL2 */
1738 	resx = get_reg_fixed_bits(kvm, HCRX_EL2);
1739 	set_sysreg_masks(kvm, HCRX_EL2, resx);
1740 
1741 	/* HFG[RW]TR_EL2 */
1742 	resx = get_reg_fixed_bits(kvm, HFGRTR_EL2);
1743 	set_sysreg_masks(kvm, HFGRTR_EL2, resx);
1744 	resx = get_reg_fixed_bits(kvm, HFGWTR_EL2);
1745 	set_sysreg_masks(kvm, HFGWTR_EL2, resx);
1746 
1747 	/* HDFG[RW]TR_EL2 */
1748 	resx = get_reg_fixed_bits(kvm, HDFGRTR_EL2);
1749 	set_sysreg_masks(kvm, HDFGRTR_EL2, resx);
1750 	resx = get_reg_fixed_bits(kvm, HDFGWTR_EL2);
1751 	set_sysreg_masks(kvm, HDFGWTR_EL2, resx);
1752 
1753 	/* HFGITR_EL2 */
1754 	resx = get_reg_fixed_bits(kvm, HFGITR_EL2);
1755 	set_sysreg_masks(kvm, HFGITR_EL2, resx);
1756 
1757 	/* HAFGRTR_EL2 - not a lot to see here */
1758 	resx = get_reg_fixed_bits(kvm, HAFGRTR_EL2);
1759 	set_sysreg_masks(kvm, HAFGRTR_EL2, resx);
1760 
1761 	/* HFG[RW]TR2_EL2 */
1762 	resx = get_reg_fixed_bits(kvm, HFGRTR2_EL2);
1763 	set_sysreg_masks(kvm, HFGRTR2_EL2, resx);
1764 	resx = get_reg_fixed_bits(kvm, HFGWTR2_EL2);
1765 	set_sysreg_masks(kvm, HFGWTR2_EL2, resx);
1766 
1767 	/* HDFG[RW]TR2_EL2 */
1768 	resx = get_reg_fixed_bits(kvm, HDFGRTR2_EL2);
1769 	set_sysreg_masks(kvm, HDFGRTR2_EL2, resx);
1770 	resx = get_reg_fixed_bits(kvm, HDFGWTR2_EL2);
1771 	set_sysreg_masks(kvm, HDFGWTR2_EL2, resx);
1772 
1773 	/* HFGITR2_EL2 */
1774 	resx = get_reg_fixed_bits(kvm, HFGITR2_EL2);
1775 	set_sysreg_masks(kvm, HFGITR2_EL2, resx);
1776 
1777 	/* TCR2_EL2 */
1778 	resx = get_reg_fixed_bits(kvm, TCR2_EL2);
1779 	set_sysreg_masks(kvm, TCR2_EL2, resx);
1780 
1781 	/* SCTLR_EL1 */
1782 	resx = get_reg_fixed_bits(kvm, SCTLR_EL1);
1783 	set_sysreg_masks(kvm, SCTLR_EL1, resx);
1784 
1785 	/* SCTLR_EL2 */
1786 	resx = get_reg_fixed_bits(kvm, SCTLR_EL2);
1787 	set_sysreg_masks(kvm, SCTLR_EL2, resx);
1788 
1789 	/* SCTLR2_ELx */
1790 	resx = get_reg_fixed_bits(kvm, SCTLR2_EL1);
1791 	set_sysreg_masks(kvm, SCTLR2_EL1, resx);
1792 	resx = get_reg_fixed_bits(kvm, SCTLR2_EL2);
1793 	set_sysreg_masks(kvm, SCTLR2_EL2, resx);
1794 
1795 	/* MDCR_EL2 */
1796 	resx = get_reg_fixed_bits(kvm, MDCR_EL2);
1797 	set_sysreg_masks(kvm, MDCR_EL2, resx);
1798 
1799 	/* CNTHCTL_EL2 */
1800 	resx.res0 = GENMASK(63, 20);
1801 	resx.res1 = 0;
1802 	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
1803 		resx.res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
1804 	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
1805 		resx.res0 |= CNTHCTL_ECV;
1806 		if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
1807 			resx.res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
1808 				      CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
1809 	}
1810 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
1811 		resx.res0 |= GENMASK(11, 8);
1812 	set_sysreg_masks(kvm, CNTHCTL_EL2, resx);
1813 
1814 	/* ICH_HCR_EL2 */
1815 	resx.res0 = ICH_HCR_EL2_RES0;
1816 	resx.res1 = ICH_HCR_EL2_RES1;
1817 	if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
1818 		resx.res0 |= ICH_HCR_EL2_TDIR;
1819 	/* No GICv4 is presented to the guest */
1820 	resx.res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
1821 	set_sysreg_masks(kvm, ICH_HCR_EL2, resx);
1822 
1823 	/* VNCR_EL2 */
1824 	resx.res0 = VNCR_EL2_RES0;
1825 	resx.res1 = VNCR_EL2_RES1;
1826 	set_sysreg_masks(kvm, VNCR_EL2, resx);
1827 
1828 out:
1829 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
1830 		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
1831 
1832 	return 0;
1833 }
1834 
1835 void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
1836 {
1837 	if (kvm_check_request(KVM_REQ_NESTED_S2_UNMAP, vcpu)) {
1838 		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
1839 
1840 		write_lock(&vcpu->kvm->mmu_lock);
1841 		if (mmu->pending_unmap) {
1842 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), true);
1843 			mmu->pending_unmap = false;
1844 		}
1845 		write_unlock(&vcpu->kvm->mmu_lock);
1846 	}
1847 
1848 	if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
1849 		kvm_map_l1_vncr(vcpu);
1850 
1851 	/* Must be last, as may switch context! */
1852 	if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
1853 		kvm_inject_nested_irq(vcpu);
1854 }
1855 
1856 /*
1857  * One of the many architectural bugs in FEAT_NV2 is that the guest hypervisor
1858  * can write to HCR_EL2 behind our back, potentially changing the exception
1859  * routing / masking for even the host context.
1860  *
1861  * What follows is some slop to (1) react to exception routing / masking and (2)
1862  * preserve the pending SError state across translation regimes.
1863  */
1864 void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu)
1865 {
1866 	if (!vcpu_has_nv(vcpu))
1867 		return;
1868 
1869 	if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1870 		kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1871 }
1872 
1873 void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu)
1874 {
1875 	unsigned long *hcr = vcpu_hcr(vcpu);
1876 
1877 	if (!vcpu_has_nv(vcpu))
1878 		return;
1879 
1880 	/*
1881 	 * We previously decided that an SError was deliverable to the guest.
1882 	 * Reap the pending state from HCR_EL2 and...
1883 	 */
1884 	if (unlikely(__test_and_clear_bit(__ffs(HCR_VSE), hcr)))
1885 		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
1886 
1887 	/*
1888 	 * Re-attempt SError injection in case the deliverability has changed,
1889 	 * which is necessary to faithfully emulate WFI in the case of a pending
1890 	 * SError being a wakeup condition.
1891 	 */
1892 	if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1893 		kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1894 }
1895 
1896 /*
1897  * KVM unconditionally sets most of these traps anyway but uses an allowlist
1898  * to document the guest hypervisor traps that may take precedence and guard
1899  * against future changes to the non-nested trap configuration.
1900  */
1901 #define NV_MDCR_GUEST_INCLUDE	(MDCR_EL2_TDE	|	\
1902 				 MDCR_EL2_TDA	|	\
1903 				 MDCR_EL2_TDRA	|	\
1904 				 MDCR_EL2_TTRF	|	\
1905 				 MDCR_EL2_TPMS	|	\
1906 				 MDCR_EL2_TPM	|	\
1907 				 MDCR_EL2_TPMCR	|	\
1908 				 MDCR_EL2_TDCC	|	\
1909 				 MDCR_EL2_TDOSA)
1910 
1911 void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
1912 {
1913 	u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
1914 
1915 	if (is_nested_ctxt(vcpu))
1916 		vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
1917 	/*
1918 	 * In yet another example where FEAT_NV2 is fscking broken, accesses
1919 	 * to MDSCR_EL1 are redirected to the VNCR despite having an effect
1920 	 * at EL2. Use a big hammer to apply sanity.
1921 	 *
1922 	 * Unless of course we have FEAT_FGT, in which case we can precisely
1923 	 * trap MDSCR_EL1.
1924 	 */
1925 	else if (!cpus_have_final_cap(ARM64_HAS_FGT))
1926 		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
1927 }
1928