xref: /linux/arch/arm64/kvm/nested.c (revision e85d1c0cc77b08b21a44912d69d0c0c405b1808c)
1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3  * Copyright (C) 2017 - Columbia University and Linaro Ltd.
4  * Author: Jintack Lim <jintack.lim@linaro.org>
5  */
6 
7 #include <linux/bitfield.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10 
11 #include <asm/fixmap.h>
12 #include <asm/kvm_arm.h>
13 #include <asm/kvm_emulate.h>
14 #include <asm/kvm_mmu.h>
15 #include <asm/kvm_nested.h>
16 #include <asm/sysreg.h>
17 
18 #include "sys_regs.h"
19 
20 struct vncr_tlb {
21 	/* The guest's VNCR_EL2 */
22 	u64			gva;
23 	struct s1_walk_info	wi;
24 	struct s1_walk_result	wr;
25 
26 	u64			hpa;
27 
28 	/* -1 when not mapped on a CPU */
29 	int			cpu;
30 
31 	/*
32 	 * true if the TLB is valid. Can only be changed with the
33 	 * mmu_lock held.
34 	 */
35 	bool			valid;
36 };
37 
38 /*
39  * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
40  * memory usage and potential number of different sets of S2 PTs in
41  * the guests. Running out of S2 MMUs only affects performance (we
42  * will invalidate them more often).
43  */
44 #define S2_MMU_PER_VCPU		2
45 
46 void kvm_init_nested(struct kvm *kvm)
47 {
48 	kvm->arch.nested_mmus = NULL;
49 	kvm->arch.nested_mmus_size = 0;
50 	atomic_set(&kvm->arch.vncr_map_count, 0);
51 }
52 
53 static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
54 {
55 	/*
56 	 * We only initialise the IPA range on the canonical MMU, which
57 	 * defines the contract between KVM and userspace on where the
58 	 * "hardware" is in the IPA space. This affects the validity of MMIO
59 	 * exits forwarded to userspace, for example.
60 	 *
61 	 * For nested S2s, we use the PARange as exposed to the guest, as it
62 	 * is allowed to use it at will to expose whatever memory map it
63 	 * wants to its own guests as it would be on real HW.
64 	 */
65 	return kvm_init_stage2_mmu(kvm, mmu, kvm_get_pa_bits(kvm));
66 }
67 
68 int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
69 {
70 	struct kvm *kvm = vcpu->kvm;
71 	struct kvm_s2_mmu *tmp;
72 	int num_mmus, ret = 0;
73 
74 	if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
75 	    !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
76 		return -EINVAL;
77 
78 	if (!vcpu->arch.ctxt.vncr_array)
79 		vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT |
80 								    __GFP_ZERO);
81 
82 	if (!vcpu->arch.ctxt.vncr_array)
83 		return -ENOMEM;
84 
85 	/*
86 	 * Let's treat memory allocation failures as benign: If we fail to
87 	 * allocate anything, return an error and keep the allocated array
88 	 * alive. Userspace may try to recover by initializing the vcpu
89 	 * again, and there is no reason to affect the whole VM for this.
90 	 */
91 	num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
92 	tmp = kvrealloc(kvm->arch.nested_mmus,
93 			size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
94 			GFP_KERNEL_ACCOUNT | __GFP_ZERO);
95 	if (!tmp)
96 		return -ENOMEM;
97 
98 	swap(kvm->arch.nested_mmus, tmp);
99 
100 	/*
101 	 * If we went through a reallocation, adjust the MMU back-pointers in
102 	 * the previously initialised kvm_pgtable structures.
103 	 */
104 	if (kvm->arch.nested_mmus != tmp)
105 		for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
106 			kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
107 
108 	for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
109 		ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
110 
111 	if (ret) {
112 		for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
113 			kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
114 
115 		free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
116 		vcpu->arch.ctxt.vncr_array = NULL;
117 
118 		return ret;
119 	}
120 
121 	kvm->arch.nested_mmus_size = num_mmus;
122 
123 	return 0;
124 }
125 
126 struct s2_walk_info {
127 	u64		baddr;
128 	unsigned int	max_oa_bits;
129 	unsigned int	pgshift;
130 	unsigned int	sl;
131 	unsigned int	t0sz;
132 	bool		be;
133 	bool		ha;
134 };
135 
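/*
 * For the fault classes used here, the bottom two bits of the stage-2
 * fault status code encode the level (0-3) at which the fault was
 * detected; compute_fsc() simply merges the level into the FSC
 * template it is given.
 */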
136 static u32 compute_fsc(int level, u32 fsc)
137 {
138 	return fsc | (level & 0x3);
139 }
140 
141 static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
142 {
143 	u32 esr;
144 
145 	esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
146 	esr |= compute_fsc(level, fsc);
147 	return esr;
148 }
149 
150 static int get_ia_size(struct s2_walk_info *wi)
151 {
152 	return 64 - wi->t0sz;
153 }
154 
155 static int check_base_s2_limits(struct kvm_vcpu *vcpu, struct s2_walk_info *wi,
156 				int level, int input_size, int stride)
157 {
158 	int start_size, pa_max;
159 
160 	pa_max = kvm_get_pa_bits(vcpu->kvm);
161 
162 	/* Check translation limits */
163 	switch (BIT(wi->pgshift)) {
164 	case SZ_64K:
165 		if (level == 0 || (level == 1 && pa_max <= 42))
166 			return -EFAULT;
167 		break;
168 	case SZ_16K:
169 		if (level == 0 || (level == 1 && pa_max <= 40))
170 			return -EFAULT;
171 		break;
172 	case SZ_4K:
173 		if (level < 0 || (level == 0 && pa_max <= 42))
174 			return -EFAULT;
175 		break;
176 	}
177 
178 	/* Check input size limits */
179 	if (input_size > pa_max)
180 		return -EFAULT;
181 
182 	/* Check number of entries in starting level table */
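	/*
	 * The stage-2 starting level may be backed by up to 16
	 * concatenated tables, which is why the index is allowed to be
	 * up to 4 bits wider than a single table (stride).
	 */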
183 	start_size = input_size - ((3 - level) * stride + wi->pgshift);
184 	if (start_size < 1 || start_size > stride + 4)
185 		return -EFAULT;
186 
187 	return 0;
188 }
189 
190 /* Check if output is within boundaries */
191 static int check_output_size(struct s2_walk_info *wi, phys_addr_t output)
192 {
193 	unsigned int output_size = wi->max_oa_bits;
194 
195 	if (output_size != 48 && (output & GENMASK_ULL(47, output_size)))
196 		return -1;
197 
198 	return 0;
199 }
200 
201 static int read_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 *desc,
202 			      struct s2_walk_info *wi)
203 {
204 	u64 val;
205 	int r;
206 
207 	r = kvm_read_guest(vcpu->kvm, pa, &val, sizeof(val));
208 	if (r)
209 		return r;
210 
211 	/*
212 	 * Handle reversed descriptors if endianness differs between the
213 	 * host and the guest hypervisor.
214 	 */
215 	if (wi->be)
216 		*desc = be64_to_cpu((__force __be64)val);
217 	else
218 		*desc = le64_to_cpu((__force __le64)val);
219 
220 	return 0;
221 }
222 
223 static int swap_guest_s2_desc(struct kvm_vcpu *vcpu, phys_addr_t pa, u64 old, u64 new,
224 			      struct s2_walk_info *wi)
225 {
226 	if (wi->be) {
227 		old = (__force u64)cpu_to_be64(old);
228 		new = (__force u64)cpu_to_be64(new);
229 	} else {
230 		old = (__force u64)cpu_to_le64(old);
231 		new = (__force u64)cpu_to_le64(new);
232 	}
233 
234 	return __kvm_at_swap_desc(vcpu->kvm, pa, old, new);
235 }
236 
237 /*
238  * This is essentially a C version of the pseudocode from the ARM ARM
239  * AArch64.TranslationTableWalk function. I strongly recommend looking at
240  * that pseudocode when trying to understand this.
241  *
242  * Must be called with the kvm->srcu read lock held
243  */
244 static int walk_nested_s2_pgd(struct kvm_vcpu *vcpu, phys_addr_t ipa,
245 			      struct s2_walk_info *wi, struct kvm_s2_trans *out)
246 {
247 	int first_block_level, level, stride, input_size, base_lower_bound;
248 	phys_addr_t base_addr;
249 	unsigned int addr_top, addr_bottom;
250 	u64 desc, new_desc;  /* page table entry */
251 	int ret;
252 	phys_addr_t paddr;
253 
254 	switch (BIT(wi->pgshift)) {
255 	default:
256 	case SZ_64K:
257 	case SZ_16K:
258 		level = 3 - wi->sl;
259 		first_block_level = 2;
260 		break;
261 	case SZ_4K:
262 		level = 2 - wi->sl;
263 		first_block_level = 1;
264 		break;
265 	}
266 
267 	stride = wi->pgshift - 3;
268 	input_size = get_ia_size(wi);
269 	if (input_size > 48 || input_size < 25)
270 		return -EFAULT;
271 
272 	ret = check_base_s2_limits(vcpu, wi, level, input_size, stride);
273 	if (WARN_ON(ret)) {
274 		out->esr = compute_fsc(0, ESR_ELx_FSC_FAULT);
275 		return ret;
276 	}
277 
278 	base_lower_bound = 3 + input_size - ((3 - level) * stride +
279 			   wi->pgshift);
280 	base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound);
281 
282 	if (check_output_size(wi, base_addr)) {
283 		/* R_BFHQH */
284 		out->esr = compute_fsc(0, ESR_ELx_FSC_ADDRSZ);
285 		return 1;
286 	}
287 
288 	addr_top = input_size - 1;
289 
290 	while (1) {
291 		phys_addr_t index;
292 
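		/*
		 * Select the IPA bits indexing this level and scale
		 * them into a byte offset into the table (8 bytes per
		 * descriptor).
		 */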
293 		addr_bottom = (3 - level) * stride + wi->pgshift;
294 		index = (ipa & GENMASK_ULL(addr_top, addr_bottom))
295 			>> (addr_bottom - 3);
296 
297 		paddr = base_addr | index;
298 		ret = read_guest_s2_desc(vcpu, paddr, &desc, wi);
299 		if (ret < 0) {
300 			out->esr = ESR_ELx_FSC_SEA_TTW(level);
301 			return ret;
302 		}
303 
304 		new_desc = desc;
305 
306 		/* Check for valid descriptor at this point */
307 		if (!(desc & KVM_PTE_VALID)) {
308 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
309 			out->desc = desc;
310 			return 1;
311 		}
312 
313 		if (FIELD_GET(KVM_PTE_TYPE, desc) == KVM_PTE_TYPE_BLOCK) {
314 			if (level < 3)
315 				break;
316 
317 			out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
318 			out->desc = desc;
319 			return 1;
320 		}
321 
322 		/* We're at the final level */
323 		if (level == 3)
324 			break;
325 
326 		if (check_output_size(wi, desc)) {
327 			out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
328 			out->desc = desc;
329 			return 1;
330 		}
331 
332 		base_addr = desc & GENMASK_ULL(47, wi->pgshift);
333 
334 		level += 1;
335 		addr_top = addr_bottom - 1;
336 	}
337 
338 	if (level < first_block_level) {
339 		out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
340 		out->desc = desc;
341 		return 1;
342 	}
343 
344 	if (check_output_size(wi, desc)) {
345 		out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
346 		out->desc = desc;
347 		return 1;
348 	}
349 
350 	if (wi->ha)
351 		new_desc |= KVM_PTE_LEAF_ATTR_LO_S2_AF;
352 
353 	if (new_desc != desc) {
354 		ret = swap_guest_s2_desc(vcpu, paddr, desc, new_desc, wi);
355 		if (ret)
356 			return ret;
357 
358 		desc = new_desc;
359 	}
360 
361 	if (!(desc & KVM_PTE_LEAF_ATTR_LO_S2_AF)) {
362 		out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
363 		out->desc = desc;
364 		return 1;
365 	}
366 
367 	addr_bottom += contiguous_bit_shift(desc, wi, level);
368 
369 	/* Calculate and return the result */
370 	paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
371 		(ipa & GENMASK_ULL(addr_bottom - 1, 0));
372 	out->output = paddr;
373 	out->block_size = 1UL << ((3 - level) * stride + wi->pgshift);
374 	out->readable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_R;
375 	out->writable = desc & KVM_PTE_LEAF_ATTR_LO_S2_S2AP_W;
376 	out->level = level;
377 	out->desc = desc;
378 	return 0;
379 }
380 
381 static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
382 {
383 	wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
384 
385 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
386 	case VTCR_EL2_TG0_4K:
387 		wi->pgshift = 12;	 break;
388 	case VTCR_EL2_TG0_16K:
389 		wi->pgshift = 14;	 break;
390 	case VTCR_EL2_TG0_64K:
391 	default:	    /* IMPDEF: treat any other value as 64k */
392 		wi->pgshift = 16;	 break;
393 	}
394 
395 	wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
396 	/* Global limit for now, should eventually be per-VM */
397 	wi->max_oa_bits = min(get_kvm_ipa_limit(),
398 			      ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr), false));
399 
400 	wi->ha = vtcr & VTCR_EL2_HA;
401 }
402 
403 int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
404 		       struct kvm_s2_trans *result)
405 {
406 	u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
407 	struct s2_walk_info wi;
408 	int ret;
409 
410 	result->esr = 0;
411 
412 	if (!vcpu_has_nv(vcpu))
413 		return 0;
414 
415 	wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
416 
417 	vtcr_to_walk_info(vtcr, &wi);
418 
419 	wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
420 
421 	ret = walk_nested_s2_pgd(vcpu, gipa, &wi, result);
422 	if (ret)
423 		result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC);
424 
425 	return ret;
426 }
427 
428 static unsigned int ttl_to_size(u8 ttl)
429 {
430 	int level = ttl & 3;
431 	int gran = (ttl >> 2) & 3;
432 	unsigned int max_size = 0;
433 
434 	switch (gran) {
435 	case TLBI_TTL_TG_4K:
436 		switch (level) {
437 		case 0:
438 			break;
439 		case 1:
440 			max_size = SZ_1G;
441 			break;
442 		case 2:
443 			max_size = SZ_2M;
444 			break;
445 		case 3:
446 			max_size = SZ_4K;
447 			break;
448 		}
449 		break;
450 	case TLBI_TTL_TG_16K:
451 		switch (level) {
452 		case 0:
453 		case 1:
454 			break;
455 		case 2:
456 			max_size = SZ_32M;
457 			break;
458 		case 3:
459 			max_size = SZ_16K;
460 			break;
461 		}
462 		break;
463 	case TLBI_TTL_TG_64K:
464 		switch (level) {
465 		case 0:
466 		case 1:
467 			/* No 52bit IPA support */
468 			break;
469 		case 2:
470 			max_size = SZ_512M;
471 			break;
472 		case 3:
473 			max_size = SZ_64K;
474 			break;
475 		}
476 		break;
477 	default:			/* No size information */
478 		break;
479 	}
480 
481 	return max_size;
482 }
483 
484 static u8 pgshift_level_to_ttl(u16 shift, u8 level)
485 {
486 	u8 ttl;
487 
488 	switch(shift) {
489 	case 12:
490 		ttl = TLBI_TTL_TG_4K;
491 		break;
492 	case 14:
493 		ttl = TLBI_TTL_TG_16K;
494 		break;
495 	case 16:
496 		ttl = TLBI_TTL_TG_64K;
497 		break;
498 	default:
499 		BUG();
500 	}
501 
502 	ttl <<= 2;
503 	ttl |= level & 3;
504 
505 	return ttl;
506 }
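
/*
 * For example, a level 2 mapping with a 4K granule encodes as
 * pgshift_level_to_ttl(12, 2) == (TLBI_TTL_TG_4K << 2) | 2, which
 * ttl_to_size() decodes back to SZ_2M.
 */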
507 
508 /*
509  * Compute the equivalent of the TTL field by parsing the shadow PT.  The
510  * granule size is extracted from the cached VTCR_EL2.TG0 while the level is
511  * retrieved from first entry carrying the level as a tag.
512  * retrieved from the first entry carrying the level as a tag.
513 static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
514 {
515 	u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
516 	kvm_pte_t pte;
517 	u8 ttl, level;
518 
519 	lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
520 
521 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
522 	case VTCR_EL2_TG0_4K:
523 		ttl = (TLBI_TTL_TG_4K << 2);
524 		break;
525 	case VTCR_EL2_TG0_16K:
526 		ttl = (TLBI_TTL_TG_16K << 2);
527 		break;
528 	case VTCR_EL2_TG0_64K:
529 	default:	    /* IMPDEF: treat any other value as 64k */
530 		ttl = (TLBI_TTL_TG_64K << 2);
531 		break;
532 	}
533 
534 	tmp = addr;
535 
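	/*
	 * Probe the shadow S2 at increasing block sizes until a valid
	 * leaf carrying a guest level tag is found, or the largest
	 * block size for this granule has been tried.
	 */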
536 again:
537 	/* Iteratively compute the block sizes for a particular granule size */
538 	switch (FIELD_GET(VTCR_EL2_TG0_MASK, vtcr)) {
539 	case VTCR_EL2_TG0_4K:
540 		if	(sz < SZ_4K)	sz = SZ_4K;
541 		else if (sz < SZ_2M)	sz = SZ_2M;
542 		else if (sz < SZ_1G)	sz = SZ_1G;
543 		else			sz = 0;
544 		break;
545 	case VTCR_EL2_TG0_16K:
546 		if	(sz < SZ_16K)	sz = SZ_16K;
547 		else if (sz < SZ_32M)	sz = SZ_32M;
548 		else			sz = 0;
549 		break;
550 	case VTCR_EL2_TG0_64K:
551 	default:	    /* IMPDEF: treat any other value as 64k */
552 		if	(sz < SZ_64K)	sz = SZ_64K;
553 		else if (sz < SZ_512M)	sz = SZ_512M;
554 		else			sz = 0;
555 		break;
556 	}
557 
558 	if (sz == 0)
559 		return 0;
560 
561 	tmp &= ~(sz - 1);
562 	if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))
563 		goto again;
564 	if (!(pte & PTE_VALID))
565 		goto again;
566 	level = FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte);
567 	if (!level)
568 		goto again;
569 
570 	ttl |= level;
571 
572 	/*
573 	 * We now have found some level information in the shadow S2. Check
574 	 * that the resulting range is actually including the original IPA.
575 	 */
576 	sz = ttl_to_size(ttl);
577 	if (addr < (tmp + sz))
578 		return ttl;
579 
580 	return 0;
581 }
582 
583 unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
584 {
585 	struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
586 	unsigned long max_size;
587 	u8 ttl;
588 
589 	ttl = FIELD_GET(TLBI_TTL_MASK, val);
590 
591 	if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
592 		/* No TTL, check the shadow S2 for a hint */
593 		u64 addr = (val & GENMASK_ULL(35, 0)) << 12;
594 		ttl = get_guest_mapping_ttl(mmu, addr);
595 	}
596 
597 	max_size = ttl_to_size(ttl);
598 
599 	if (!max_size) {
600 		/* Compute the maximum extent of the invalidation */
601 		switch (FIELD_GET(VTCR_EL2_TG0_MASK, mmu->tlb_vtcr)) {
602 		case VTCR_EL2_TG0_4K:
603 			max_size = SZ_1G;
604 			break;
605 		case VTCR_EL2_TG0_16K:
606 			max_size = SZ_32M;
607 			break;
608 		case VTCR_EL2_TG0_64K:
609 		default:    /* IMPDEF: treat any other value as 64k */
610 			/*
611 			 * No, we do not support 52bit IPA in nested yet. Once
612 			 * we do, this should be 4TB.
613 			 */
614 			max_size = SZ_512M;
615 			break;
616 		}
617 	}
618 
619 	WARN_ON(!max_size);
620 	return max_size;
621 }
622 
623 /*
624  * We can have multiple *different* MMU contexts with the same VMID:
625  *
626  * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
627  *
628  * - Multiple vcpus using private S2s (huh huh...), hence differing by the
629  *   VTTBR_EL2.BADDR address
630  *
631  * - A combination of the above...
632  *
633  * We can always identify which MMU context to pick at run-time.  However,
634  * TLB invalidation involving a VMID must take action on all the TLBs using
635  * this particular VMID. This translates into applying the same invalidation
636  * operation to all the contexts that are using this VMID. Moar phun!
637  */
638 void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
639 				const union tlbi_info *info,
640 				void (*tlbi_callback)(struct kvm_s2_mmu *,
641 						      const union tlbi_info *))
642 {
643 	write_lock(&kvm->mmu_lock);
644 
645 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
646 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
647 
648 		if (!kvm_s2_mmu_valid(mmu))
649 			continue;
650 
651 		if (vmid == get_vmid(mmu->tlb_vttbr))
652 			tlbi_callback(mmu, info);
653 	}
654 
655 	write_unlock(&kvm->mmu_lock);
656 }
657 
658 struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu)
659 {
660 	struct kvm *kvm = vcpu->kvm;
661 	bool nested_stage2_enabled;
662 	u64 vttbr, vtcr, hcr;
663 
664 	lockdep_assert_held_write(&kvm->mmu_lock);
665 
666 	vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
667 	vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
668 	hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
669 
670 	nested_stage2_enabled = hcr & HCR_VM;
671 
672 	/* Don't consider the CnP bit for the vttbr match */
673 	vttbr &= ~VTTBR_CNP_BIT;
674 
675 	/*
676 	 * Two possibilities when looking up a S2 MMU context:
677 	 *
678 	 * - either S2 is enabled in the guest, and we need a context that is
679 	 *   S2-enabled and matches the full VTTBR (VMID+BADDR) and VTCR,
680 	 *   which makes it safe from a TLB conflict perspective (a broken
681 	 *   guest won't be able to generate them),
682 	 *
683 	 * - or S2 is disabled, and we need a context that is S2-disabled
684 	 *   and matches the VMID only, as all TLBs are tagged by VMID even
685 	 *   if S2 translation is disabled.
686 	 */
687 	for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
688 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
689 
690 		if (!kvm_s2_mmu_valid(mmu))
691 			continue;
692 
693 		if (nested_stage2_enabled &&
694 		    mmu->nested_stage2_enabled &&
695 		    vttbr == mmu->tlb_vttbr &&
696 		    vtcr == mmu->tlb_vtcr)
697 			return mmu;
698 
699 		if (!nested_stage2_enabled &&
700 		    !mmu->nested_stage2_enabled &&
701 		    get_vmid(vttbr) == get_vmid(mmu->tlb_vttbr))
702 			return mmu;
703 	}
704 	return NULL;
705 }
706 
707 static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
708 {
709 	struct kvm *kvm = vcpu->kvm;
710 	struct kvm_s2_mmu *s2_mmu;
711 	int i;
712 
713 	lockdep_assert_held_write(&vcpu->kvm->mmu_lock);
714 
715 	s2_mmu = lookup_s2_mmu(vcpu);
716 	if (s2_mmu)
717 		goto out;
718 
719 	/*
720 	 * Make sure we don't always search from the same point, or we
721 	 * will always reuse a potentially active context, leaving
722 	 * free contexts unused.
723 	 */
724 	for (i = kvm->arch.nested_mmus_next;
725 	     i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
726 	     i++) {
727 		s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
728 
729 		if (atomic_read(&s2_mmu->refcnt) == 0)
730 			break;
731 	}
732 	BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
733 
734 	/* Set the scene for the next search */
735 	kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
736 
737 	/* Make sure we don't forget to do the laundry */
738 	if (kvm_s2_mmu_valid(s2_mmu)) {
739 		kvm_nested_s2_ptdump_remove_debugfs(s2_mmu);
740 		s2_mmu->pending_unmap = true;
741 	}
742 
743 	/*
744 	 * The virtual VMID (modulo CnP) will be used as a key when matching
745 	 * an existing kvm_s2_mmu.
746 	 *
747 	 * We cache VTCR at allocation time, once and for all. It'd be great
748 	 * if the guest didn't screw that one up, as this is not very
749 	 * forgiving...
750 	 */
751 	s2_mmu->tlb_vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT;
752 	s2_mmu->tlb_vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
753 	s2_mmu->nested_stage2_enabled = vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM;
754 
755 	kvm_nested_s2_ptdump_create_debugfs(s2_mmu);
756 
757 out:
758 	atomic_inc(&s2_mmu->refcnt);
759 
760 	/*
761 	 * Set the vCPU request to perform an unmap, even if the pending unmap
762 	 * originates from another vCPU. This guarantees that the MMU has been
763 	 * completely unmapped before any vCPU actually uses it, and allows
764 	 * multiple vCPUs to lend a hand with completing the unmap.
765 	 */
766 	if (s2_mmu->pending_unmap)
767 		kvm_make_request(KVM_REQ_NESTED_S2_UNMAP, vcpu);
768 
769 	return s2_mmu;
770 }
771 
772 void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
773 {
774 	/* CnP being set denotes an invalid entry */
775 	mmu->tlb_vttbr = VTTBR_CNP_BIT;
776 	mmu->nested_stage2_enabled = false;
777 	atomic_set(&mmu->refcnt, 0);
778 }
779 
780 void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
781 {
782 	/*
783 	 * If the vCPU kept its reference on the MMU after the last put,
784 	 * keep rolling with it.
785 	 */
786 	if (is_hyp_ctxt(vcpu)) {
787 		if (!vcpu->arch.hw_mmu)
788 			vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
789 	} else {
790 		if (!vcpu->arch.hw_mmu) {
791 			scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
792 				vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
793 		}
794 
795 		if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
796 			kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
797 	}
798 }
799 
800 void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
801 {
802 	/* Unconditionally drop the VNCR mapping if we have one */
803 	if (host_data_test_flag(L1_VNCR_MAPPED)) {
804 		BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
805 		BUG_ON(is_hyp_ctxt(vcpu));
806 
807 		clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
808 		vcpu->arch.vncr_tlb->cpu = -1;
809 		host_data_clear_flag(L1_VNCR_MAPPED);
810 		atomic_dec(&vcpu->kvm->arch.vncr_map_count);
811 	}
812 
813 	/*
814 	 * Keep a reference on the associated stage-2 MMU if the vCPU is
815 	 * scheduling out and not in WFI emulation, suggesting it is likely to
816 	 * reuse the MMU sometime soon.
817 	 */
818 	if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI))
819 		return;
820 
821 	if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu))
822 		atomic_dec(&vcpu->arch.hw_mmu->refcnt);
823 
824 	vcpu->arch.hw_mmu = NULL;
825 }
826 
827 /*
828  * Returns non-zero if the permission fault is handled by injecting it into
829  * the next level hypervisor.
830  */
831 int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
832 {
833 	bool forward_fault = false;
834 
835 	trans->esr = 0;
836 
837 	if (!kvm_vcpu_trap_is_permission_fault(vcpu))
838 		return 0;
839 
840 	if (kvm_vcpu_trap_is_iabt(vcpu)) {
841 		if (vcpu_mode_priv(vcpu))
842 			forward_fault = !kvm_s2_trans_exec_el1(vcpu->kvm, trans);
843 		else
844 			forward_fault = !kvm_s2_trans_exec_el0(vcpu->kvm, trans);
845 	} else {
846 		bool write_fault = kvm_is_write_fault(vcpu);
847 
848 		forward_fault = ((write_fault && !trans->writable) ||
849 				 (!write_fault && !trans->readable));
850 	}
851 
852 	if (forward_fault)
853 		trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
854 
855 	return forward_fault;
856 }
857 
858 int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
859 {
860 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
861 	vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
862 
863 	return kvm_inject_nested_sync(vcpu, esr_el2);
864 }
865 
866 u16 get_asid_by_regime(struct kvm_vcpu *vcpu, enum trans_regime regime)
867 {
868 	enum vcpu_sysreg ttbr_elx;
869 	u64 tcr;
870 	u16 asid;
871 
872 	switch (regime) {
873 	case TR_EL10:
874 		tcr = vcpu_read_sys_reg(vcpu, TCR_EL1);
875 		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL1 : TTBR0_EL1;
876 		break;
877 	case TR_EL20:
878 		tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
879 		ttbr_elx = (tcr & TCR_A1) ? TTBR1_EL2 : TTBR0_EL2;
880 		break;
881 	default:
882 		BUG();
883 	}
884 
885 	asid = FIELD_GET(TTBRx_EL1_ASID, vcpu_read_sys_reg(vcpu, ttbr_elx));
886 	if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
887 	    !(tcr & TCR_ASID16))
888 		asid &= GENMASK(7, 0);
889 
890 	return asid;
891 }
892 
893 static void invalidate_vncr(struct vncr_tlb *vt)
894 {
895 	vt->valid = false;
896 	if (vt->cpu != -1)
897 		clear_fixmap(vncr_fixmap(vt->cpu));
898 }
899 
900 static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
901 {
902 	struct kvm_vcpu *vcpu;
903 	unsigned long i;
904 
905 	lockdep_assert_held_write(&kvm->mmu_lock);
906 
907 	if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
908 		return;
909 
910 	kvm_for_each_vcpu(i, vcpu, kvm) {
911 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
912 		u64 ipa_start, ipa_end, ipa_size;
913 
914 		/*
915 		 * Careful here: We end up here from an MMU notifier,
916 		 * and this can race against a vcpu not being onlined
917 		 * yet, without the pseudo-TLB being allocated.
918 		 *
919 		 * Skip those, as they obviously don't participate in
920 		 * the invalidation at this stage.
921 		 */
922 		if (!vt)
923 			continue;
924 
925 		if (!vt->valid)
926 			continue;
927 
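		/*
		 * The pseudo-TLB caches a single block-sized mapping;
		 * work out the IPA range it spans and skip it if it
		 * doesn't overlap the range being invalidated.
		 */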
928 		ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
929 							    vt->wr.level));
930 		ipa_start = vt->wr.pa & ~(ipa_size - 1);
931 		ipa_end = ipa_start + ipa_size;
932 
933 		if (ipa_end <= start || ipa_start >= end)
934 			continue;
935 
936 		invalidate_vncr(vt);
937 	}
938 }
939 
940 struct s1e2_tlbi_scope {
941 	enum {
942 		TLBI_ALL,
943 		TLBI_VA,
944 		TLBI_VAA,
945 		TLBI_ASID,
946 	} type;
947 
948 	u16 asid;
949 	u64 va;
950 	u64 size;
951 };
952 
953 static void invalidate_vncr_va(struct kvm *kvm,
954 			       struct s1e2_tlbi_scope *scope)
955 {
956 	struct kvm_vcpu *vcpu;
957 	unsigned long i;
958 
959 	lockdep_assert_held_write(&kvm->mmu_lock);
960 
961 	kvm_for_each_vcpu(i, vcpu, kvm) {
962 		struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
963 		u64 va_start, va_end, va_size;
964 
965 		if (!vt->valid)
966 			continue;
967 
968 		va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
969 							   vt->wr.level));
970 		va_start = vt->gva & ~(va_size - 1);
971 		va_end = va_start + va_size;
972 
973 		switch (scope->type) {
974 		case TLBI_ALL:
975 			break;
976 
977 		case TLBI_VA:
978 			if (va_end <= scope->va ||
979 			    va_start >= (scope->va + scope->size))
980 				continue;
981 			if (vt->wr.nG && vt->wr.asid != scope->asid)
982 				continue;
983 			break;
984 
985 		case TLBI_VAA:
986 			if (va_end <= scope->va ||
987 			    va_start >= (scope->va + scope->size))
988 				continue;
989 			break;
990 
991 		case TLBI_ASID:
992 			if (!vt->wr.nG || vt->wr.asid != scope->asid)
993 				continue;
994 			break;
995 		}
996 
997 		invalidate_vncr(vt);
998 	}
999 }
1000 
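/*
 * TLBI VA* operands carry VA[55:12] in bits [43:0]; shifting left by 12
 * and sign-extending from bit 48 reconstructs a canonical VA (52bit VAs
 * are not handled here).
 */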
1001 #define tlbi_va_s1_to_va(v)	(u64)sign_extend64((v) << 12, 48)
1002 
1003 static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
1004 				  struct s1e2_tlbi_scope *scope)
1005 {
1006 	switch (inst) {
1007 	case OP_TLBI_ALLE2:
1008 	case OP_TLBI_ALLE2IS:
1009 	case OP_TLBI_ALLE2OS:
1010 	case OP_TLBI_VMALLE1:
1011 	case OP_TLBI_VMALLE1IS:
1012 	case OP_TLBI_VMALLE1OS:
1013 	case OP_TLBI_ALLE2NXS:
1014 	case OP_TLBI_ALLE2ISNXS:
1015 	case OP_TLBI_ALLE2OSNXS:
1016 	case OP_TLBI_VMALLE1NXS:
1017 	case OP_TLBI_VMALLE1ISNXS:
1018 	case OP_TLBI_VMALLE1OSNXS:
1019 		scope->type = TLBI_ALL;
1020 		break;
1021 	case OP_TLBI_VAE2:
1022 	case OP_TLBI_VAE2IS:
1023 	case OP_TLBI_VAE2OS:
1024 	case OP_TLBI_VAE1:
1025 	case OP_TLBI_VAE1IS:
1026 	case OP_TLBI_VAE1OS:
1027 	case OP_TLBI_VAE2NXS:
1028 	case OP_TLBI_VAE2ISNXS:
1029 	case OP_TLBI_VAE2OSNXS:
1030 	case OP_TLBI_VAE1NXS:
1031 	case OP_TLBI_VAE1ISNXS:
1032 	case OP_TLBI_VAE1OSNXS:
1033 	case OP_TLBI_VALE2:
1034 	case OP_TLBI_VALE2IS:
1035 	case OP_TLBI_VALE2OS:
1036 	case OP_TLBI_VALE1:
1037 	case OP_TLBI_VALE1IS:
1038 	case OP_TLBI_VALE1OS:
1039 	case OP_TLBI_VALE2NXS:
1040 	case OP_TLBI_VALE2ISNXS:
1041 	case OP_TLBI_VALE2OSNXS:
1042 	case OP_TLBI_VALE1NXS:
1043 	case OP_TLBI_VALE1ISNXS:
1044 	case OP_TLBI_VALE1OSNXS:
1045 		scope->type = TLBI_VA;
1046 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
1047 		if (!scope->size)
1048 			scope->size = SZ_1G;
1049 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
1050 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
1051 		break;
1052 	case OP_TLBI_ASIDE1:
1053 	case OP_TLBI_ASIDE1IS:
1054 	case OP_TLBI_ASIDE1OS:
1055 	case OP_TLBI_ASIDE1NXS:
1056 	case OP_TLBI_ASIDE1ISNXS:
1057 	case OP_TLBI_ASIDE1OSNXS:
1058 		scope->type = TLBI_ASID;
1059 		scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
1060 		break;
1061 	case OP_TLBI_VAAE1:
1062 	case OP_TLBI_VAAE1IS:
1063 	case OP_TLBI_VAAE1OS:
1064 	case OP_TLBI_VAAE1NXS:
1065 	case OP_TLBI_VAAE1ISNXS:
1066 	case OP_TLBI_VAAE1OSNXS:
1067 	case OP_TLBI_VAALE1:
1068 	case OP_TLBI_VAALE1IS:
1069 	case OP_TLBI_VAALE1OS:
1070 	case OP_TLBI_VAALE1NXS:
1071 	case OP_TLBI_VAALE1ISNXS:
1072 	case OP_TLBI_VAALE1OSNXS:
1073 		scope->type = TLBI_VAA;
1074 		scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
1075 		if (!scope->size)
1076 			scope->size = SZ_1G;
1077 		scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
1078 		break;
1079 	case OP_TLBI_RVAE2:
1080 	case OP_TLBI_RVAE2IS:
1081 	case OP_TLBI_RVAE2OS:
1082 	case OP_TLBI_RVAE1:
1083 	case OP_TLBI_RVAE1IS:
1084 	case OP_TLBI_RVAE1OS:
1085 	case OP_TLBI_RVAE2NXS:
1086 	case OP_TLBI_RVAE2ISNXS:
1087 	case OP_TLBI_RVAE2OSNXS:
1088 	case OP_TLBI_RVAE1NXS:
1089 	case OP_TLBI_RVAE1ISNXS:
1090 	case OP_TLBI_RVAE1OSNXS:
1091 	case OP_TLBI_RVALE2:
1092 	case OP_TLBI_RVALE2IS:
1093 	case OP_TLBI_RVALE2OS:
1094 	case OP_TLBI_RVALE1:
1095 	case OP_TLBI_RVALE1IS:
1096 	case OP_TLBI_RVALE1OS:
1097 	case OP_TLBI_RVALE2NXS:
1098 	case OP_TLBI_RVALE2ISNXS:
1099 	case OP_TLBI_RVALE2OSNXS:
1100 	case OP_TLBI_RVALE1NXS:
1101 	case OP_TLBI_RVALE1ISNXS:
1102 	case OP_TLBI_RVALE1OSNXS:
1103 		scope->type = TLBI_VA;
1104 		scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
1105 		break;
1106 	case OP_TLBI_RVAAE1:
1107 	case OP_TLBI_RVAAE1IS:
1108 	case OP_TLBI_RVAAE1OS:
1109 	case OP_TLBI_RVAAE1NXS:
1110 	case OP_TLBI_RVAAE1ISNXS:
1111 	case OP_TLBI_RVAAE1OSNXS:
1112 	case OP_TLBI_RVAALE1:
1113 	case OP_TLBI_RVAALE1IS:
1114 	case OP_TLBI_RVAALE1OS:
1115 	case OP_TLBI_RVAALE1NXS:
1116 	case OP_TLBI_RVAALE1ISNXS:
1117 	case OP_TLBI_RVAALE1OSNXS:
1118 		scope->type = TLBI_VAA;
1119 		scope->va = decode_range_tlbi(val, &scope->size, NULL);
1120 		break;
1121 	}
1122 }
1123 
1124 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
1125 {
1126 	struct s1e2_tlbi_scope scope = {};
1127 
1128 	compute_s1_tlbi_range(vcpu, inst, val, &scope);
1129 
1130 	guard(write_lock)(&vcpu->kvm->mmu_lock);
1131 	invalidate_vncr_va(vcpu->kvm, &scope);
1132 }
1133 
1134 void kvm_nested_s2_wp(struct kvm *kvm)
1135 {
1136 	int i;
1137 
1138 	lockdep_assert_held_write(&kvm->mmu_lock);
1139 
1140 	if (!kvm->arch.nested_mmus_size)
1141 		return;
1142 
1143 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1144 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1145 
1146 		if (kvm_s2_mmu_valid(mmu))
1147 			kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
1148 	}
1149 
1150 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1151 }
1152 
1153 void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
1154 {
1155 	int i;
1156 
1157 	lockdep_assert_held_write(&kvm->mmu_lock);
1158 
1159 	if (!kvm->arch.nested_mmus_size)
1160 		return;
1161 
1162 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1163 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1164 
1165 		if (kvm_s2_mmu_valid(mmu))
1166 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
1167 	}
1168 
1169 	kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1170 }
1171 
1172 void kvm_nested_s2_flush(struct kvm *kvm)
1173 {
1174 	int i;
1175 
1176 	lockdep_assert_held_write(&kvm->mmu_lock);
1177 
1178 	if (!kvm->arch.nested_mmus_size)
1179 		return;
1180 
1181 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1182 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1183 
1184 		if (kvm_s2_mmu_valid(mmu))
1185 			kvm_stage2_flush_range(mmu, 0, kvm_phys_size(mmu));
1186 	}
1187 }
1188 
1189 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1190 {
1191 	int i;
1192 
1193 	for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1194 		struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1195 
1196 		if (!WARN_ON(atomic_read(&mmu->refcnt)))
1197 			kvm_free_stage2_pgd(mmu);
1198 	}
1199 	kvfree(kvm->arch.nested_mmus);
1200 	kvm->arch.nested_mmus = NULL;
1201 	kvm->arch.nested_mmus_size = 0;
1202 	kvm_uninit_stage2_mmu(kvm);
1203 }
1204 
1205 /*
1206  * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter:
1207  *
1208  * - We introduce an internal representation of a vcpu-private TLB,
1209  *   representing the mapping between the guest VA contained in VNCR_EL2,
1210  *   the IPA the guest's EL2 PTs point to, and the actual PA this lives at.
1211  *
1212  * - On translation fault from a nested VNCR access, we create such a TLB.
1213  *   If there is no mapping to describe, the guest inherits the fault.
1214  *   Crucially, no actual mapping is done at this stage.
1215  *
1216  * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
1217  *   TLB exists, we map it in the fixmap for this CPU, and run with it. We
1218  *   have to respect the permissions dictated by the guest, but not the
1219  *   memory type (FWB is a must).
1220  *
1221  * - Note that we usually don't do a vcpu_load() on the back of a fault
1222  *   (unless we are preempted), so the resolution of a translation fault
1223  *   must go via a request that will map the VNCR page in the fixmap.
1224  *   vcpu_load() might as well use the same mechanism.
1225  *
1226  * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
1227  *   mapped, we unmap it. Yes it is that simple. The TLB still exists
1228  *   though, and may be reused at a later load.
1229  *
1230  * - On permission fault, we simply forward the fault to the guest's EL2.
1231  *   Get out of my way.
1232  *
1233  * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
1234  *   intersects with the TLBI request, invalidate it, and unmap the page
1235  *   from the fixmap. Because we need to look at all the vcpu-private TLBs,
1236  *   this requires some wide-ranging locking to ensure that nothing races
1237  *   against it. This may require some refcounting to avoid the search when
1238  *   no such TLB is present.
1239  *
1240  * - On MMU notifiers, we must invalidate our TLB in a similar way, but
1241  *   looking at the IPA instead. The funny part is that there may not be a
1242  *   stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
1243  *   instructions.
1244  */
1245 
1246 int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
1247 {
1248 	if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
1249 		return 0;
1250 
1251 	vcpu->arch.vncr_tlb = kzalloc_obj(*vcpu->arch.vncr_tlb,
1252 					  GFP_KERNEL_ACCOUNT);
1253 	if (!vcpu->arch.vncr_tlb)
1254 		return -ENOMEM;
1255 
1256 	return 0;
1257 }
1258 
1259 static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
1260 {
1261 	return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
1262 }
1263 
1264 static int kvm_translate_vncr(struct kvm_vcpu *vcpu, bool *is_gmem)
1265 {
1266 	struct kvm_memory_slot *memslot;
1267 	bool write_fault, writable;
1268 	unsigned long mmu_seq;
1269 	struct vncr_tlb *vt;
1270 	struct page *page;
1271 	u64 va, pfn, gfn;
1272 	int ret;
1273 
1274 	vt = vcpu->arch.vncr_tlb;
1275 
1276 	/*
1277 	 * If we're about to walk the EL2 S1 PTs, we must invalidate the
1278 	 * current TLB, as it could be sampled from another vcpu doing a
1279 	 * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
1280 	 * translation, so not much of a choice.
1281 	 *
1282 	 * We also prepare the next walk whilst we're at it.
1283 	 */
1284 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1285 		invalidate_vncr(vt);
1286 
1287 		vt->wi = (struct s1_walk_info) {
1288 			.regime	= TR_EL20,
1289 			.as_el0	= false,
1290 			.pan	= false,
1291 		};
1292 		vt->wr = (struct s1_walk_result){};
1293 	}
1294 
1295 	guard(srcu)(&vcpu->kvm->srcu);
1296 
1297 	va =  read_vncr_el2(vcpu);
1298 
1299 	ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
1300 	if (ret)
1301 		return ret;
1302 
1303 	write_fault = kvm_is_write_fault(vcpu);
1304 
1305 	mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1306 	smp_rmb();
1307 
1308 	gfn = vt->wr.pa >> PAGE_SHIFT;
1309 	memslot = gfn_to_memslot(vcpu->kvm, gfn);
1310 	if (!memslot)
1311 		return -EFAULT;
1312 
1313 	*is_gmem = kvm_slot_has_gmem(memslot);
1314 	if (!*is_gmem) {
1315 		pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
1316 					&writable, &page);
1317 		if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
1318 			return -EFAULT;
1319 	} else {
1320 		ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL);
1321 		if (ret) {
1322 			kvm_prepare_memory_fault_exit(vcpu, vt->wr.pa, PAGE_SIZE,
1323 					      write_fault, false, false);
1324 			return ret;
1325 		}
1326 	}
1327 
1328 	scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1329 		if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
1330 			return -EAGAIN;
1331 
1332 		vt->gva = va;
1333 		vt->hpa = pfn << PAGE_SHIFT;
1334 		vt->valid = true;
1335 		vt->cpu = -1;
1336 
1337 		kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
1338 		kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
1339 	}
1340 
1341 	if (vt->wr.pw)
1342 		mark_page_dirty(vcpu->kvm, gfn);
1343 
1344 	return 0;
1345 }
1346 
1347 static void inject_vncr_perm(struct kvm_vcpu *vcpu)
1348 {
1349 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1350 	u64 esr = kvm_vcpu_get_esr(vcpu);
1351 
1352 	/* Adjust the fault level to reflect that of the guest's */
1353 	esr &= ~ESR_ELx_FSC;
1354 	esr |= FIELD_PREP(ESR_ELx_FSC,
1355 			  ESR_ELx_FSC_PERM_L(vt->wr.level));
1356 
1357 	kvm_inject_nested_sync(vcpu, esr);
1358 }
1359 
1360 static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
1361 {
1362 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1363 
1364 	lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
1365 
1366 	if (!vt->valid)
1367 		return false;
1368 
1369 	if (read_vncr_el2(vcpu) != vt->gva)
1370 		return false;
1371 
1372 	if (vt->wr.nG)
1373 		return get_asid_by_regime(vcpu, TR_EL20) == vt->wr.asid;
1374 
1375 	return true;
1376 }
1377 
1378 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
1379 {
1380 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1381 	u64 esr = kvm_vcpu_get_esr(vcpu);
1382 
1383 	WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
1384 
1385 	if (kvm_vcpu_abt_issea(vcpu))
1386 		return kvm_handle_guest_sea(vcpu);
1387 
1388 	if (esr_fsc_is_permission_fault(esr)) {
1389 		inject_vncr_perm(vcpu);
1390 	} else if (esr_fsc_is_translation_fault(esr)) {
1391 		bool valid, is_gmem = false;
1392 		int ret;
1393 
1394 		scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
1395 			valid = kvm_vncr_tlb_lookup(vcpu);
1396 
1397 		if (!valid)
1398 			ret = kvm_translate_vncr(vcpu, &is_gmem);
1399 		else
1400 			ret = -EPERM;
1401 
1402 		switch (ret) {
1403 		case -EAGAIN:
1404 			/* Let's try again... */
1405 			break;
1406 		case -ENOMEM:
1407 			/*
1408 			 * For guest_memfd, this indicates that it failed to
1409 			 * create a folio to back the memory. Inform userspace.
1410 			 */
1411 			if (is_gmem)
1412 				return 0;
1413 			/* Otherwise, let's try again... */
1414 			break;
1415 		case -EFAULT:
1416 		case -EIO:
1417 		case -EHWPOISON:
1418 			if (is_gmem)
1419 				return 0;
1420 			fallthrough;
1421 		case -EINVAL:
1422 		case -ENOENT:
1423 		case -EACCES:
1424 			/*
1425 			 * Translation failed, inject the corresponding
1426 			 * exception back to EL2.
1427 			 */
1428 			BUG_ON(!vt->wr.failed);
1429 
1430 			esr &= ~ESR_ELx_FSC;
1431 			esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
1432 
1433 			kvm_inject_nested_sync(vcpu, esr);
1434 			break;
1435 		case -EPERM:
1436 			/* Hack to deal with POE until we get kernel support */
1437 			inject_vncr_perm(vcpu);
1438 			break;
1439 		case 0:
1440 			break;
1441 		}
1442 	} else {
1443 		WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
1444 	}
1445 
1446 	return 1;
1447 }
1448 
1449 static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
1450 {
1451 	struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1452 	pgprot_t prot;
1453 
1454 	guard(preempt)();
1455 	guard(read_lock)(&vcpu->kvm->mmu_lock);
1456 
1457 	/*
1458 	 * The request to map VNCR may have raced against some other
1459 	 * event, such as an interrupt, and may not be valid anymore.
1460 	 */
1461 	if (is_hyp_ctxt(vcpu))
1462 		return;
1463 
1464 	/*
1465 	 * Check that the pseudo-TLB is valid and that VNCR_EL2 still
1466 	 * contains the expected value. If it doesn't, we simply bail out
1467 	 * without a mapping -- a transformed MSR/MRS will generate the
1468 	 * fault and allow us to populate the pseudo-TLB.
1469 	 */
1470 	if (!vt->valid)
1471 		return;
1472 
1473 	if (read_vncr_el2(vcpu) != vt->gva)
1474 		return;
1475 
1476 	if (vt->wr.nG && get_asid_by_regime(vcpu, TR_EL20) != vt->wr.asid)
1477 		return;
1478 
1479 	vt->cpu = smp_processor_id();
1480 
1481 	if (vt->wr.pw && vt->wr.pr)
1482 		prot = PAGE_KERNEL;
1483 	else if (vt->wr.pr)
1484 		prot = PAGE_KERNEL_RO;
1485 	else
1486 		prot = PAGE_NONE;
1487 
1488 	/*
1489 	 * We can't map write-only (or no permission at all) in the kernel,
1490 	 * but the guest can do it if using POE, so we'll have to turn a
1491 	 * translation fault into a permission fault at runtime.
1492 	 * FIXME: WO doesn't work at all, need POE support in the kernel.
1493 	 */
1494 	if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
1495 		__set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
1496 		host_data_set_flag(L1_VNCR_MAPPED);
1497 		atomic_inc(&vcpu->kvm->arch.vncr_map_count);
1498 	}
1499 }
1500 
1501 #define has_tgran_2(__r, __sz)						\
1502 	({								\
1503 		u64 _s1, _s2, _mmfr0 = __r;				\
1504 									\
1505 		_s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
1506 				    TGRAN##__sz##_2, _mmfr0);		\
1507 									\
1508 		_s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1,			\
1509 				    TGRAN##__sz, _mmfr0);		\
1510 									\
1511 		((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI &&		\
1512 		  _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
1513 		 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
1514 		  _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI));		\
1515 	})
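
/*
 * has_tgran_2() evaluates to true when stage-2 support for the given
 * granule is advertised: either explicitly through TGRAN<sz>_2, or via
 * the "same as stage-1" encoding combined with stage-1 support.
 */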
1516 /*
1517  * Our emulated CPU doesn't support all the possible features. For the
1518  * sake of simplicity (and probably mental sanity), wipe out a number
1519  * of feature bits we don't intend to support for the time being.
1520  * This list should get updated as new features get added to the NV
1521  * support, and new extensions to the architecture.
1522  */
1523 u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
1524 {
1525 	u64 orig_val = val;
1526 
1527 	switch (reg) {
1528 	case SYS_ID_AA64ISAR1_EL1:
1529 		/* Support everything but LS64 and Spec Invalidation */
1530 		val &= ~(ID_AA64ISAR1_EL1_LS64	|
1531 			 ID_AA64ISAR1_EL1_SPECRES);
1532 		break;
1533 
1534 	case SYS_ID_AA64PFR0_EL1:
1535 		/* No RME, AMU, MPAM, or S-EL2 */
1536 		val &= ~(ID_AA64PFR0_EL1_RME	|
1537 			 ID_AA64PFR0_EL1_AMU	|
1538 			 ID_AA64PFR0_EL1_MPAM	|
1539 			 ID_AA64PFR0_EL1_SEL2	|
1540 			 ID_AA64PFR0_EL1_EL3	|
1541 			 ID_AA64PFR0_EL1_EL2	|
1542 			 ID_AA64PFR0_EL1_EL1	|
1543 			 ID_AA64PFR0_EL1_EL0);
1544 		/* 64bit only at any EL */
1545 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
1546 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
1547 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
1548 		val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
1549 		break;
1550 
1551 	case SYS_ID_AA64PFR1_EL1:
1552 		/* Only support BTI, SSBS, CSV2_frac */
1553 		val &= ~(ID_AA64PFR1_EL1_PFAR		|
1554 			 ID_AA64PFR1_EL1_MTEX		|
1555 			 ID_AA64PFR1_EL1_THE		|
1556 			 ID_AA64PFR1_EL1_GCS		|
1557 			 ID_AA64PFR1_EL1_MTE_frac	|
1558 			 ID_AA64PFR1_EL1_NMI		|
1559 			 ID_AA64PFR1_EL1_SME		|
1560 			 ID_AA64PFR1_EL1_RES0		|
1561 			 ID_AA64PFR1_EL1_MPAM_frac	|
1562 			 ID_AA64PFR1_EL1_MTE);
1563 		break;
1564 
1565 	case SYS_ID_AA64PFR2_EL1:
1566 		/* GICv5 is not yet supported for NV */
1567 		val &= ~ID_AA64PFR2_EL1_GCIE;
1568 		break;
1569 
1570 	case SYS_ID_AA64MMFR0_EL1:
1571 		/* Hide ExS, Secure Memory */
1572 		val &= ~(ID_AA64MMFR0_EL1_EXS		|
1573 			 ID_AA64MMFR0_EL1_TGRAN4_2	|
1574 			 ID_AA64MMFR0_EL1_TGRAN16_2	|
1575 			 ID_AA64MMFR0_EL1_TGRAN64_2	|
1576 			 ID_AA64MMFR0_EL1_SNSMEM);
1577 
1578 		/* Hide CNTPOFF if present */
1579 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
1580 
1581 		/* Disallow unsupported S2 page sizes */
1582 		switch (PAGE_SIZE) {
1583 		case SZ_64K:
1584 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
1585 			fallthrough;
1586 		case SZ_16K:
1587 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
1588 			fallthrough;
1589 		case SZ_4K:
1590 			/* Support everything */
1591 			break;
1592 		}
1593 
1594 		/*
1595 		 * Since we can't support a guest S2 page size smaller
1596 		 * than the host's own page size (due to KVM only
1597 		 * populating its own S2 using the kernel's page
1598 		 * size), advertise the limitation using FEAT_GTG.
1599 		 */
1600 		switch (PAGE_SIZE) {
1601 		case SZ_4K:
1602 			if (has_tgran_2(orig_val, 4))
1603 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
1604 			fallthrough;
1605 		case SZ_16K:
1606 			if (has_tgran_2(orig_val, 16))
1607 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
1608 			fallthrough;
1609 		case SZ_64K:
1610 			if (has_tgran_2(orig_val, 64))
1611 				val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
1612 			break;
1613 		}
1614 
1615 		/* Cap PARange to 48bits */
1616 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
1617 		break;
1618 
1619 	case SYS_ID_AA64MMFR1_EL1:
1620 		val &= ~(ID_AA64MMFR1_EL1_CMOW		|
1621 			 ID_AA64MMFR1_EL1_nTLBPA	|
1622 			 ID_AA64MMFR1_EL1_ETS);
1623 
1624 		/* FEAT_E2H0 implies no VHE */
1625 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
1626 			val &= ~ID_AA64MMFR1_EL1_VH;
1627 
1628 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR1_EL1, HAFDBS, AF);
1629 		break;
1630 
1631 	case SYS_ID_AA64MMFR2_EL1:
1632 		val &= ~(ID_AA64MMFR2_EL1_BBM	|
1633 			 ID_AA64MMFR2_EL1_TTL	|
1634 			 GENMASK_ULL(47, 44)	|
1635 			 ID_AA64MMFR2_EL1_ST	|
1636 			 ID_AA64MMFR2_EL1_CCIDX	|
1637 			 ID_AA64MMFR2_EL1_VARange);
1638 
1639 		/* Force TTL support */
1640 		val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
1641 		break;
1642 
1643 	case SYS_ID_AA64MMFR4_EL1:
1644 		/*
1645 		 * You get EITHER
1646 		 *
1647 		 * - FEAT_VHE without FEAT_E2H0
1648 		 * - FEAT_NV limited to FEAT_NV2
1649 		 * - HCR_EL2.NV1 being RES0
1650 		 *
1651 		 * OR
1652 		 *
1653 		 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
1654 		 *
1655 		 * Life is too short for anything else.
1656 		 */
1657 		if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
1658 			val = 0;
1659 		} else {
1660 			val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
1661 			val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
1662 		}
1663 		break;
1664 
1665 	case SYS_ID_AA64DFR0_EL1:
1666 		/* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
1667 		val &= ~(ID_AA64DFR0_EL1_ExtTrcBuff	|
1668 			 ID_AA64DFR0_EL1_BRBE		|
1669 			 ID_AA64DFR0_EL1_MTPMU		|
1670 			 ID_AA64DFR0_EL1_TraceBuffer	|
1671 			 ID_AA64DFR0_EL1_TraceFilt	|
1672 			 ID_AA64DFR0_EL1_PMSVer		|
1673 			 ID_AA64DFR0_EL1_CTX_CMPs	|
1674 			 ID_AA64DFR0_EL1_SEBEP		|
1675 			 ID_AA64DFR0_EL1_PMSS		|
1676 			 ID_AA64DFR0_EL1_TraceVer);
1677 
1678 		/*
1679 		 * FEAT_Debugv8p9 requires support for extended breakpoints /
1680 		 * watchpoints.
1681 		 */
1682 		val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1683 		break;
1684 	}
1685 
1686 	return val;
1687 }
1688 
1689 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
1690 			     enum vcpu_sysreg sr, u64 v)
1691 {
1692 	struct resx resx;
1693 
1694 	resx = kvm_get_sysreg_resx(vcpu->kvm, sr);
1695 	v &= ~resx.res0;
1696 	v |= resx.res1;
1697 
1698 	return v;
1699 }
1700 
1701 static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, struct resx resx)
1702 {
1703 	BUILD_BUG_ON(!__builtin_constant_p(sr));
1704 	BUILD_BUG_ON(sr < __SANITISED_REG_START__);
1705 	BUILD_BUG_ON(sr >= NR_SYS_REGS);
1706 
1707 	kvm_set_sysreg_resx(kvm, sr, resx);
1708 }
1709 
1710 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
1711 {
1712 	struct kvm *kvm = vcpu->kvm;
1713 	struct resx resx;
1714 
1715 	lockdep_assert_held(&kvm->arch.config_lock);
1716 
1717 	if (kvm->arch.sysreg_masks)
1718 		goto out;
1719 
1720 	kvm->arch.sysreg_masks = kzalloc_obj(*(kvm->arch.sysreg_masks),
1721 					     GFP_KERNEL_ACCOUNT);
1722 	if (!kvm->arch.sysreg_masks)
1723 		return -ENOMEM;
1724 
1725 	/* VTTBR_EL2 */
1726 	resx = (typeof(resx)){};
1727 	if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
1728 		resx.res0 |= GENMASK(63, 56);
1729 	if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
1730 		resx.res0 |= VTTBR_CNP_BIT;
1731 	set_sysreg_masks(kvm, VTTBR_EL2, resx);
1732 
1733 	/* VTCR_EL2 */
1734 	resx = get_reg_fixed_bits(kvm, VTCR_EL2);
1735 	set_sysreg_masks(kvm, VTCR_EL2, resx);
1736 
1737 	/* VMPIDR_EL2 */
1738 	resx.res0 = GENMASK(63, 40) | GENMASK(30, 24);
1739 	resx.res1 = BIT(31);
1740 	set_sysreg_masks(kvm, VMPIDR_EL2, resx);
1741 
1742 	/* HCR_EL2 */
1743 	resx = get_reg_fixed_bits(kvm, HCR_EL2);
1744 	set_sysreg_masks(kvm, HCR_EL2, resx);
1745 
1746 	/* HCRX_EL2 */
1747 	resx = get_reg_fixed_bits(kvm, HCRX_EL2);
1748 	set_sysreg_masks(kvm, HCRX_EL2, resx);
1749 
1750 	/* HFG[RW]TR_EL2 */
1751 	resx = get_reg_fixed_bits(kvm, HFGRTR_EL2);
1752 	set_sysreg_masks(kvm, HFGRTR_EL2, resx);
1753 	resx = get_reg_fixed_bits(kvm, HFGWTR_EL2);
1754 	set_sysreg_masks(kvm, HFGWTR_EL2, resx);
1755 
1756 	/* HDFG[RW]TR_EL2 */
1757 	resx = get_reg_fixed_bits(kvm, HDFGRTR_EL2);
1758 	set_sysreg_masks(kvm, HDFGRTR_EL2, resx);
1759 	resx = get_reg_fixed_bits(kvm, HDFGWTR_EL2);
1760 	set_sysreg_masks(kvm, HDFGWTR_EL2, resx);
1761 
1762 	/* HFGITR_EL2 */
1763 	resx = get_reg_fixed_bits(kvm, HFGITR_EL2);
1764 	set_sysreg_masks(kvm, HFGITR_EL2, resx);
1765 
1766 	/* HAFGRTR_EL2 - not a lot to see here */
1767 	resx = get_reg_fixed_bits(kvm, HAFGRTR_EL2);
1768 	set_sysreg_masks(kvm, HAFGRTR_EL2, resx);
1769 
1770 	/* HFG[RW]TR2_EL2 */
1771 	resx = get_reg_fixed_bits(kvm, HFGRTR2_EL2);
1772 	set_sysreg_masks(kvm, HFGRTR2_EL2, resx);
1773 	resx = get_reg_fixed_bits(kvm, HFGWTR2_EL2);
1774 	set_sysreg_masks(kvm, HFGWTR2_EL2, resx);
1775 
1776 	/* HDFG[RW]TR2_EL2 */
1777 	resx = get_reg_fixed_bits(kvm, HDFGRTR2_EL2);
1778 	set_sysreg_masks(kvm, HDFGRTR2_EL2, resx);
1779 	resx = get_reg_fixed_bits(kvm, HDFGWTR2_EL2);
1780 	set_sysreg_masks(kvm, HDFGWTR2_EL2, resx);
1781 
1782 	/* HFGITR2_EL2 */
1783 	resx = get_reg_fixed_bits(kvm, HFGITR2_EL2);
1784 	set_sysreg_masks(kvm, HFGITR2_EL2, resx);
1785 
1786 	/* TCR2_EL2 */
1787 	resx = get_reg_fixed_bits(kvm, TCR2_EL2);
1788 	set_sysreg_masks(kvm, TCR2_EL2, resx);
1789 
1790 	/* SCTLR_EL1 */
1791 	resx = get_reg_fixed_bits(kvm, SCTLR_EL1);
1792 	set_sysreg_masks(kvm, SCTLR_EL1, resx);
1793 
1794 	/* SCTLR_EL2 */
1795 	resx = get_reg_fixed_bits(kvm, SCTLR_EL2);
1796 	set_sysreg_masks(kvm, SCTLR_EL2, resx);
1797 
1798 	/* SCTLR2_ELx */
1799 	resx = get_reg_fixed_bits(kvm, SCTLR2_EL1);
1800 	set_sysreg_masks(kvm, SCTLR2_EL1, resx);
1801 	resx = get_reg_fixed_bits(kvm, SCTLR2_EL2);
1802 	set_sysreg_masks(kvm, SCTLR2_EL2, resx);
1803 
1804 	/* MDCR_EL2 */
1805 	resx = get_reg_fixed_bits(kvm, MDCR_EL2);
1806 	set_sysreg_masks(kvm, MDCR_EL2, resx);
1807 
1808 	/* CNTHCTL_EL2 */
1809 	resx.res0 = GENMASK(63, 20);
1810 	resx.res1 = 0;
1811 	if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
1812 		resx.res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
1813 	if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
1814 		resx.res0 |= CNTHCTL_ECV;
1815 		if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
1816 			resx.res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
1817 				      CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
1818 	}
1819 	if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
1820 		resx.res0 |= GENMASK(11, 8);
1821 	set_sysreg_masks(kvm, CNTHCTL_EL2, resx);
1822 
1823 	/* ICH_HCR_EL2 */
1824 	resx.res0 = ICH_HCR_EL2_RES0;
1825 	resx.res1 = ICH_HCR_EL2_RES1;
1826 	if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
1827 		resx.res0 |= ICH_HCR_EL2_TDIR;
1828 	/* No GICv4 is presented to the guest */
1829 	resx.res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
1830 	set_sysreg_masks(kvm, ICH_HCR_EL2, resx);
1831 
1832 	/* VNCR_EL2 */
1833 	resx.res0 = VNCR_EL2_RES0;
1834 	resx.res1 = VNCR_EL2_RES1;
1835 	set_sysreg_masks(kvm, VNCR_EL2, resx);
1836 
1837 out:
1838 	for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
1839 		__vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
1840 
1841 	return 0;
1842 }
1843 
1844 void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
1845 {
1846 	if (kvm_check_request(KVM_REQ_NESTED_S2_UNMAP, vcpu)) {
1847 		struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
1848 
1849 		write_lock(&vcpu->kvm->mmu_lock);
1850 		if (mmu->pending_unmap) {
1851 			kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), true);
1852 			mmu->pending_unmap = false;
1853 		}
1854 		write_unlock(&vcpu->kvm->mmu_lock);
1855 	}
1856 
1857 	if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
1858 		kvm_map_l1_vncr(vcpu);
1859 
1860 	/* Must be last, as may switch context! */
1861 	if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
1862 		kvm_inject_nested_irq(vcpu);
1863 }
1864 
1865 /*
1866  * One of the many architectural bugs in FEAT_NV2 is that the guest hypervisor
1867  * can write to HCR_EL2 behind our back, potentially changing the exception
1868  * routing / masking for even the host context.
1869  *
1870  * What follows is some slop to (1) react to exception routing / masking and (2)
1871  * preserve the pending SError state across translation regimes.
1872  */
1873 void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu)
1874 {
1875 	if (!vcpu_has_nv(vcpu))
1876 		return;
1877 
1878 	if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1879 		kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1880 }
1881 
1882 void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu)
1883 {
1884 	unsigned long *hcr = vcpu_hcr(vcpu);
1885 
1886 	if (!vcpu_has_nv(vcpu))
1887 		return;
1888 
1889 	/*
1890 	 * We previously decided that an SError was deliverable to the guest.
1891 	 * Reap the pending state from HCR_EL2 and...
1892 	 */
1893 	if (unlikely(__test_and_clear_bit(__ffs(HCR_VSE), hcr)))
1894 		vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
1895 
1896 	/*
1897 	 * Re-attempt SError injection in case the deliverability has changed,
1898 	 * which is necessary to faithfully emulate WFI in the case of a pending
1899 	 * SError being a wakeup condition.
1900 	 */
1901 	if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1902 		kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1903 }
1904 
1905 /*
1906  * KVM unconditionally sets most of these traps anyway, but uses an allowlist
1907  * to document the guest hypervisor traps that may take precedence and to guard
1908  * against future changes to the non-nested trap configuration.
1909  */
1910 #define NV_MDCR_GUEST_INCLUDE	(MDCR_EL2_TDE	|	\
1911 				 MDCR_EL2_TDA	|	\
1912 				 MDCR_EL2_TDRA	|	\
1913 				 MDCR_EL2_TTRF	|	\
1914 				 MDCR_EL2_TPMS	|	\
1915 				 MDCR_EL2_TPM	|	\
1916 				 MDCR_EL2_TPMCR	|	\
1917 				 MDCR_EL2_TDCC	|	\
1918 				 MDCR_EL2_TDOSA)
1919 
1920 void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
1921 {
1922 	u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
1923 
1924 	if (is_nested_ctxt(vcpu))
1925 		vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
1926 	/*
1927 	 * In yet another example where FEAT_NV2 is fscking broken, accesses
1928 	 * to MDSCR_EL1 are redirected to the VNCR despite having an effect
1929 	 * at EL2. Use a big hammer to apply sanity.
1930 	 *
1931 	 * Unless of course we have FEAT_FGT, in which case we can precisely
1932 	 * trap MDSCR_EL1.
1933 	 */
1934 	else if (!cpus_have_final_cap(ARM64_HAS_FGT))
1935 		vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
1936 }
1937