1 // SPDX-License-Identifier: GPL-2.0-only
2 /*
3 * Copyright (C) 2017 - Columbia University and Linaro Ltd.
4 * Author: Jintack Lim <jintack.lim@linaro.org>
5 */
6
7 #include <linux/bitfield.h>
8 #include <linux/kvm.h>
9 #include <linux/kvm_host.h>
10
11 #include <asm/fixmap.h>
12 #include <asm/kvm_arm.h>
13 #include <asm/kvm_emulate.h>
14 #include <asm/kvm_mmu.h>
15 #include <asm/kvm_nested.h>
16 #include <asm/sysreg.h>
17
18 #include "sys_regs.h"
19
20 struct vncr_tlb {
21 /* The guest's VNCR_EL2 */
22 u64 gva;
23 struct s1_walk_info wi;
24 struct s1_walk_result wr;
25
26 u64 hpa;
27
28 /* -1 when not mapped on a CPU */
29 int cpu;
30
31 /*
32 * true if the TLB is valid. Can only be changed with the
33 * mmu_lock held.
34 */
35 bool valid;
36 };
37
38 /*
39 * Ratio of live shadow S2 MMU per vcpu. This is a trade-off between
40 * memory usage and potential number of different sets of S2 PTs in
41 * the guests. Running out of S2 MMUs only affects performance (we
42 * will invalidate them more often).
43 */
44 #define S2_MMU_PER_VCPU 2
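/*
 * Illustrative sizing only (it mirrors the computation in
 * kvm_vcpu_init_nested() below): with S2_MMU_PER_VCPU == 2, a VM that has
 * brought 4 vcpus online ends up with 8 shadow S2 MMUs, i.e.
 *
 *	num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
 */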
45
46 void kvm_init_nested(struct kvm *kvm)
47 {
48 kvm->arch.nested_mmus = NULL;
49 kvm->arch.nested_mmus_size = 0;
50 atomic_set(&kvm->arch.vncr_map_count, 0);
51 }
52
53 static int init_nested_s2_mmu(struct kvm *kvm, struct kvm_s2_mmu *mmu)
54 {
55 /*
56 * We only initialise the IPA range on the canonical MMU, which
57 * defines the contract between KVM and userspace on where the
58 * "hardware" is in the IPA space. This affects the validity of MMIO
59 * exits forwarded to userspace, for example.
60 *
61 * For nested S2s, we use the PARange as exposed to the guest, as it
62 * is allowed to use it at will to expose whatever memory map it
63 * wants to its own guests as it would be on real HW.
64 */
65 return kvm_init_stage2_mmu(kvm, mmu, kvm_get_pa_bits(kvm));
66 }
67
68 int kvm_vcpu_init_nested(struct kvm_vcpu *vcpu)
69 {
70 struct kvm *kvm = vcpu->kvm;
71 struct kvm_s2_mmu *tmp;
72 int num_mmus, ret = 0;
73
74 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features) &&
75 !cpus_have_final_cap(ARM64_HAS_HCR_NV1))
76 return -EINVAL;
77
78 if (!vcpu->arch.ctxt.vncr_array)
79 vcpu->arch.ctxt.vncr_array = (u64 *)__get_free_page(GFP_KERNEL_ACCOUNT |
80 __GFP_ZERO);
81
82 if (!vcpu->arch.ctxt.vncr_array)
83 return -ENOMEM;
84
85 /*
86 * Let's treat memory allocation failures as benign: If we fail to
87 * allocate anything, return an error and keep the allocated array
88 * alive. Userspace may try to recover by initialising the vcpu
89 * again, and there is no reason to affect the whole VM for this.
90 */
91 num_mmus = atomic_read(&kvm->online_vcpus) * S2_MMU_PER_VCPU;
92 tmp = kvrealloc(kvm->arch.nested_mmus,
93 size_mul(sizeof(*kvm->arch.nested_mmus), num_mmus),
94 GFP_KERNEL_ACCOUNT | __GFP_ZERO);
95 if (!tmp)
96 return -ENOMEM;
97
98 swap(kvm->arch.nested_mmus, tmp);
99
100 /*
101 * If we went through a reallocation, adjust the MMU back-pointers in
102 * the previously initialised kvm_pgtable structures.
103 */
104 if (kvm->arch.nested_mmus != tmp)
105 for (int i = 0; i < kvm->arch.nested_mmus_size; i++)
106 kvm->arch.nested_mmus[i].pgt->mmu = &kvm->arch.nested_mmus[i];
107
108 for (int i = kvm->arch.nested_mmus_size; !ret && i < num_mmus; i++)
109 ret = init_nested_s2_mmu(kvm, &kvm->arch.nested_mmus[i]);
110
111 if (ret) {
112 for (int i = kvm->arch.nested_mmus_size; i < num_mmus; i++)
113 kvm_free_stage2_pgd(&kvm->arch.nested_mmus[i]);
114
115 free_page((unsigned long)vcpu->arch.ctxt.vncr_array);
116 vcpu->arch.ctxt.vncr_array = NULL;
117
118 return ret;
119 }
120
121 kvm->arch.nested_mmus_size = num_mmus;
122
123 return 0;
124 }
125
126 struct s2_walk_info {
127 int (*read_desc)(phys_addr_t pa, u64 *desc, void *data);
128 void *data;
129 u64 baddr;
130 unsigned int max_oa_bits;
131 unsigned int pgshift;
132 unsigned int sl;
133 unsigned int t0sz;
134 bool be;
135 };
136
137 static u32 compute_fsc(int level, u32 fsc)
138 {
139 return fsc | (level & 0x3);
140 }
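/*
 * For illustration: a stage-2 translation fault taken at level 2 is
 * reported as
 *
 *	compute_fsc(2, ESR_ELx_FSC_FAULT) == (ESR_ELx_FSC_FAULT | 2)
 *
 * which is the value the walker below places in kvm_s2_trans::esr.
 */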
141
142 static int esr_s2_fault(struct kvm_vcpu *vcpu, int level, u32 fsc)
143 {
144 u32 esr;
145
146 esr = kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC;
147 esr |= compute_fsc(level, fsc);
148 return esr;
149 }
150
151 static int get_ia_size(struct s2_walk_info *wi)
152 {
153 return 64 - wi->t0sz;
154 }
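/*
 * For example, a guest VTCR_EL2.T0SZ of 24 describes a 40-bit IPA space
 * (64 - 24), which is the input size checked against the per-granule
 * limits below.
 */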
155
156 static int check_base_s2_limits(struct s2_walk_info *wi,
157 int level, int input_size, int stride)
158 {
159 int start_size, ia_size;
160
161 ia_size = get_ia_size(wi);
162
163 /* Check translation limits */
164 switch (BIT(wi->pgshift)) {
165 case SZ_64K:
166 if (level == 0 || (level == 1 && ia_size <= 42))
167 return -EFAULT;
168 break;
169 case SZ_16K:
170 if (level == 0 || (level == 1 && ia_size <= 40))
171 return -EFAULT;
172 break;
173 case SZ_4K:
174 if (level < 0 || (level == 0 && ia_size <= 42))
175 return -EFAULT;
176 break;
177 }
178
179 /* Check input size limits */
180 if (input_size > ia_size)
181 return -EFAULT;
182
183 /* Check number of entries in starting level table */
184 start_size = input_size - ((3 - level) * stride + wi->pgshift);
185 if (start_size < 1 || start_size > stride + 4)
186 return -EFAULT;
187
188 return 0;
189 }
190
191 /* Check if output is within boundaries */
192 static int check_output_size(struct s2_walk_info *wi, phys_addr_t output)
193 {
194 unsigned int output_size = wi->max_oa_bits;
195
196 if (output_size != 48 && (output & GENMASK_ULL(47, output_size)))
197 return -1;
198
199 return 0;
200 }
201
202 /*
203 * This is essentially a C version of the pseudocode from the ARM ARM
204 * AArch64.TranslationTableWalk function. I strongly recommend looking at
205 * that pseudocode in trying to understand this.
206 *
207 * Must be called with the kvm->srcu read lock held
208 */
209 static int walk_nested_s2_pgd(phys_addr_t ipa,
210 struct s2_walk_info *wi, struct kvm_s2_trans *out)
211 {
212 int first_block_level, level, stride, input_size, base_lower_bound;
213 phys_addr_t base_addr;
214 unsigned int addr_top, addr_bottom;
215 u64 desc; /* page table entry */
216 int ret;
217 phys_addr_t paddr;
218
219 switch (BIT(wi->pgshift)) {
220 default:
221 case SZ_64K:
222 case SZ_16K:
223 level = 3 - wi->sl;
224 first_block_level = 2;
225 break;
226 case SZ_4K:
227 level = 2 - wi->sl;
228 first_block_level = 1;
229 break;
230 }
231
232 stride = wi->pgshift - 3;
233 input_size = get_ia_size(wi);
234 if (input_size > 48 || input_size < 25)
235 return -EFAULT;
236
237 ret = check_base_s2_limits(wi, level, input_size, stride);
238 if (WARN_ON(ret))
239 return ret;
240
241 base_lower_bound = 3 + input_size - ((3 - level) * stride +
242 wi->pgshift);
243 base_addr = wi->baddr & GENMASK_ULL(47, base_lower_bound);
244
245 if (check_output_size(wi, base_addr)) {
246 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
247 return 1;
248 }
249
250 addr_top = input_size - 1;
251
252 while (1) {
253 phys_addr_t index;
254
255 addr_bottom = (3 - level) * stride + wi->pgshift;
256 index = (ipa & GENMASK_ULL(addr_top, addr_bottom))
257 >> (addr_bottom - 3);
258
259 paddr = base_addr | index;
260 ret = wi->read_desc(paddr, &desc, wi->data);
261 if (ret < 0)
262 return ret;
263
264 /*
265 * Handle byte-reversed descriptors if endianness differs between the
266 * host and the guest hypervisor.
267 */
268 if (wi->be)
269 desc = be64_to_cpu((__force __be64)desc);
270 else
271 desc = le64_to_cpu((__force __le64)desc);
272
273 /* Check for valid descriptor at this point */
274 if (!(desc & 1) || ((desc & 3) == 1 && level == 3)) {
275 out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
276 out->desc = desc;
277 return 1;
278 }
279
280 /* We're at the final level or block translation level */
281 if ((desc & 3) == 1 || level == 3)
282 break;
283
284 if (check_output_size(wi, desc)) {
285 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
286 out->desc = desc;
287 return 1;
288 }
289
290 base_addr = desc & GENMASK_ULL(47, wi->pgshift);
291
292 level += 1;
293 addr_top = addr_bottom - 1;
294 }
295
296 if (level < first_block_level) {
297 out->esr = compute_fsc(level, ESR_ELx_FSC_FAULT);
298 out->desc = desc;
299 return 1;
300 }
301
302 if (check_output_size(wi, desc)) {
303 out->esr = compute_fsc(level, ESR_ELx_FSC_ADDRSZ);
304 out->desc = desc;
305 return 1;
306 }
307
308 if (!(desc & BIT(10))) {
309 out->esr = compute_fsc(level, ESR_ELx_FSC_ACCESS);
310 out->desc = desc;
311 return 1;
312 }
313
314 addr_bottom += contiguous_bit_shift(desc, wi, level);
315
316 /* Calculate and return the result */
317 paddr = (desc & GENMASK_ULL(47, addr_bottom)) |
318 (ipa & GENMASK_ULL(addr_bottom - 1, 0));
319 out->output = paddr;
320 out->block_size = 1UL << ((3 - level) * stride + wi->pgshift);
321 out->readable = desc & (0b01 << 6);
322 out->writable = desc & (0b10 << 6);
323 out->level = level;
324 out->desc = desc;
325 return 0;
326 }
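/*
 * Worked example of the walker's index computation (illustrative only):
 * with a 4K granule, wi->pgshift = 12 and stride = 9. At the final level
 * (level 3), addr_bottom = (3 - 3) * 9 + 12 = 12 and addr_top = 20, so
 *
 *	index = (ipa & GENMASK_ULL(20, 12)) >> (12 - 3);
 *
 * i.e. IPA bits [20:12] select one of 512 8-byte descriptors in the
 * level-3 table at base_addr.
 */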
327
328 static int read_guest_s2_desc(phys_addr_t pa, u64 *desc, void *data)
329 {
330 struct kvm_vcpu *vcpu = data;
331
332 return kvm_read_guest(vcpu->kvm, pa, desc, sizeof(*desc));
333 }
334
335 static void vtcr_to_walk_info(u64 vtcr, struct s2_walk_info *wi)
336 {
337 wi->t0sz = vtcr & TCR_EL2_T0SZ_MASK;
338
339 switch (vtcr & VTCR_EL2_TG0_MASK) {
340 case VTCR_EL2_TG0_4K:
341 wi->pgshift = 12; break;
342 case VTCR_EL2_TG0_16K:
343 wi->pgshift = 14; break;
344 case VTCR_EL2_TG0_64K:
345 default: /* IMPDEF: treat any other value as 64k */
346 wi->pgshift = 16; break;
347 }
348
349 wi->sl = FIELD_GET(VTCR_EL2_SL0_MASK, vtcr);
350 /* Global limit for now, should eventually be per-VM */
351 wi->max_oa_bits = min(get_kvm_ipa_limit(),
352 ps_to_output_size(FIELD_GET(VTCR_EL2_PS_MASK, vtcr), false));
353 }
354
355 int kvm_walk_nested_s2(struct kvm_vcpu *vcpu, phys_addr_t gipa,
356 struct kvm_s2_trans *result)
357 {
358 u64 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
359 struct s2_walk_info wi;
360 int ret;
361
362 result->esr = 0;
363
364 if (!vcpu_has_nv(vcpu))
365 return 0;
366
367 wi.read_desc = read_guest_s2_desc;
368 wi.data = vcpu;
369 wi.baddr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
370
371 vtcr_to_walk_info(vtcr, &wi);
372
373 wi.be = vcpu_read_sys_reg(vcpu, SCTLR_EL2) & SCTLR_ELx_EE;
374
375 ret = walk_nested_s2_pgd(gipa, &wi, result);
376 if (ret)
377 result->esr |= (kvm_vcpu_get_esr(vcpu) & ~ESR_ELx_FSC);
378
379 return ret;
380 }
381
382 static unsigned int ttl_to_size(u8 ttl)
383 {
384 int level = ttl & 3;
385 int gran = (ttl >> 2) & 3;
386 unsigned int max_size = 0;
387
388 switch (gran) {
389 case TLBI_TTL_TG_4K:
390 switch (level) {
391 case 0:
392 break;
393 case 1:
394 max_size = SZ_1G;
395 break;
396 case 2:
397 max_size = SZ_2M;
398 break;
399 case 3:
400 max_size = SZ_4K;
401 break;
402 }
403 break;
404 case TLBI_TTL_TG_16K:
405 switch (level) {
406 case 0:
407 case 1:
408 break;
409 case 2:
410 max_size = SZ_32M;
411 break;
412 case 3:
413 max_size = SZ_16K;
414 break;
415 }
416 break;
417 case TLBI_TTL_TG_64K:
418 switch (level) {
419 case 0:
420 case 1:
421 /* No 52bit IPA support */
422 break;
423 case 2:
424 max_size = SZ_512M;
425 break;
426 case 3:
427 max_size = SZ_64K;
428 break;
429 }
430 break;
431 default: /* No size information */
432 break;
433 }
434
435 return max_size;
436 }
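/*
 * Example (illustrative only): a TTL hint of ((TLBI_TTL_TG_4K << 2) | 2)
 * describes a level-2 entry in a 4K granule, so ttl_to_size() returns
 * SZ_2M. A TTL of 0 carries no usable size information and yields 0,
 * making the callers below fall back on a worst-case invalidation range.
 */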
437
438 static u8 pgshift_level_to_ttl(u16 shift, u8 level)
439 {
440 u8 ttl;
441
442 switch(shift) {
443 case 12:
444 ttl = TLBI_TTL_TG_4K;
445 break;
446 case 14:
447 ttl = TLBI_TTL_TG_16K;
448 break;
449 case 16:
450 ttl = TLBI_TTL_TG_64K;
451 break;
452 default:
453 BUG();
454 }
455
456 ttl <<= 2;
457 ttl |= level & 3;
458
459 return ttl;
460 }
461
462 /*
463 * Compute the equivalent of the TTL field by parsing the shadow PT. The
464 * granule size is extracted from the cached VTCR_EL2.TG0 while the level is
465 * retrieved from first entry carrying the level as a tag.
466 */
467 static u8 get_guest_mapping_ttl(struct kvm_s2_mmu *mmu, u64 addr)
468 {
469 u64 tmp, sz = 0, vtcr = mmu->tlb_vtcr;
470 kvm_pte_t pte;
471 u8 ttl, level;
472
473 lockdep_assert_held_write(&kvm_s2_mmu_to_kvm(mmu)->mmu_lock);
474
475 switch (vtcr & VTCR_EL2_TG0_MASK) {
476 case VTCR_EL2_TG0_4K:
477 ttl = (TLBI_TTL_TG_4K << 2);
478 break;
479 case VTCR_EL2_TG0_16K:
480 ttl = (TLBI_TTL_TG_16K << 2);
481 break;
482 case VTCR_EL2_TG0_64K:
483 default: /* IMPDEF: treat any other value as 64k */
484 ttl = (TLBI_TTL_TG_64K << 2);
485 break;
486 }
487
488 tmp = addr;
489
490 again:
491 /* Iteratively compute the block sizes for a particular granule size */
492 switch (vtcr & VTCR_EL2_TG0_MASK) {
493 case VTCR_EL2_TG0_4K:
494 if (sz < SZ_4K) sz = SZ_4K;
495 else if (sz < SZ_2M) sz = SZ_2M;
496 else if (sz < SZ_1G) sz = SZ_1G;
497 else sz = 0;
498 break;
499 case VTCR_EL2_TG0_16K:
500 if (sz < SZ_16K) sz = SZ_16K;
501 else if (sz < SZ_32M) sz = SZ_32M;
502 else sz = 0;
503 break;
504 case VTCR_EL2_TG0_64K:
505 default: /* IMPDEF: treat any other value as 64k */
506 if (sz < SZ_64K) sz = SZ_64K;
507 else if (sz < SZ_512M) sz = SZ_512M;
508 else sz = 0;
509 break;
510 }
511
512 if (sz == 0)
513 return 0;
514
515 tmp &= ~(sz - 1);
516 if (kvm_pgtable_get_leaf(mmu->pgt, tmp, &pte, NULL))
517 goto again;
518 if (!(pte & PTE_VALID))
519 goto again;
520 level = FIELD_GET(KVM_NV_GUEST_MAP_SZ, pte);
521 if (!level)
522 goto again;
523
524 ttl |= level;
525
526 /*
527 * We now have found some level information in the shadow S2. Check
528 * that the resulting range is actually including the original IPA.
529 */
530 sz = ttl_to_size(ttl);
531 if (addr < (tmp + sz))
532 return ttl;
533
534 return 0;
535 }
536
537 unsigned long compute_tlb_inval_range(struct kvm_s2_mmu *mmu, u64 val)
538 {
539 struct kvm *kvm = kvm_s2_mmu_to_kvm(mmu);
540 unsigned long max_size;
541 u8 ttl;
542
543 ttl = FIELD_GET(TLBI_TTL_MASK, val);
544
545 if (!ttl || !kvm_has_feat(kvm, ID_AA64MMFR2_EL1, TTL, IMP)) {
546 /* No TTL, check the shadow S2 for a hint */
547 u64 addr = (val & GENMASK_ULL(35, 0)) << 12;
548 ttl = get_guest_mapping_ttl(mmu, addr);
549 }
550
551 max_size = ttl_to_size(ttl);
552
553 if (!max_size) {
554 /* Compute the maximum extent of the invalidation */
555 switch (mmu->tlb_vtcr & VTCR_EL2_TG0_MASK) {
556 case VTCR_EL2_TG0_4K:
557 max_size = SZ_1G;
558 break;
559 case VTCR_EL2_TG0_16K:
560 max_size = SZ_32M;
561 break;
562 case VTCR_EL2_TG0_64K:
563 default: /* IMPDEF: treat any other value as 64k */
564 /*
565 * No, we do not support 52bit IPA in nested yet. Once
566 * we do, this should be 4TB.
567 */
568 max_size = SZ_512M;
569 break;
570 }
571 }
572
573 WARN_ON(!max_size);
574 return max_size;
575 }
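/*
 * For example (illustrative): an IPA-based TLBI carrying no TTL hint,
 * targeting a 4K-granule shadow S2 for which the shadow PTs reveal no
 * level information, ends up with max_size == SZ_1G, i.e. the caller has
 * to invalidate a whole 1GB-aligned region around the target IPA.
 */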
576
577 /*
578 * We can have multiple *different* MMU contexts with the same VMID:
579 *
580 * - S2 being enabled or not, hence differing by the HCR_EL2.VM bit
581 *
582 * - Multiple vcpus using private S2s (huh huh...), hence differing by the
583 * VTTBR_EL2.BADDR address
584 *
585 * - A combination of the above...
586 *
587 * We can always identify which MMU context to pick at run-time. However,
588 * TLB invalidation involving a VMID must take action on all the TLBs using
589 * this particular VMID. This translates into applying the same invalidation
590 * operation to all the contexts that are using this VMID. Moar phun!
591 */
592 void kvm_s2_mmu_iterate_by_vmid(struct kvm *kvm, u16 vmid,
593 const union tlbi_info *info,
594 void (*tlbi_callback)(struct kvm_s2_mmu *,
595 const union tlbi_info *))
596 {
597 write_lock(&kvm->mmu_lock);
598
599 for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
600 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
601
602 if (!kvm_s2_mmu_valid(mmu))
603 continue;
604
605 if (vmid == get_vmid(mmu->tlb_vttbr))
606 tlbi_callback(mmu, info);
607 }
608
609 write_unlock(&kvm->mmu_lock);
610 }
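/*
 * Typical (illustrative) use from the TLBI emulation code: decode the
 * TLBI operands into a union tlbi_info and let the iterator apply an
 * invalidation callback to every valid S2 MMU tagged with the guest's
 * VMID:
 *
 *	kvm_s2_mmu_iterate_by_vmid(vcpu->kvm, get_vmid(vttbr), &info,
 *				   invalidation_callback);
 *
 * where 'info' and 'invalidation_callback' stand in for whatever the
 * caller provides; the actual callbacks live with the sysreg emulation,
 * not here.
 */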
611
612 struct kvm_s2_mmu *lookup_s2_mmu(struct kvm_vcpu *vcpu)
613 {
614 struct kvm *kvm = vcpu->kvm;
615 bool nested_stage2_enabled;
616 u64 vttbr, vtcr, hcr;
617
618 lockdep_assert_held_write(&kvm->mmu_lock);
619
620 vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2);
621 vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
622 hcr = vcpu_read_sys_reg(vcpu, HCR_EL2);
623
624 nested_stage2_enabled = hcr & HCR_VM;
625
626 /* Don't consider the CnP bit for the vttbr match */
627 vttbr &= ~VTTBR_CNP_BIT;
628
629 /*
630 * Two possibilities when looking up a S2 MMU context:
631 *
632 * - either S2 is enabled in the guest, and we need a context that is
633 * S2-enabled and matches the full VTTBR (VMID+BADDR) and VTCR,
634 * which makes it safe from a TLB conflict perspective (a broken
635 * guest won't be able to generate them),
636 *
637 * - or S2 is disabled, and we need a context that is S2-disabled
638 * and matches the VMID only, as all TLBs are tagged by VMID even
639 * if S2 translation is disabled.
640 */
641 for (int i = 0; i < kvm->arch.nested_mmus_size; i++) {
642 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
643
644 if (!kvm_s2_mmu_valid(mmu))
645 continue;
646
647 if (nested_stage2_enabled &&
648 mmu->nested_stage2_enabled &&
649 vttbr == mmu->tlb_vttbr &&
650 vtcr == mmu->tlb_vtcr)
651 return mmu;
652
653 if (!nested_stage2_enabled &&
654 !mmu->nested_stage2_enabled &&
655 get_vmid(vttbr) == get_vmid(mmu->tlb_vttbr))
656 return mmu;
657 }
658 return NULL;
659 }
660
661 static struct kvm_s2_mmu *get_s2_mmu_nested(struct kvm_vcpu *vcpu)
662 {
663 struct kvm *kvm = vcpu->kvm;
664 struct kvm_s2_mmu *s2_mmu;
665 int i;
666
667 lockdep_assert_held_write(&vcpu->kvm->mmu_lock);
668
669 s2_mmu = lookup_s2_mmu(vcpu);
670 if (s2_mmu)
671 goto out;
672
673 /*
674 * Make sure we don't always search from the same point, or we
675 * will always reuse a potentially active context, leaving
676 * free contexts unused.
677 */
678 for (i = kvm->arch.nested_mmus_next;
679 i < (kvm->arch.nested_mmus_size + kvm->arch.nested_mmus_next);
680 i++) {
681 s2_mmu = &kvm->arch.nested_mmus[i % kvm->arch.nested_mmus_size];
682
683 if (atomic_read(&s2_mmu->refcnt) == 0)
684 break;
685 }
686 BUG_ON(atomic_read(&s2_mmu->refcnt)); /* We have struct MMUs to spare */
687
688 /* Set the scene for the next search */
689 kvm->arch.nested_mmus_next = (i + 1) % kvm->arch.nested_mmus_size;
690
691 /* Make sure we don't forget to do the laundry */
692 if (kvm_s2_mmu_valid(s2_mmu))
693 s2_mmu->pending_unmap = true;
694
695 /*
696 * The virtual VMID (modulo CnP) will be used as a key when matching
697 * an existing kvm_s2_mmu.
698 *
699 * We cache VTCR at allocation time, once and for all. It'd be great
700 * if the guest didn't screw that one up, as this is not very
701 * forgiving...
702 */
703 s2_mmu->tlb_vttbr = vcpu_read_sys_reg(vcpu, VTTBR_EL2) & ~VTTBR_CNP_BIT;
704 s2_mmu->tlb_vtcr = vcpu_read_sys_reg(vcpu, VTCR_EL2);
705 s2_mmu->nested_stage2_enabled = vcpu_read_sys_reg(vcpu, HCR_EL2) & HCR_VM;
706
707 out:
708 atomic_inc(&s2_mmu->refcnt);
709
710 /*
711 * Set the vCPU request to perform an unmap, even if the pending unmap
712 * originates from another vCPU. This guarantees that the MMU has been
713 * completely unmapped before any vCPU actually uses it, and allows
714 * multiple vCPUs to lend a hand with completing the unmap.
715 */
716 if (s2_mmu->pending_unmap)
717 kvm_make_request(KVM_REQ_NESTED_S2_UNMAP, vcpu);
718
719 return s2_mmu;
720 }
721
722 void kvm_init_nested_s2_mmu(struct kvm_s2_mmu *mmu)
723 {
724 /* CnP being set denotes an invalid entry */
725 mmu->tlb_vttbr = VTTBR_CNP_BIT;
726 mmu->nested_stage2_enabled = false;
727 atomic_set(&mmu->refcnt, 0);
728 }
729
730 void kvm_vcpu_load_hw_mmu(struct kvm_vcpu *vcpu)
731 {
732 /*
733 * If the vCPU kept its reference on the MMU after the last put,
734 * keep rolling with it.
735 */
736 if (is_hyp_ctxt(vcpu)) {
737 if (!vcpu->arch.hw_mmu)
738 vcpu->arch.hw_mmu = &vcpu->kvm->arch.mmu;
739 } else {
740 if (!vcpu->arch.hw_mmu) {
741 scoped_guard(write_lock, &vcpu->kvm->mmu_lock)
742 vcpu->arch.hw_mmu = get_s2_mmu_nested(vcpu);
743 }
744
745 if (__vcpu_sys_reg(vcpu, HCR_EL2) & HCR_NV)
746 kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
747 }
748 }
749
750 void kvm_vcpu_put_hw_mmu(struct kvm_vcpu *vcpu)
751 {
752 /* Unconditionally drop the VNCR mapping if we have one */
753 if (host_data_test_flag(L1_VNCR_MAPPED)) {
754 BUG_ON(vcpu->arch.vncr_tlb->cpu != smp_processor_id());
755 BUG_ON(is_hyp_ctxt(vcpu));
756
757 clear_fixmap(vncr_fixmap(vcpu->arch.vncr_tlb->cpu));
758 vcpu->arch.vncr_tlb->cpu = -1;
759 host_data_clear_flag(L1_VNCR_MAPPED);
760 atomic_dec(&vcpu->kvm->arch.vncr_map_count);
761 }
762
763 /*
764 * Keep a reference on the associated stage-2 MMU if the vCPU is
765 * scheduling out and not in WFI emulation, suggesting it is likely to
766 * reuse the MMU sometime soon.
767 */
768 if (vcpu->scheduled_out && !vcpu_get_flag(vcpu, IN_WFI))
769 return;
770
771 if (kvm_is_nested_s2_mmu(vcpu->kvm, vcpu->arch.hw_mmu))
772 atomic_dec(&vcpu->arch.hw_mmu->refcnt);
773
774 vcpu->arch.hw_mmu = NULL;
775 }
776
777 /*
778 * Returns non-zero if permission fault is handled by injecting it to the next
779 * level hypervisor.
780 */
781 int kvm_s2_handle_perm_fault(struct kvm_vcpu *vcpu, struct kvm_s2_trans *trans)
782 {
783 bool forward_fault = false;
784
785 trans->esr = 0;
786
787 if (!kvm_vcpu_trap_is_permission_fault(vcpu))
788 return 0;
789
790 if (kvm_vcpu_trap_is_iabt(vcpu)) {
791 forward_fault = !kvm_s2_trans_executable(trans);
792 } else {
793 bool write_fault = kvm_is_write_fault(vcpu);
794
795 forward_fault = ((write_fault && !trans->writable) ||
796 (!write_fault && !trans->readable));
797 }
798
799 if (forward_fault)
800 trans->esr = esr_s2_fault(vcpu, trans->level, ESR_ELx_FSC_PERM);
801
802 return forward_fault;
803 }
804
805 int kvm_inject_s2_fault(struct kvm_vcpu *vcpu, u64 esr_el2)
806 {
807 vcpu_write_sys_reg(vcpu, vcpu->arch.fault.far_el2, FAR_EL2);
808 vcpu_write_sys_reg(vcpu, vcpu->arch.fault.hpfar_el2, HPFAR_EL2);
809
810 return kvm_inject_nested_sync(vcpu, esr_el2);
811 }
812
813 static void invalidate_vncr(struct vncr_tlb *vt)
814 {
815 vt->valid = false;
816 if (vt->cpu != -1)
817 clear_fixmap(vncr_fixmap(vt->cpu));
818 }
819
820 static void kvm_invalidate_vncr_ipa(struct kvm *kvm, u64 start, u64 end)
821 {
822 struct kvm_vcpu *vcpu;
823 unsigned long i;
824
825 lockdep_assert_held_write(&kvm->mmu_lock);
826
827 if (!kvm_has_feat(kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
828 return;
829
830 kvm_for_each_vcpu(i, vcpu, kvm) {
831 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
832 u64 ipa_start, ipa_end, ipa_size;
833
834 /*
835 * Careful here: We end up here from an MMU notifier,
836 * and this can race against a vcpu not being onlined
837 * yet, without the pseudo-TLB being allocated.
838 *
839 * Skip those, as they obviously don't participate in
840 * the invalidation at this stage.
841 */
842 if (!vt)
843 continue;
844
845 if (!vt->valid)
846 continue;
847
848 ipa_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
849 vt->wr.level));
850 ipa_start = vt->wr.pa & ~(ipa_size - 1);
851 ipa_end = ipa_start + ipa_size;
852
853 if (ipa_end <= start || ipa_start >= end)
854 continue;
855
856 invalidate_vncr(vt);
857 }
858 }
859
860 struct s1e2_tlbi_scope {
861 enum {
862 TLBI_ALL,
863 TLBI_VA,
864 TLBI_VAA,
865 TLBI_ASID,
866 } type;
867
868 u16 asid;
869 u64 va;
870 u64 size;
871 };
872
873 static void invalidate_vncr_va(struct kvm *kvm,
874 struct s1e2_tlbi_scope *scope)
875 {
876 struct kvm_vcpu *vcpu;
877 unsigned long i;
878
879 lockdep_assert_held_write(&kvm->mmu_lock);
880
881 kvm_for_each_vcpu(i, vcpu, kvm) {
882 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
883 u64 va_start, va_end, va_size;
884
885 if (!vt->valid)
886 continue;
887
888 va_size = ttl_to_size(pgshift_level_to_ttl(vt->wi.pgshift,
889 vt->wr.level));
890 va_start = vt->gva & ~(va_size - 1);
891 va_end = va_start + va_size;
892
893 switch (scope->type) {
894 case TLBI_ALL:
895 break;
896
897 case TLBI_VA:
898 if (va_end <= scope->va ||
899 va_start >= (scope->va + scope->size))
900 continue;
901 if (vt->wr.nG && vt->wr.asid != scope->asid)
902 continue;
903 break;
904
905 case TLBI_VAA:
906 if (va_end <= scope->va ||
907 va_start >= (scope->va + scope->size))
908 continue;
909 break;
910
911 case TLBI_ASID:
912 if (!vt->wr.nG || vt->wr.asid != scope->asid)
913 continue;
914 break;
915 }
916
917 invalidate_vncr(vt);
918 }
919 }
920
921 #define tlbi_va_s1_to_va(v) (u64)sign_extend64((v) << 12, 48)
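/*
 * A TLBI by VA encodes VA[55:12] in the low bits of its payload; the
 * macro above shifts those back into place and sign-extends from bit 48
 * to rebuild a canonical address. Any ASID bits sitting in the top of
 * the payload are wiped out by the sign extension.
 */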
922
923 static void compute_s1_tlbi_range(struct kvm_vcpu *vcpu, u32 inst, u64 val,
924 struct s1e2_tlbi_scope *scope)
925 {
926 switch (inst) {
927 case OP_TLBI_ALLE2:
928 case OP_TLBI_ALLE2IS:
929 case OP_TLBI_ALLE2OS:
930 case OP_TLBI_VMALLE1:
931 case OP_TLBI_VMALLE1IS:
932 case OP_TLBI_VMALLE1OS:
933 case OP_TLBI_ALLE2NXS:
934 case OP_TLBI_ALLE2ISNXS:
935 case OP_TLBI_ALLE2OSNXS:
936 case OP_TLBI_VMALLE1NXS:
937 case OP_TLBI_VMALLE1ISNXS:
938 case OP_TLBI_VMALLE1OSNXS:
939 scope->type = TLBI_ALL;
940 break;
941 case OP_TLBI_VAE2:
942 case OP_TLBI_VAE2IS:
943 case OP_TLBI_VAE2OS:
944 case OP_TLBI_VAE1:
945 case OP_TLBI_VAE1IS:
946 case OP_TLBI_VAE1OS:
947 case OP_TLBI_VAE2NXS:
948 case OP_TLBI_VAE2ISNXS:
949 case OP_TLBI_VAE2OSNXS:
950 case OP_TLBI_VAE1NXS:
951 case OP_TLBI_VAE1ISNXS:
952 case OP_TLBI_VAE1OSNXS:
953 case OP_TLBI_VALE2:
954 case OP_TLBI_VALE2IS:
955 case OP_TLBI_VALE2OS:
956 case OP_TLBI_VALE1:
957 case OP_TLBI_VALE1IS:
958 case OP_TLBI_VALE1OS:
959 case OP_TLBI_VALE2NXS:
960 case OP_TLBI_VALE2ISNXS:
961 case OP_TLBI_VALE2OSNXS:
962 case OP_TLBI_VALE1NXS:
963 case OP_TLBI_VALE1ISNXS:
964 case OP_TLBI_VALE1OSNXS:
965 scope->type = TLBI_VA;
966 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
967 if (!scope->size)
968 scope->size = SZ_1G;
969 scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
970 scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
971 break;
972 case OP_TLBI_ASIDE1:
973 case OP_TLBI_ASIDE1IS:
974 case OP_TLBI_ASIDE1OS:
975 case OP_TLBI_ASIDE1NXS:
976 case OP_TLBI_ASIDE1ISNXS:
977 case OP_TLBI_ASIDE1OSNXS:
978 scope->type = TLBI_ASID;
979 scope->asid = FIELD_GET(TLBIR_ASID_MASK, val);
980 break;
981 case OP_TLBI_VAAE1:
982 case OP_TLBI_VAAE1IS:
983 case OP_TLBI_VAAE1OS:
984 case OP_TLBI_VAAE1NXS:
985 case OP_TLBI_VAAE1ISNXS:
986 case OP_TLBI_VAAE1OSNXS:
987 case OP_TLBI_VAALE1:
988 case OP_TLBI_VAALE1IS:
989 case OP_TLBI_VAALE1OS:
990 case OP_TLBI_VAALE1NXS:
991 case OP_TLBI_VAALE1ISNXS:
992 case OP_TLBI_VAALE1OSNXS:
993 scope->type = TLBI_VAA;
994 scope->size = ttl_to_size(FIELD_GET(TLBI_TTL_MASK, val));
995 if (!scope->size)
996 scope->size = SZ_1G;
997 scope->va = tlbi_va_s1_to_va(val) & ~(scope->size - 1);
998 break;
999 case OP_TLBI_RVAE2:
1000 case OP_TLBI_RVAE2IS:
1001 case OP_TLBI_RVAE2OS:
1002 case OP_TLBI_RVAE1:
1003 case OP_TLBI_RVAE1IS:
1004 case OP_TLBI_RVAE1OS:
1005 case OP_TLBI_RVAE2NXS:
1006 case OP_TLBI_RVAE2ISNXS:
1007 case OP_TLBI_RVAE2OSNXS:
1008 case OP_TLBI_RVAE1NXS:
1009 case OP_TLBI_RVAE1ISNXS:
1010 case OP_TLBI_RVAE1OSNXS:
1011 case OP_TLBI_RVALE2:
1012 case OP_TLBI_RVALE2IS:
1013 case OP_TLBI_RVALE2OS:
1014 case OP_TLBI_RVALE1:
1015 case OP_TLBI_RVALE1IS:
1016 case OP_TLBI_RVALE1OS:
1017 case OP_TLBI_RVALE2NXS:
1018 case OP_TLBI_RVALE2ISNXS:
1019 case OP_TLBI_RVALE2OSNXS:
1020 case OP_TLBI_RVALE1NXS:
1021 case OP_TLBI_RVALE1ISNXS:
1022 case OP_TLBI_RVALE1OSNXS:
1023 scope->type = TLBI_VA;
1024 scope->va = decode_range_tlbi(val, &scope->size, &scope->asid);
1025 break;
1026 case OP_TLBI_RVAAE1:
1027 case OP_TLBI_RVAAE1IS:
1028 case OP_TLBI_RVAAE1OS:
1029 case OP_TLBI_RVAAE1NXS:
1030 case OP_TLBI_RVAAE1ISNXS:
1031 case OP_TLBI_RVAAE1OSNXS:
1032 case OP_TLBI_RVAALE1:
1033 case OP_TLBI_RVAALE1IS:
1034 case OP_TLBI_RVAALE1OS:
1035 case OP_TLBI_RVAALE1NXS:
1036 case OP_TLBI_RVAALE1ISNXS:
1037 case OP_TLBI_RVAALE1OSNXS:
1038 scope->type = TLBI_VAA;
1039 scope->va = decode_range_tlbi(val, &scope->size, NULL);
1040 break;
1041 }
1042 }
1043
1044 void kvm_handle_s1e2_tlbi(struct kvm_vcpu *vcpu, u32 inst, u64 val)
1045 {
1046 struct s1e2_tlbi_scope scope = {};
1047
1048 compute_s1_tlbi_range(vcpu, inst, val, &scope);
1049
1050 guard(write_lock)(&vcpu->kvm->mmu_lock);
1051 invalidate_vncr_va(vcpu->kvm, &scope);
1052 }
1053
1054 void kvm_nested_s2_wp(struct kvm *kvm)
1055 {
1056 int i;
1057
1058 lockdep_assert_held_write(&kvm->mmu_lock);
1059
1060 for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1061 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1062
1063 if (kvm_s2_mmu_valid(mmu))
1064 kvm_stage2_wp_range(mmu, 0, kvm_phys_size(mmu));
1065 }
1066
1067 kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1068 }
1069
1070 void kvm_nested_s2_unmap(struct kvm *kvm, bool may_block)
1071 {
1072 int i;
1073
1074 lockdep_assert_held_write(&kvm->mmu_lock);
1075
1076 for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1077 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1078
1079 if (kvm_s2_mmu_valid(mmu))
1080 kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), may_block);
1081 }
1082
1083 kvm_invalidate_vncr_ipa(kvm, 0, BIT(kvm->arch.mmu.pgt->ia_bits));
1084 }
1085
1086 void kvm_nested_s2_flush(struct kvm *kvm)
1087 {
1088 int i;
1089
1090 lockdep_assert_held_write(&kvm->mmu_lock);
1091
1092 for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1093 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1094
1095 if (kvm_s2_mmu_valid(mmu))
1096 kvm_stage2_flush_range(mmu, 0, kvm_phys_size(mmu));
1097 }
1098 }
1099
1100 void kvm_arch_flush_shadow_all(struct kvm *kvm)
1101 {
1102 int i;
1103
1104 for (i = 0; i < kvm->arch.nested_mmus_size; i++) {
1105 struct kvm_s2_mmu *mmu = &kvm->arch.nested_mmus[i];
1106
1107 if (!WARN_ON(atomic_read(&mmu->refcnt)))
1108 kvm_free_stage2_pgd(mmu);
1109 }
1110 kvfree(kvm->arch.nested_mmus);
1111 kvm->arch.nested_mmus = NULL;
1112 kvm->arch.nested_mmus_size = 0;
1113 kvm_uninit_stage2_mmu(kvm);
1114 }
1115
1116 /*
1117 * Dealing with VNCR_EL2 exposed by the *guest* is a complicated matter:
1118 *
1119 * - We introduce an internal representation of a vcpu-private TLB,
1120 * representing the mapping between the guest VA contained in VNCR_EL2,
1121 * the IPA the guest's EL2 PTs point to, and the actual PA this lives at.
1122 *
1123 * - On translation fault from a nested VNCR access, we create such a TLB.
1124 * If there is no mapping to describe, the guest inherits the fault.
1125 * Crucially, no actual mapping is done at this stage.
1126 *
1127 * - On vcpu_load() in a non-HYP context with HCR_EL2.NV==1, if the above
1128 * TLB exists, we map it in the fixmap for this CPU, and run with it. We
1129 * have to respect the permissions dictated by the guest, but not the
1130 * memory type (FWB is a must).
1131 *
1132 * - Note that we usually don't do a vcpu_load() on the back of a fault
1133 * (unless we are preempted), so the resolution of a translation fault
1134 * must go via a request that will map the VNCR page in the fixmap.
1135 * vcpu_load() might as well use the same mechanism.
1136 *
1137 * - On vcpu_put() in a non-HYP context with HCR_EL2.NV==1, if the TLB was
1138 * mapped, we unmap it. Yes it is that simple. The TLB still exists
1139 * though, and may be reused at a later load.
1140 *
1141 * - On permission fault, we simply forward the fault to the guest's EL2.
1142 * Get out of my way.
1143 *
1144 * - On any TLBI for the EL2&0 translation regime, we must find any TLB that
1145 * intersects with the TLBI request, invalidate it, and unmap the page
1146 * from the fixmap. Because we need to look at all the vcpu-private TLBs,
1147 * this requires some wide-ranging locking to ensure that nothing races
1148 * against it. This may require some refcounting to avoid the search when
1149 * no such TLB is present.
1150 *
1151 * - On MMU notifiers, we must invalidate our TLB in a similar way, but
1152 * looking at the IPA instead. The funny part is that there may not be a
1153 * stage-2 mapping for this page if L1 hasn't accessed it using LD/ST
1154 * instructions.
1155 */
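/*
 * As a rough map to the code below (not a contract): kvm_translate_vncr()
 * builds the pseudo-TLB on a translation fault, kvm_map_l1_vncr()
 * installs it in the per-CPU fixmap when servicing
 * KVM_REQ_MAP_L1_VNCR_EL2, kvm_vcpu_put_hw_mmu() tears the mapping down,
 * and invalidate_vncr_va()/kvm_invalidate_vncr_ipa() cover the TLBI and
 * MMU-notifier sides respectively.
 */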
1156
1157 int kvm_vcpu_allocate_vncr_tlb(struct kvm_vcpu *vcpu)
1158 {
1159 if (!kvm_has_feat(vcpu->kvm, ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY))
1160 return 0;
1161
1162 vcpu->arch.vncr_tlb = kzalloc(sizeof(*vcpu->arch.vncr_tlb),
1163 GFP_KERNEL_ACCOUNT);
1164 if (!vcpu->arch.vncr_tlb)
1165 return -ENOMEM;
1166
1167 return 0;
1168 }
1169
1170 static u64 read_vncr_el2(struct kvm_vcpu *vcpu)
1171 {
1172 return (u64)sign_extend64(__vcpu_sys_reg(vcpu, VNCR_EL2), 48);
1173 }
1174
1175 static int kvm_translate_vncr(struct kvm_vcpu *vcpu, bool *is_gmem)
1176 {
1177 struct kvm_memory_slot *memslot;
1178 bool write_fault, writable;
1179 unsigned long mmu_seq;
1180 struct vncr_tlb *vt;
1181 struct page *page;
1182 u64 va, pfn, gfn;
1183 int ret;
1184
1185 vt = vcpu->arch.vncr_tlb;
1186
1187 /*
1188 * If we're about to walk the EL2 S1 PTs, we must invalidate the
1189 * current TLB, as it could be sampled from another vcpu doing a
1190 * TLBI *IS. A real CPU wouldn't do that, but we only keep a single
1191 * translation, so not much of a choice.
1192 *
1193 * We also prepare the next walk whilst we're at it.
1194 */
1195 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1196 invalidate_vncr(vt);
1197
1198 vt->wi = (struct s1_walk_info) {
1199 .regime = TR_EL20,
1200 .as_el0 = false,
1201 .pan = false,
1202 };
1203 vt->wr = (struct s1_walk_result){};
1204 }
1205
1206 guard(srcu)(&vcpu->kvm->srcu);
1207
1208 va = read_vncr_el2(vcpu);
1209
1210 ret = __kvm_translate_va(vcpu, &vt->wi, &vt->wr, va);
1211 if (ret)
1212 return ret;
1213
1214 write_fault = kvm_is_write_fault(vcpu);
1215
1216 mmu_seq = vcpu->kvm->mmu_invalidate_seq;
1217 smp_rmb();
1218
1219 gfn = vt->wr.pa >> PAGE_SHIFT;
1220 memslot = gfn_to_memslot(vcpu->kvm, gfn);
1221 if (!memslot)
1222 return -EFAULT;
1223
1224 *is_gmem = kvm_slot_has_gmem(memslot);
1225 if (!*is_gmem) {
1226 pfn = __kvm_faultin_pfn(memslot, gfn, write_fault ? FOLL_WRITE : 0,
1227 &writable, &page);
1228 if (is_error_noslot_pfn(pfn) || (write_fault && !writable))
1229 return -EFAULT;
1230 } else {
1231 ret = kvm_gmem_get_pfn(vcpu->kvm, memslot, gfn, &pfn, &page, NULL);
1232 if (ret) {
1233 kvm_prepare_memory_fault_exit(vcpu, vt->wr.pa, PAGE_SIZE,
1234 write_fault, false, false);
1235 return ret;
1236 }
1237 }
1238
1239 scoped_guard(write_lock, &vcpu->kvm->mmu_lock) {
1240 if (mmu_invalidate_retry(vcpu->kvm, mmu_seq))
1241 return -EAGAIN;
1242
1243 vt->gva = va;
1244 vt->hpa = pfn << PAGE_SHIFT;
1245 vt->valid = true;
1246 vt->cpu = -1;
1247
1248 kvm_make_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu);
1249 kvm_release_faultin_page(vcpu->kvm, page, false, vt->wr.pw);
1250 }
1251
1252 if (vt->wr.pw)
1253 mark_page_dirty(vcpu->kvm, gfn);
1254
1255 return 0;
1256 }
1257
1258 static void inject_vncr_perm(struct kvm_vcpu *vcpu)
1259 {
1260 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1261 u64 esr = kvm_vcpu_get_esr(vcpu);
1262
1263 /* Adjust the fault level to reflect that of the guest's */
1264 esr &= ~ESR_ELx_FSC;
1265 esr |= FIELD_PREP(ESR_ELx_FSC,
1266 ESR_ELx_FSC_PERM_L(vt->wr.level));
1267
1268 kvm_inject_nested_sync(vcpu, esr);
1269 }
1270
1271 static bool kvm_vncr_tlb_lookup(struct kvm_vcpu *vcpu)
1272 {
1273 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1274
1275 lockdep_assert_held_read(&vcpu->kvm->mmu_lock);
1276
1277 if (!vt->valid)
1278 return false;
1279
1280 if (read_vncr_el2(vcpu) != vt->gva)
1281 return false;
1282
1283 if (vt->wr.nG) {
1284 u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1285 u64 ttbr = ((tcr & TCR_A1) ?
1286 vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1287 vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1288 u16 asid;
1289
1290 asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1291 if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1292 !(tcr & TCR_ASID16))
1293 asid &= GENMASK(7, 0);
1294
1295 return asid == vt->wr.asid;
1296 }
1297
1298 return true;
1299 }
1300
1301 int kvm_handle_vncr_abort(struct kvm_vcpu *vcpu)
1302 {
1303 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1304 u64 esr = kvm_vcpu_get_esr(vcpu);
1305
1306 WARN_ON_ONCE(!(esr & ESR_ELx_VNCR));
1307
1308 if (kvm_vcpu_abt_issea(vcpu))
1309 return kvm_handle_guest_sea(vcpu);
1310
1311 if (esr_fsc_is_permission_fault(esr)) {
1312 inject_vncr_perm(vcpu);
1313 } else if (esr_fsc_is_translation_fault(esr)) {
1314 bool valid, is_gmem = false;
1315 int ret;
1316
1317 scoped_guard(read_lock, &vcpu->kvm->mmu_lock)
1318 valid = kvm_vncr_tlb_lookup(vcpu);
1319
1320 if (!valid)
1321 ret = kvm_translate_vncr(vcpu, &is_gmem);
1322 else
1323 ret = -EPERM;
1324
1325 switch (ret) {
1326 case -EAGAIN:
1327 /* Let's try again... */
1328 break;
1329 case -ENOMEM:
1330 /*
1331 * For guest_memfd, this indicates that it failed to
1332 * create a folio to back the memory. Inform userspace.
1333 */
1334 if (is_gmem)
1335 return 0;
1336 /* Otherwise, let's try again... */
1337 break;
1338 case -EFAULT:
1339 case -EIO:
1340 case -EHWPOISON:
1341 if (is_gmem)
1342 return 0;
1343 fallthrough;
1344 case -EINVAL:
1345 case -ENOENT:
1346 case -EACCES:
1347 /*
1348 * Translation failed, inject the corresponding
1349 * exception back to EL2.
1350 */
1351 BUG_ON(!vt->wr.failed);
1352
1353 esr &= ~ESR_ELx_FSC;
1354 esr |= FIELD_PREP(ESR_ELx_FSC, vt->wr.fst);
1355
1356 kvm_inject_nested_sync(vcpu, esr);
1357 break;
1358 case -EPERM:
1359 /* Hack to deal with POE until we get kernel support */
1360 inject_vncr_perm(vcpu);
1361 break;
1362 case 0:
1363 break;
1364 }
1365 } else {
1366 WARN_ONCE(1, "Unhandled VNCR abort, ESR=%llx\n", esr);
1367 }
1368
1369 return 1;
1370 }
1371
1372 static void kvm_map_l1_vncr(struct kvm_vcpu *vcpu)
1373 {
1374 struct vncr_tlb *vt = vcpu->arch.vncr_tlb;
1375 pgprot_t prot;
1376
1377 guard(preempt)();
1378 guard(read_lock)(&vcpu->kvm->mmu_lock);
1379
1380 /*
1381 * The request to map VNCR may have raced against some other
1382 * event, such as an interrupt, and may not be valid anymore.
1383 */
1384 if (is_hyp_ctxt(vcpu))
1385 return;
1386
1387 /*
1388 * Check that the pseudo-TLB is valid and that VNCR_EL2 still
1389 * contains the expected value. If it doesn't, we simply bail out
1390 * without a mapping -- a transformed MSR/MRS will generate the
1391 * fault and allows us to populate the pseudo-TLB.
1392 */
1393 if (!vt->valid)
1394 return;
1395
1396 if (read_vncr_el2(vcpu) != vt->gva)
1397 return;
1398
1399 if (vt->wr.nG) {
1400 u64 tcr = vcpu_read_sys_reg(vcpu, TCR_EL2);
1401 u64 ttbr = ((tcr & TCR_A1) ?
1402 vcpu_read_sys_reg(vcpu, TTBR1_EL2) :
1403 vcpu_read_sys_reg(vcpu, TTBR0_EL2));
1404 u16 asid;
1405
1406 asid = FIELD_GET(TTBR_ASID_MASK, ttbr);
1407 if (!kvm_has_feat_enum(vcpu->kvm, ID_AA64MMFR0_EL1, ASIDBITS, 16) ||
1408 !(tcr & TCR_ASID16))
1409 asid &= GENMASK(7, 0);
1410
1411 if (asid != vt->wr.asid)
1412 return;
1413 }
1414
1415 vt->cpu = smp_processor_id();
1416
1417 if (vt->wr.pw && vt->wr.pr)
1418 prot = PAGE_KERNEL;
1419 else if (vt->wr.pr)
1420 prot = PAGE_KERNEL_RO;
1421 else
1422 prot = PAGE_NONE;
1423
1424 /*
1425 * We can't map write-only (or no permission at all) in the kernel,
1426 * but the guest can do it if using POE, so we'll have to turn a
1427 * translation fault into a permission fault at runtime.
1428 * FIXME: WO doesn't work at all, need POE support in the kernel.
1429 */
1430 if (pgprot_val(prot) != pgprot_val(PAGE_NONE)) {
1431 __set_fixmap(vncr_fixmap(vt->cpu), vt->hpa, prot);
1432 host_data_set_flag(L1_VNCR_MAPPED);
1433 atomic_inc(&vcpu->kvm->arch.vncr_map_count);
1434 }
1435 }
1436
1437 #define has_tgran_2(__r, __sz) \
1438 ({ \
1439 u64 _s1, _s2, _mmfr0 = __r; \
1440 \
1441 _s2 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \
1442 TGRAN##__sz##_2, _mmfr0); \
1443 \
1444 _s1 = SYS_FIELD_GET(ID_AA64MMFR0_EL1, \
1445 TGRAN##__sz, _mmfr0); \
1446 \
1447 ((_s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_NI && \
1448 _s2 != ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz) || \
1449 (_s2 == ID_AA64MMFR0_EL1_TGRAN##__sz##_2_TGRAN##__sz && \
1450 _s1 != ID_AA64MMFR0_EL1_TGRAN##__sz##_NI)); \
1451 })
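/*
 * Reading of the macro above (for illustration): stage-2 support for a
 * given granule is deemed present either when the TGRANx_2 field
 * advertises it explicitly, or when it reads "same as stage-1" and the
 * corresponding stage-1 TGRANx field is implemented.
 */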
1452 /*
1453 * Our emulated CPU doesn't support all the possible features. For the
1454 * sake of simplicity (and probably mental sanity), wipe out a number
1455 * of feature bits we don't intend to support for the time being.
1456 * This list should get updated as new features get added to the NV
1457 * support, and new extension to the architecture.
1458 */
1459 u64 limit_nv_id_reg(struct kvm *kvm, u32 reg, u64 val)
1460 {
1461 u64 orig_val = val;
1462
1463 switch (reg) {
1464 case SYS_ID_AA64ISAR0_EL1:
1465 /* Support everything but TME */
1466 val &= ~ID_AA64ISAR0_EL1_TME;
1467 break;
1468
1469 case SYS_ID_AA64ISAR1_EL1:
1470 /* Support everything but LS64 and Spec Invalidation */
1471 val &= ~(ID_AA64ISAR1_EL1_LS64 |
1472 ID_AA64ISAR1_EL1_SPECRES);
1473 break;
1474
1475 case SYS_ID_AA64PFR0_EL1:
1476 /* No RME, AMU, MPAM, or S-EL2 */
1477 val &= ~(ID_AA64PFR0_EL1_RME |
1478 ID_AA64PFR0_EL1_AMU |
1479 ID_AA64PFR0_EL1_MPAM |
1480 ID_AA64PFR0_EL1_SEL2 |
1481 ID_AA64PFR0_EL1_EL3 |
1482 ID_AA64PFR0_EL1_EL2 |
1483 ID_AA64PFR0_EL1_EL1 |
1484 ID_AA64PFR0_EL1_EL0);
1485 /* 64bit only at any EL */
1486 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL0, IMP);
1487 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL1, IMP);
1488 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL2, IMP);
1489 val |= SYS_FIELD_PREP_ENUM(ID_AA64PFR0_EL1, EL3, IMP);
1490 break;
1491
1492 case SYS_ID_AA64PFR1_EL1:
1493 /* Only support BTI, SSBS, CSV2_frac */
1494 val &= ~(ID_AA64PFR1_EL1_PFAR |
1495 ID_AA64PFR1_EL1_MTEX |
1496 ID_AA64PFR1_EL1_THE |
1497 ID_AA64PFR1_EL1_GCS |
1498 ID_AA64PFR1_EL1_MTE_frac |
1499 ID_AA64PFR1_EL1_NMI |
1500 ID_AA64PFR1_EL1_SME |
1501 ID_AA64PFR1_EL1_RES0 |
1502 ID_AA64PFR1_EL1_MPAM_frac |
1503 ID_AA64PFR1_EL1_MTE);
1504 break;
1505
1506 case SYS_ID_AA64MMFR0_EL1:
1507 /* Hide ExS, Secure Memory */
1508 val &= ~(ID_AA64MMFR0_EL1_EXS |
1509 ID_AA64MMFR0_EL1_TGRAN4_2 |
1510 ID_AA64MMFR0_EL1_TGRAN16_2 |
1511 ID_AA64MMFR0_EL1_TGRAN64_2 |
1512 ID_AA64MMFR0_EL1_SNSMEM);
1513
1514 /* Hide CNTPOFF if present */
1515 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, ECV, IMP);
1516
1517 /* Disallow unsupported S2 page sizes */
1518 switch (PAGE_SIZE) {
1519 case SZ_64K:
1520 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, NI);
1521 fallthrough;
1522 case SZ_16K:
1523 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, NI);
1524 fallthrough;
1525 case SZ_4K:
1526 /* Support everything */
1527 break;
1528 }
1529
1530 /*
1531 * Since we can't support a guest S2 page size smaller
1532 * than the host's own page size (due to KVM only
1533 * populating its own S2 using the kernel's page
1534 * size), advertise the limitation using FEAT_GTG.
1535 */
1536 switch (PAGE_SIZE) {
1537 case SZ_4K:
1538 if (has_tgran_2(orig_val, 4))
1539 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN4_2, IMP);
1540 fallthrough;
1541 case SZ_16K:
1542 if (has_tgran_2(orig_val, 16))
1543 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN16_2, IMP);
1544 fallthrough;
1545 case SZ_64K:
1546 if (has_tgran_2(orig_val, 64))
1547 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR0_EL1, TGRAN64_2, IMP);
1548 break;
1549 }
1550
1551 /* Cap PARange to 48bits */
1552 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64MMFR0_EL1, PARANGE, 48);
1553 break;
1554
1555 case SYS_ID_AA64MMFR1_EL1:
1556 val &= ~(ID_AA64MMFR1_EL1_CMOW |
1557 ID_AA64MMFR1_EL1_nTLBPA |
1558 ID_AA64MMFR1_EL1_ETS |
1559 ID_AA64MMFR1_EL1_XNX |
1560 ID_AA64MMFR1_EL1_HAFDBS);
1561 /* FEAT_E2H0 implies no VHE */
1562 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features))
1563 val &= ~ID_AA64MMFR1_EL1_VH;
1564 break;
1565
1566 case SYS_ID_AA64MMFR2_EL1:
1567 val &= ~(ID_AA64MMFR2_EL1_BBM |
1568 ID_AA64MMFR2_EL1_TTL |
1569 GENMASK_ULL(47, 44) |
1570 ID_AA64MMFR2_EL1_ST |
1571 ID_AA64MMFR2_EL1_CCIDX |
1572 ID_AA64MMFR2_EL1_VARange);
1573
1574 /* Force TTL support */
1575 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR2_EL1, TTL, IMP);
1576 break;
1577
1578 case SYS_ID_AA64MMFR4_EL1:
1579 /*
1580 * You get EITHER
1581 *
1582 * - FEAT_VHE without FEAT_E2H0
1583 * - FEAT_NV limited to FEAT_NV2
1584 * - HCR_EL2.NV1 being RES0
1585 *
1586 * OR
1587 *
1588 * - FEAT_E2H0 without FEAT_VHE nor FEAT_NV
1589 *
1590 * Life is too short for anything else.
1591 */
1592 if (test_bit(KVM_ARM_VCPU_HAS_EL2_E2H0, kvm->arch.vcpu_features)) {
1593 val = 0;
1594 } else {
1595 val = SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, NV_frac, NV2_ONLY);
1596 val |= SYS_FIELD_PREP_ENUM(ID_AA64MMFR4_EL1, E2H0, NI_NV1);
1597 }
1598 break;
1599
1600 case SYS_ID_AA64DFR0_EL1:
1601 /* Only limited support for PMU, Debug, BPs, WPs, and HPMN0 */
1602 val &= ~(ID_AA64DFR0_EL1_ExtTrcBuff |
1603 ID_AA64DFR0_EL1_BRBE |
1604 ID_AA64DFR0_EL1_MTPMU |
1605 ID_AA64DFR0_EL1_TraceBuffer |
1606 ID_AA64DFR0_EL1_TraceFilt |
1607 ID_AA64DFR0_EL1_PMSVer |
1608 ID_AA64DFR0_EL1_CTX_CMPs |
1609 ID_AA64DFR0_EL1_SEBEP |
1610 ID_AA64DFR0_EL1_PMSS |
1611 ID_AA64DFR0_EL1_TraceVer);
1612
1613 /*
1614 * FEAT_Debugv8p9 requires support for extended breakpoints /
1615 * watchpoints.
1616 */
1617 val = ID_REG_LIMIT_FIELD_ENUM(val, ID_AA64DFR0_EL1, DebugVer, V8P8);
1618 break;
1619 }
1620
1621 return val;
1622 }
1623
1624 u64 kvm_vcpu_apply_reg_masks(const struct kvm_vcpu *vcpu,
1625 enum vcpu_sysreg sr, u64 v)
1626 {
1627 struct kvm_sysreg_masks *masks;
1628
1629 masks = vcpu->kvm->arch.sysreg_masks;
1630
1631 if (masks) {
1632 sr -= __SANITISED_REG_START__;
1633
1634 v &= ~masks->mask[sr].res0;
1635 v |= masks->mask[sr].res1;
1636 }
1637
1638 return v;
1639 }
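/*
 * Example (illustrative): if the VM's sanitised VTTBR_EL2 masks were set
 * up with res0 = GENMASK(63, 56) (no 16-bit VMID support), then
 *
 *	kvm_vcpu_apply_reg_masks(vcpu, VTTBR_EL2, v)
 *
 * clears bits [63:56] of 'v' and leaves everything else alone, which is
 * exactly the v = (v & ~res0) | res1 sanitisation performed above.
 */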
1640
1641 static __always_inline void set_sysreg_masks(struct kvm *kvm, int sr, u64 res0, u64 res1)
1642 {
1643 int i = sr - __SANITISED_REG_START__;
1644
1645 BUILD_BUG_ON(!__builtin_constant_p(sr));
1646 BUILD_BUG_ON(sr < __SANITISED_REG_START__);
1647 BUILD_BUG_ON(sr >= NR_SYS_REGS);
1648
1649 kvm->arch.sysreg_masks->mask[i].res0 = res0;
1650 kvm->arch.sysreg_masks->mask[i].res1 = res1;
1651 }
1652
1653 int kvm_init_nv_sysregs(struct kvm_vcpu *vcpu)
1654 {
1655 struct kvm *kvm = vcpu->kvm;
1656 u64 res0, res1;
1657
1658 lockdep_assert_held(&kvm->arch.config_lock);
1659
1660 if (kvm->arch.sysreg_masks)
1661 goto out;
1662
1663 kvm->arch.sysreg_masks = kzalloc(sizeof(*(kvm->arch.sysreg_masks)),
1664 GFP_KERNEL_ACCOUNT);
1665 if (!kvm->arch.sysreg_masks)
1666 return -ENOMEM;
1667
1668 /* VTTBR_EL2 */
1669 res0 = res1 = 0;
1670 if (!kvm_has_feat_enum(kvm, ID_AA64MMFR1_EL1, VMIDBits, 16))
1671 res0 |= GENMASK(63, 56);
1672 if (!kvm_has_feat(kvm, ID_AA64MMFR2_EL1, CnP, IMP))
1673 res0 |= VTTBR_CNP_BIT;
1674 set_sysreg_masks(kvm, VTTBR_EL2, res0, res1);
1675
1676 /* VTCR_EL2 */
1677 res0 = GENMASK(63, 32) | GENMASK(30, 20);
1678 res1 = BIT(31);
1679 set_sysreg_masks(kvm, VTCR_EL2, res0, res1);
1680
1681 /* VMPIDR_EL2 */
1682 res0 = GENMASK(63, 40) | GENMASK(30, 24);
1683 res1 = BIT(31);
1684 set_sysreg_masks(kvm, VMPIDR_EL2, res0, res1);
1685
1686 /* HCR_EL2 */
1687 get_reg_fixed_bits(kvm, HCR_EL2, &res0, &res1);
1688 set_sysreg_masks(kvm, HCR_EL2, res0, res1);
1689
1690 /* HCRX_EL2 */
1691 get_reg_fixed_bits(kvm, HCRX_EL2, &res0, &res1);
1692 set_sysreg_masks(kvm, HCRX_EL2, res0, res1);
1693
1694 /* HFG[RW]TR_EL2 */
1695 get_reg_fixed_bits(kvm, HFGRTR_EL2, &res0, &res1);
1696 set_sysreg_masks(kvm, HFGRTR_EL2, res0, res1);
1697 get_reg_fixed_bits(kvm, HFGWTR_EL2, &res0, &res1);
1698 set_sysreg_masks(kvm, HFGWTR_EL2, res0, res1);
1699
1700 /* HDFG[RW]TR_EL2 */
1701 get_reg_fixed_bits(kvm, HDFGRTR_EL2, &res0, &res1);
1702 set_sysreg_masks(kvm, HDFGRTR_EL2, res0, res1);
1703 get_reg_fixed_bits(kvm, HDFGWTR_EL2, &res0, &res1);
1704 set_sysreg_masks(kvm, HDFGWTR_EL2, res0, res1);
1705
1706 /* HFGITR_EL2 */
1707 get_reg_fixed_bits(kvm, HFGITR_EL2, &res0, &res1);
1708 set_sysreg_masks(kvm, HFGITR_EL2, res0, res1);
1709
1710 /* HAFGRTR_EL2 - not a lot to see here */
1711 get_reg_fixed_bits(kvm, HAFGRTR_EL2, &res0, &res1);
1712 set_sysreg_masks(kvm, HAFGRTR_EL2, res0, res1);
1713
1714 /* HFG[RW]TR2_EL2 */
1715 get_reg_fixed_bits(kvm, HFGRTR2_EL2, &res0, &res1);
1716 set_sysreg_masks(kvm, HFGRTR2_EL2, res0, res1);
1717 get_reg_fixed_bits(kvm, HFGWTR2_EL2, &res0, &res1);
1718 set_sysreg_masks(kvm, HFGWTR2_EL2, res0, res1);
1719
1720 /* HDFG[RW]TR2_EL2 */
1721 get_reg_fixed_bits(kvm, HDFGRTR2_EL2, &res0, &res1);
1722 set_sysreg_masks(kvm, HDFGRTR2_EL2, res0, res1);
1723 get_reg_fixed_bits(kvm, HDFGWTR2_EL2, &res0, &res1);
1724 set_sysreg_masks(kvm, HDFGWTR2_EL2, res0, res1);
1725
1726 /* HFGITR2_EL2 */
1727 get_reg_fixed_bits(kvm, HFGITR2_EL2, &res0, &res1);
1728 set_sysreg_masks(kvm, HFGITR2_EL2, res0, res1);
1729
1730 /* TCR2_EL2 */
1731 get_reg_fixed_bits(kvm, TCR2_EL2, &res0, &res1);
1732 set_sysreg_masks(kvm, TCR2_EL2, res0, res1);
1733
1734 /* SCTLR_EL1 */
1735 get_reg_fixed_bits(kvm, SCTLR_EL1, &res0, &res1);
1736 set_sysreg_masks(kvm, SCTLR_EL1, res0, res1);
1737
1738 /* SCTLR2_ELx */
1739 get_reg_fixed_bits(kvm, SCTLR2_EL1, &res0, &res1);
1740 set_sysreg_masks(kvm, SCTLR2_EL1, res0, res1);
1741 get_reg_fixed_bits(kvm, SCTLR2_EL2, &res0, &res1);
1742 set_sysreg_masks(kvm, SCTLR2_EL2, res0, res1);
1743
1744 /* MDCR_EL2 */
1745 get_reg_fixed_bits(kvm, MDCR_EL2, &res0, &res1);
1746 set_sysreg_masks(kvm, MDCR_EL2, res0, res1);
1747
1748 /* CNTHCTL_EL2 */
1749 res0 = GENMASK(63, 20);
1750 res1 = 0;
1751 if (!kvm_has_feat(kvm, ID_AA64PFR0_EL1, RME, IMP))
1752 res0 |= CNTHCTL_CNTPMASK | CNTHCTL_CNTVMASK;
1753 if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, CNTPOFF)) {
1754 res0 |= CNTHCTL_ECV;
1755 if (!kvm_has_feat(kvm, ID_AA64MMFR0_EL1, ECV, IMP))
1756 res0 |= (CNTHCTL_EL1TVT | CNTHCTL_EL1TVCT |
1757 CNTHCTL_EL1NVPCT | CNTHCTL_EL1NVVCT);
1758 }
1759 if (!kvm_has_feat(kvm, ID_AA64MMFR1_EL1, VH, IMP))
1760 res0 |= GENMASK(11, 8);
1761 set_sysreg_masks(kvm, CNTHCTL_EL2, res0, res1);
1762
1763 /* ICH_HCR_EL2 */
1764 res0 = ICH_HCR_EL2_RES0;
1765 res1 = ICH_HCR_EL2_RES1;
1766 if (!(kvm_vgic_global_state.ich_vtr_el2 & ICH_VTR_EL2_TDS))
1767 res0 |= ICH_HCR_EL2_TDIR;
1768 /* No GICv4 is presented to the guest */
1769 res0 |= ICH_HCR_EL2_DVIM | ICH_HCR_EL2_vSGIEOICount;
1770 set_sysreg_masks(kvm, ICH_HCR_EL2, res0, res1);
1771
1772 /* VNCR_EL2 */
1773 set_sysreg_masks(kvm, VNCR_EL2, VNCR_EL2_RES0, VNCR_EL2_RES1);
1774
1775 out:
1776 for (enum vcpu_sysreg sr = __SANITISED_REG_START__; sr < NR_SYS_REGS; sr++)
1777 __vcpu_rmw_sys_reg(vcpu, sr, |=, 0);
1778
1779 return 0;
1780 }
1781
1782 void check_nested_vcpu_requests(struct kvm_vcpu *vcpu)
1783 {
1784 if (kvm_check_request(KVM_REQ_NESTED_S2_UNMAP, vcpu)) {
1785 struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu;
1786
1787 write_lock(&vcpu->kvm->mmu_lock);
1788 if (mmu->pending_unmap) {
1789 kvm_stage2_unmap_range(mmu, 0, kvm_phys_size(mmu), true);
1790 mmu->pending_unmap = false;
1791 }
1792 write_unlock(&vcpu->kvm->mmu_lock);
1793 }
1794
1795 if (kvm_check_request(KVM_REQ_MAP_L1_VNCR_EL2, vcpu))
1796 kvm_map_l1_vncr(vcpu);
1797
1798 /* Must be last, as may switch context! */
1799 if (kvm_check_request(KVM_REQ_GUEST_HYP_IRQ_PENDING, vcpu))
1800 kvm_inject_nested_irq(vcpu);
1801 }
1802
1803 /*
1804 * One of the many architectural bugs in FEAT_NV2 is that the guest hypervisor
1805 * can write to HCR_EL2 behind our back, potentially changing the exception
1806 * routing / masking for even the host context.
1807 *
1808 * What follows is some slop to (1) react to exception routing / masking and (2)
1809 * preserve the pending SError state across translation regimes.
1810 */
1811 void kvm_nested_flush_hwstate(struct kvm_vcpu *vcpu)
1812 {
1813 if (!vcpu_has_nv(vcpu))
1814 return;
1815
1816 if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1817 kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1818 }
1819
1820 void kvm_nested_sync_hwstate(struct kvm_vcpu *vcpu)
1821 {
1822 unsigned long *hcr = vcpu_hcr(vcpu);
1823
1824 if (!vcpu_has_nv(vcpu))
1825 return;
1826
1827 /*
1828 * We previously decided that an SError was deliverable to the guest.
1829 * Reap the pending state from HCR_EL2 and...
1830 */
1831 if (unlikely(__test_and_clear_bit(__ffs(HCR_VSE), hcr)))
1832 vcpu_set_flag(vcpu, NESTED_SERROR_PENDING);
1833
1834 /*
1835 * Re-attempt SError injection in case the deliverability has changed,
1836 * which is necessary to faithfully emulate WFI in the case of a pending
1837 * SError being a wakeup condition.
1838 */
1839 if (unlikely(vcpu_test_and_clear_flag(vcpu, NESTED_SERROR_PENDING)))
1840 kvm_inject_serror_esr(vcpu, vcpu_get_vsesr(vcpu));
1841 }
1842
1843 /*
1844 * KVM unconditionally sets most of these traps anyway but use an allowlist
1845 * to document the guest hypervisor traps that may take precedence and guard
1846 * against future changes to the non-nested trap configuration.
1847 */
1848 #define NV_MDCR_GUEST_INCLUDE (MDCR_EL2_TDE | \
1849 MDCR_EL2_TDA | \
1850 MDCR_EL2_TDRA | \
1851 MDCR_EL2_TTRF | \
1852 MDCR_EL2_TPMS | \
1853 MDCR_EL2_TPM | \
1854 MDCR_EL2_TPMCR | \
1855 MDCR_EL2_TDCC | \
1856 MDCR_EL2_TDOSA)
1857
1858 void kvm_nested_setup_mdcr_el2(struct kvm_vcpu *vcpu)
1859 {
1860 u64 guest_mdcr = __vcpu_sys_reg(vcpu, MDCR_EL2);
1861
1862 /*
1863 * In yet another example where FEAT_NV2 is fscking broken, accesses
1864 * to MDSCR_EL1 are redirected to the VNCR despite having an effect
1865 * at EL2. Use a big hammer to apply sanity.
1866 */
1867 if (is_hyp_ctxt(vcpu))
1868 vcpu->arch.mdcr_el2 |= MDCR_EL2_TDA;
1869 else
1870 vcpu->arch.mdcr_el2 |= (guest_mdcr & NV_MDCR_GUEST_INCLUDE);
1871 }
1872