// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

bool __read_mostly kvm_ad_enabled;

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static u8 __init kvm_get_host_maxphyaddr(void)
{
        /*
         * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
         * in CPU detection code, but the processor treats those reduced bits as
         * 'keyID', thus they are not reserved bits. Therefore KVM needs to look
         * at the physical address bits reported by CPUID, i.e. the raw
         * MAXPHYADDR, when reasoning about CPU behavior with respect to
         * MAXPHYADDR.
         */
        if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
                return cpuid_eax(0x80000008) & 0xff;

        /*
         * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
         * custom CPUID. Proceed with whatever the kernel found since these
         * features aren't virtualizable (SME/SEV also require CPUIDs higher
         * than 0x80000008).
         */
        return boot_cpu_data.x86_phys_bits;
}

void __init kvm_mmu_spte_module_init(void)
{
        /*
         * Snapshot userspace's desire to allow MMIO caching. Whether or not
         * KVM can actually enable MMIO caching depends on vendor-specific
         * hardware capabilities and other module params that can't be resolved
         * until the vendor module is loaded, i.e. enable_mmio_caching can and
         * will change when the vendor module is (re)loaded.
         */
        allow_mmio_caching = enable_mmio_caching;

        kvm_host.maxphyaddr = kvm_get_host_maxphyaddr();
}

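/*
 * Scatter the MMIO generation across the bits of the SPTE that are reserved
 * for the generation (see the MMIO_SPTE_GEN_* definitions in spte.h). The
 * generation is split into low and high chunks so that it can share the SPTE
 * with the other MMIO bits.
 */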
static u64 generation_mmio_spte_mask(u64 gen)
{
        u64 mask;

        WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

        mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
        mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
        return mask;
}

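/*
 * Construct an MMIO SPTE, i.e. a "special" non-present SPTE that caches the
 * gfn, the allowed access, and the current memslots generation so that most
 * MMIO accesses can be emulated without walking the memslots.
 */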
u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
        u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
        u64 spte = generation_mmio_spte_mask(gen);
        u64 gpa = gfn << PAGE_SHIFT;

        access &= shadow_mmio_access_mask;
        spte |= vcpu->kvm->arch.shadow_mmio_value | access;
        spte |= gpa | shadow_nonpresent_or_rsvd_mask;
        spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
                << SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

        return spte;
}

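/*
 * Returns true if the pfn should be treated as MMIO for memtype purposes.
 * Pfns that are backed by a struct page are treated as MMIO only if the page
 * is reserved and its host memory type is UC/UC-/WC; pfns without a struct
 * page are vetted against the raw e820 map.
 */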
static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
        if (pfn_valid(pfn))
                return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
                        /*
                         * Some reserved pages, such as those from NVDIMM
                         * DAX devices, are not for MMIO, and can be mapped
                         * with cached memory type for better performance.
                         * However, the above check misconceives those pages
                         * as MMIO, and results in KVM mapping them with UC
                         * memory type, which would hurt the performance.
                         * Therefore, we check the host memory type in addition
                         * and only treat UC/UC-/WC pages as MMIO.
                         */
                        (!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

        return !e820__mapped_raw_any(pfn_to_hpa(pfn),
                                     pfn_to_hpa(pfn + 1) - 1,
                                     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE needs to be updated atomically due to having bits
 * that may be changed without holding mmu_lock, and for which KVM must not
 * lose information. E.g. KVM must not drop Dirty bit information. The caller
 * is responsible for checking if the SPTE is shadow-present, and for
 * determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_needs_atomic_update(u64 spte)
{
        /* SPTEs can be made Writable by KVM's fast page fault handler. */
        if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
                return true;

        /*
         * A/D-disabled SPTEs can be access-tracked by aging, and access-tracked
         * SPTEs can be restored by KVM's fast page fault handler.
         */
        if (!spte_ad_enabled(spte))
                return true;

        /*
         * Dirty and Accessed bits can be set by the CPU. Ignore the Accessed
         * bit, as KVM tolerates false negatives/positives, e.g. KVM doesn't
         * invalidate TLBs when aging SPTEs, and so it's safe to clobber the
         * Accessed bit (and rare in practice).
         */
        return is_writable_pte(spte) && !(spte & shadow_dirty_mask);
}

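/*
 * Construct a shadow-present leaf SPTE for the given gfn/pfn and access.
 * Returns true if the gfn needs to be write-protected, in which case the new
 * SPTE is left non-writable; *new_spte is always filled in.
 */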
bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
               const struct kvm_memory_slot *slot,
               unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
               u64 old_spte, bool prefetch, bool synchronizing,
               bool host_writable, u64 *new_spte)
{
        int level = sp->role.level;
        u64 spte = SPTE_MMU_PRESENT_MASK;
        bool wrprot = false;

        /*
         * For the EPT case, shadow_present_mask has no RWX bits set if
         * exec-only page table entries are supported. In that case,
         * ACC_USER_MASK and shadow_user_mask are used to represent
         * read access. See FNAME(gpte_access) in paging_tmpl.h.
         */
        WARN_ON_ONCE((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE);

        if (sp->role.ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED;
        else if (kvm_mmu_page_ad_need_write_protect(vcpu->kvm, sp))
                spte |= SPTE_TDP_AD_WRPROT_ONLY;

        spte |= shadow_present_mask;
        if (!prefetch || synchronizing)
                spte |= shadow_accessed_mask;

        /*
         * For simplicity, enforce the NX huge page mitigation even if not
         * strictly necessary. KVM could ignore the mitigation if paging is
         * disabled in the guest, as the guest doesn't have any page tables to
         * abuse. But to safely ignore the mitigation, KVM would have to
         * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
         * is toggled on, and that's a net negative for performance when TDP is
         * enabled. When TDP is disabled, KVM will always switch to a new MMU
         * when CR0.PG is toggled, but leveraging that to ignore the mitigation
         * would tie make_spte() further to vCPU/MMU state, and add complexity
         * just to optimize a mode that is anything but performance critical.
         */
        if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
            is_nx_huge_page_enabled(vcpu->kvm)) {
                pte_access &= ~ACC_EXEC_MASK;
        }

        if (pte_access & ACC_EXEC_MASK)
                spte |= shadow_x_mask;
        else
                spte |= shadow_nx_mask;

        if (pte_access & ACC_USER_MASK)
                spte |= shadow_user_mask;

        if (level > PG_LEVEL_4K)
                spte |= PT_PAGE_SIZE_MASK;

        spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn, kvm_is_mmio_pfn(pfn));
        if (host_writable)
                spte |= shadow_host_writable_mask;
        else
                pte_access &= ~ACC_WRITE_MASK;

        if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
                spte |= shadow_me_value;

        spte |= (u64)pfn << PAGE_SHIFT;

        if (pte_access & ACC_WRITE_MASK) {
                /*
                 * Unsync shadow pages that are reachable by the new, writable
                 * SPTE. Write-protect the SPTE if the page can't be unsync'd,
                 * e.g. it's write-tracked (upper-level SPs) or has one or more
                 * shadow pages and unsync'ing pages is not allowed.
                 *
                 * When overwriting an existing leaf SPTE, and the old SPTE was
                 * writable, skip trying to unsync shadow pages as any relevant
                 * shadow pages must already be unsync, i.e. the hash lookup is
                 * unnecessary (and expensive). Note, this relies on KVM not
                 * changing PFNs without first zapping the old SPTE, which is
                 * guaranteed by both the shadow MMU and the TDP MMU.
                 */
                if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
                    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
                        wrprot = true;
                else
                        spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
                                shadow_dirty_mask;
        }

        if (prefetch && !synchronizing)
                spte = mark_spte_for_access_track(spte);

        WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
                  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
                  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

        /*
         * Mark the memslot dirty *after* modifying it for access tracking.
         * Unlike folios, memslots can be safely marked dirty out of mmu_lock,
         * i.e. in the fast page fault handler.
         */
        if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
                /* Enforced by kvm_mmu_hugepage_adjust. */
                WARN_ON_ONCE(level > PG_LEVEL_4K);
                mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
        }

        *new_spte = spte;
        return wrprot;
}

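/*
 * Set and/or clear protection bits in a SPTE. If the SPTE is access-tracked,
 * its original protections are stashed in the saved-bits area, so temporarily
 * restore the SPTE, modify the protections, and then re-mark the result for
 * access tracking.
 */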
static u64 modify_spte_protections(u64 spte, u64 set, u64 clear)
{
        bool is_access_track = is_access_track_spte(spte);

        if (is_access_track)
                spte = restore_acc_track_spte(spte);

        KVM_MMU_WARN_ON(set & clear);
        spte = (spte | set) & ~clear;

        if (is_access_track)
                spte = mark_spte_for_access_track(spte);

        return spte;
}

static u64 make_spte_executable(u64 spte)
{
        return modify_spte_protections(spte, shadow_x_mask, shadow_nx_mask);
}

static u64 make_spte_nonexecutable(u64 spte)
{
        return modify_spte_protections(spte, shadow_nx_mask, shadow_x_mask);
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
                    union kvm_mmu_page_role role, int index)
{
        u64 child_spte = huge_spte;

        KVM_BUG_ON(!is_shadow_present_pte(huge_spte) || !is_large_pte(huge_spte), kvm);

        /*
         * The child_spte already has the base address of the huge page being
         * split. So we just have to OR in the offset to the page at the next
         * lower level for the given index.
         */
        child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

        if (role.level == PG_LEVEL_4K) {
                child_spte &= ~PT_PAGE_SIZE_MASK;

                /*
                 * When splitting to a 4K page where execution is allowed, mark
                 * the page executable as the NX hugepage mitigation no longer
                 * applies.
                 */
                if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
                        child_spte = make_spte_executable(child_spte);
        }

        return child_spte;
}

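/*
 * Construct a huge page SPTE from the given small SPTE, i.e. the inverse of
 * make_small_spte(), used when collapsing a fully-mapped page table back into
 * a single huge page. The NX huge page mitigation, if enabled, is reapplied
 * since the new SPTE once again maps a huge page.
 */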
u64 make_huge_spte(struct kvm *kvm, u64 small_spte, int level)
{
        u64 huge_spte;

        KVM_BUG_ON(!is_shadow_present_pte(small_spte) || level == PG_LEVEL_4K, kvm);

        huge_spte = small_spte | PT_PAGE_SIZE_MASK;

        /*
         * huge_spte already has the address of the sub-page being collapsed
         * from small_spte, so just clear the lower address bits to create the
         * huge page address.
         */
        huge_spte &= KVM_HPAGE_MASK(level) | ~PAGE_MASK;

        if (is_nx_huge_page_enabled(kvm))
                huge_spte = make_spte_nonexecutable(huge_spte);

        return huge_spte;
}

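/*
 * Construct a non-leaf SPTE that points at the page table @child_pt. Non-leaf
 * SPTEs are created with full RWX+user permissions so that access is governed
 * entirely by the leaf SPTEs.
 */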
u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
        u64 spte = SPTE_MMU_PRESENT_MASK;

        spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
                shadow_user_mask | shadow_x_mask | shadow_me_value;

        if (ad_disabled)
                spte |= SPTE_TDP_AD_DISABLED;
        else
                spte |= shadow_accessed_mask;

        return spte;
}

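/*
 * Mark the SPTE as not-accessed for access tracking. With A/D bits in use,
 * clearing the Accessed bit suffices. For A/D-disabled SPTEs, clear the bits
 * that make the SPTE present to hardware (shadow_acc_track_mask) after
 * stashing the protections in the saved-bits area, so that the fast page
 * fault handler can restore the SPTE on the next access.
 */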
u64 mark_spte_for_access_track(u64 spte)
{
        if (spte_ad_enabled(spte))
                return spte & ~shadow_accessed_mask;

        if (is_access_track_spte(spte))
                return spte;

        check_spte_writable_invariants(spte);

        WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
                          SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
                  "Access Tracking saved bit locations are not zero\n");

        spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
                SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
        spte &= ~(shadow_acc_track_mask | shadow_accessed_mask);

        return spte;
}

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
        BUG_ON((u64)(unsigned)access_mask != access_mask);
        WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

        /*
         * Reset to the original module param value to honor userspace's desire
         * to (dis)allow MMIO caching. Update the param itself so that
         * userspace can see whether or not KVM is actually using MMIO caching.
         */
        enable_mmio_caching = allow_mmio_caching;
        if (!enable_mmio_caching)
                mmio_value = 0;

        /*
         * The mask must contain only bits that are carved out specifically for
         * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
         * generation.
         */
        if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
                mmio_value = 0;

        /*
         * Disable MMIO caching if the MMIO value collides with the bits that
         * are used to hold the relocated GFN when the L1TF mitigation is
         * enabled. This should never fire as there is no known hardware that
         * can trigger this condition, e.g. SME/SEV CPUs that require a custom
         * MMIO value are not susceptible to L1TF.
         */
        if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
                                  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
                mmio_value = 0;

        /*
         * The masked MMIO value must obviously match itself and a frozen SPTE
         * must not get a false positive. Frozen SPTEs and MMIO SPTEs should
         * never collide as MMIO must set some RWX bits, and frozen SPTEs must
         * not set any RWX bits.
         */
        if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
            WARN_ON(mmio_value && (FROZEN_SPTE & mmio_mask) == mmio_value))
                mmio_value = 0;

        if (!mmio_value)
                enable_mmio_caching = false;

        shadow_mmio_value = mmio_value;
        shadow_mmio_mask = mmio_mask;
        shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_mmio_spte_value(struct kvm *kvm, u64 mmio_value)
{
        kvm->arch.shadow_mmio_value = mmio_value;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_value);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
        /* shadow_me_value must be a subset of shadow_me_mask */
        if (WARN_ON(me_value & ~me_mask))
                me_value = me_mask = 0;

        shadow_me_value = me_value;
        shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);

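/*
 * Configure the SPTE masks for EPT, where readable/writable/executable bits
 * replace the legacy user/NX bits, and A/D bits are optional.
 */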
void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
        kvm_ad_enabled = has_ad_bits;

        shadow_user_mask = VMX_EPT_READABLE_MASK;
        shadow_accessed_mask = VMX_EPT_ACCESS_BIT;
        shadow_dirty_mask = VMX_EPT_DIRTY_BIT;
        shadow_nx_mask = 0ull;
        shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
        /* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violations. */
        shadow_present_mask =
                (has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;

        shadow_acc_track_mask = VMX_EPT_RWX_MASK;
        shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

        /*
         * EPT Misconfigurations are generated if the value of bits 2:0
         * of an EPT paging-structure entry is 110b (write/execute).
         */
        kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
                                   VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

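/*
 * Reset all SPTE masks to the defaults for legacy paging, e.g. when the
 * vendor module is loaded. VMX overrides the relevant masks via
 * kvm_mmu_set_ept_masks() when EPT is in use.
 */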
void kvm_mmu_reset_all_pte_masks(void)
{
        u8 low_phys_bits;
        u64 mask;

        kvm_ad_enabled = true;

        /*
         * If the CPU has 46 or fewer physical address bits, then set an
         * appropriate mask to guard against L1TF attacks. Otherwise, it is
         * assumed that the CPU is not vulnerable to L1TF.
         *
         * Some Intel CPUs address the L1 cache using more PA bits than are
         * reported by CPUID. Use the PA width of the L1 cache when possible
         * to achieve more effective mitigation, e.g. if system RAM overlaps
         * the most significant bits of legal physical address space.
         */
        shadow_nonpresent_or_rsvd_mask = 0;
        low_phys_bits = boot_cpu_data.x86_phys_bits;
        if (boot_cpu_has_bug(X86_BUG_L1TF) &&
            !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
                          52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
                low_phys_bits = boot_cpu_data.x86_cache_bits
                        - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
                shadow_nonpresent_or_rsvd_mask =
                        rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
        }

        shadow_nonpresent_or_rsvd_lower_gfn_mask =
                GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

        shadow_user_mask = PT_USER_MASK;
        shadow_accessed_mask = PT_ACCESSED_MASK;
        shadow_dirty_mask = PT_DIRTY_MASK;
        shadow_nx_mask = PT64_NX_MASK;
        shadow_x_mask = 0;
        shadow_present_mask = PT_PRESENT_MASK;

        shadow_acc_track_mask = 0;
        shadow_me_mask = 0;
        shadow_me_value = 0;

        shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
        shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

        /*
         * Set a reserved PA bit in MMIO SPTEs to generate page faults with
         * PFEC.RSVD=1 on MMIO accesses. 64-bit PTEs (PAE, x86-64, and EPT
         * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
         * 52-bit physical addresses then there are no reserved PA bits in the
         * PTEs and so the reserved PA approach must be disabled.
         */
        if (kvm_host.maxphyaddr < 52)
                mask = BIT_ULL(51) | PT_PRESENT_MASK;
        else
                mask = 0;

        kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}