// SPDX-License-Identifier: GPL-2.0-only
/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * Macros and functions to access KVM PTEs (also known as SPTEs)
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2020 Red Hat, Inc. and/or its affiliates.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kvm_host.h>
#include "mmu.h"
#include "mmu_internal.h"
#include "x86.h"
#include "spte.h"

#include <asm/e820/api.h>
#include <asm/memtype.h>
#include <asm/vmx.h>

bool __read_mostly enable_mmio_caching = true;
static bool __ro_after_init allow_mmio_caching;
module_param_named(mmio_caching, enable_mmio_caching, bool, 0444);
EXPORT_SYMBOL_GPL(enable_mmio_caching);

bool __read_mostly kvm_ad_enabled;

u64 __read_mostly shadow_host_writable_mask;
u64 __read_mostly shadow_mmu_writable_mask;
u64 __read_mostly shadow_nx_mask;
u64 __read_mostly shadow_x_mask; /* mutually exclusive with nx_mask */
u64 __read_mostly shadow_user_mask;
u64 __read_mostly shadow_accessed_mask;
u64 __read_mostly shadow_dirty_mask;
u64 __read_mostly shadow_mmio_value;
u64 __read_mostly shadow_mmio_mask;
u64 __read_mostly shadow_mmio_access_mask;
u64 __read_mostly shadow_present_mask;
u64 __read_mostly shadow_memtype_mask;
u64 __read_mostly shadow_me_value;
u64 __read_mostly shadow_me_mask;
u64 __read_mostly shadow_acc_track_mask;

u64 __read_mostly shadow_nonpresent_or_rsvd_mask;
u64 __read_mostly shadow_nonpresent_or_rsvd_lower_gfn_mask;

static u8 __init kvm_get_host_maxphyaddr(void)
{
	/*
	 * boot_cpu_data.x86_phys_bits is reduced when MKTME or SME are detected
	 * in CPU detection code, but the processor treats those reduced bits as
	 * 'keyID', thus they are not reserved bits.  Therefore KVM needs to look
	 * at the physical address bits reported by CPUID, i.e. the raw
	 * MAXPHYADDR, when reasoning about CPU behavior with respect to
	 * MAXPHYADDR.
	 */
	if (likely(boot_cpu_data.extended_cpuid_level >= 0x80000008))
		return cpuid_eax(0x80000008) & 0xff;

	/*
	 * Quite weird to have VMX or SVM but not MAXPHYADDR; probably a VM with
	 * custom CPUID.  Proceed with whatever the kernel found since these
	 * features aren't virtualizable (SME/SEV also require CPUIDs higher
	 * than 0x80000008).
	 */
	return boot_cpu_data.x86_phys_bits;
}

void __init kvm_mmu_spte_module_init(void)
{
	/*
	 * Snapshot userspace's desire to allow MMIO caching.  Whether or not
	 * KVM can actually enable MMIO caching depends on vendor-specific
	 * hardware capabilities and other module params that can't be resolved
	 * until the vendor module is loaded, i.e. enable_mmio_caching can and
	 * will change when the vendor module is (re)loaded.
	 */
	allow_mmio_caching = enable_mmio_caching;

	kvm_host.maxphyaddr = kvm_get_host_maxphyaddr();
}

static u64 generation_mmio_spte_mask(u64 gen)
{
	u64 mask;

	WARN_ON_ONCE(gen & ~MMIO_SPTE_GEN_MASK);

	mask = (gen << MMIO_SPTE_GEN_LOW_SHIFT) & MMIO_SPTE_GEN_LOW_MASK;
	mask |= (gen << MMIO_SPTE_GEN_HIGH_SHIFT) & MMIO_SPTE_GEN_HIGH_MASK;
	return mask;
}

u64 make_mmio_spte(struct kvm_vcpu *vcpu, u64 gfn, unsigned int access)
{
	u64 gen = kvm_vcpu_memslots(vcpu)->generation & MMIO_SPTE_GEN_MASK;
	u64 spte = generation_mmio_spte_mask(gen);
	u64 gpa = gfn << PAGE_SHIFT;

	WARN_ON_ONCE(!vcpu->kvm->arch.shadow_mmio_value);

	access &= shadow_mmio_access_mask;
	spte |= vcpu->kvm->arch.shadow_mmio_value | access;
	spte |= gpa | shadow_nonpresent_or_rsvd_mask;
	spte |= (gpa & shadow_nonpresent_or_rsvd_mask)
		<< SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;

	return spte;
}
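
/*
 * Illustrative sketch of what make_mmio_spte() produces (the exact bit
 * positions are defined in spte.h and by the masks configured further below,
 * not by this comment): the memslot generation scattered into the low/high
 * generation fields, the VM's MMIO value (vcpu->kvm->arch.shadow_mmio_value)
 * plus the allowed access bits, and the GPA.  When the L1TF mitigation is in
 * use, the GPA bits covered by shadow_nonpresent_or_rsvd_mask are forced to
 * '1' and the clobbered bits are stashed SHADOW_NONPRESENT_OR_RSVD_MASK_LEN
 * positions higher, which is how get_mmio_spte_gfn() (spte.h) recovers the
 * original GFN.
 */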

static bool kvm_is_mmio_pfn(kvm_pfn_t pfn)
{
	if (pfn_valid(pfn))
		return !is_zero_pfn(pfn) && PageReserved(pfn_to_page(pfn)) &&
			/*
			 * Some reserved pages, such as those from NVDIMM
			 * DAX devices, are not for MMIO, and can be mapped
			 * with cached memory type for better performance.
			 * However, the check above misidentifies those pages
			 * as MMIO, and results in KVM mapping them with the
			 * UC memory type, which would hurt performance.
			 * Therefore, also check the host memory type and only
			 * treat UC/UC-/WC pages as MMIO.
			 */
			(!pat_enabled() || pat_pfn_immune_to_uc_mtrr(pfn));

	return !e820__mapped_raw_any(pfn_to_hpa(pfn),
				     pfn_to_hpa(pfn + 1) - 1,
				     E820_TYPE_RAM);
}

/*
 * Returns true if the SPTE has bits that may be set without holding mmu_lock.
 * The caller is responsible for checking if the SPTE is shadow-present, and
 * for determining whether or not the caller cares about non-leaf SPTEs.
 */
bool spte_has_volatile_bits(u64 spte)
{
	if (!is_writable_pte(spte) && is_mmu_writable_spte(spte))
		return true;

	if (is_access_track_spte(spte))
		return true;

	if (spte_ad_enabled(spte)) {
		if (!(spte & shadow_accessed_mask) ||
		    (is_writable_pte(spte) && !(spte & shadow_dirty_mask)))
			return true;
	}

	return false;
}

bool make_spte(struct kvm_vcpu *vcpu, struct kvm_mmu_page *sp,
	       const struct kvm_memory_slot *slot,
	       unsigned int pte_access, gfn_t gfn, kvm_pfn_t pfn,
	       u64 old_spte, bool prefetch, bool synchronizing,
	       bool host_writable, u64 *new_spte)
{
	int level = sp->role.level;
	u64 spte = SPTE_MMU_PRESENT_MASK;
	bool wrprot = false;

	/*
	 * For the EPT case, shadow_present_mask has no RWX bits set if
	 * exec-only page table entries are supported.  In that case,
	 * ACC_USER_MASK and shadow_user_mask are used to represent
	 * read access.  See FNAME(gpte_access) in paging_tmpl.h.
	 */
	WARN_ON_ONCE((pte_access | shadow_present_mask) == SHADOW_NONPRESENT_VALUE);

	if (sp->role.ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else if (kvm_mmu_page_ad_need_write_protect(sp))
		spte |= SPTE_TDP_AD_WRPROT_ONLY;

	spte |= shadow_present_mask;
	if (!prefetch || synchronizing)
		spte |= shadow_accessed_mask;

	/*
	 * For simplicity, enforce the NX huge page mitigation even if not
	 * strictly necessary.  KVM could ignore the mitigation if paging is
	 * disabled in the guest, as the guest doesn't have any page tables to
	 * abuse.  But to safely ignore the mitigation, KVM would have to
	 * ensure a new MMU is loaded (or all shadow pages zapped) when CR0.PG
	 * is toggled on, and that's a net negative for performance when TDP is
	 * enabled.  When TDP is disabled, KVM will always switch to a new MMU
	 * when CR0.PG is toggled, but leveraging that to ignore the mitigation
	 * would tie make_spte() further to vCPU/MMU state, and add complexity
	 * just to optimize a mode that is anything but performance critical.
	 */
	if (level > PG_LEVEL_4K && (pte_access & ACC_EXEC_MASK) &&
	    is_nx_huge_page_enabled(vcpu->kvm)) {
		pte_access &= ~ACC_EXEC_MASK;
	}

	if (pte_access & ACC_EXEC_MASK)
		spte |= shadow_x_mask;
	else
		spte |= shadow_nx_mask;

	if (pte_access & ACC_USER_MASK)
		spte |= shadow_user_mask;

	if (level > PG_LEVEL_4K)
		spte |= PT_PAGE_SIZE_MASK;

	if (shadow_memtype_mask)
		spte |= kvm_x86_call(get_mt_mask)(vcpu, gfn,
						  kvm_is_mmio_pfn(pfn));
	if (host_writable)
		spte |= shadow_host_writable_mask;
	else
		pte_access &= ~ACC_WRITE_MASK;

	if (shadow_me_value && !kvm_is_mmio_pfn(pfn))
		spte |= shadow_me_value;

	spte |= (u64)pfn << PAGE_SHIFT;

	if (pte_access & ACC_WRITE_MASK) {
		/*
		 * Unsync shadow pages that are reachable by the new, writable
		 * SPTE.  Write-protect the SPTE if the page can't be unsync'd,
		 * e.g. it's write-tracked (upper-level SPs) or has one or more
		 * shadow pages and unsync'ing pages is not allowed.
		 *
		 * When overwriting an existing leaf SPTE, and the old SPTE was
		 * writable, skip trying to unsync shadow pages as any relevant
		 * shadow pages must already be unsync, i.e. the hash lookup is
		 * unnecessary (and expensive).  Note, this relies on KVM not
		 * changing PFNs without first zapping the old SPTE, which is
		 * guaranteed by both the shadow MMU and the TDP MMU.
		 */
		if ((!is_last_spte(old_spte, level) || !is_writable_pte(old_spte)) &&
		    mmu_try_to_unsync_pages(vcpu->kvm, slot, gfn, synchronizing, prefetch))
			wrprot = true;
		else
			spte |= PT_WRITABLE_MASK | shadow_mmu_writable_mask |
				shadow_dirty_mask;
	}

	if (prefetch && !synchronizing)
		spte = mark_spte_for_access_track(spte);

	WARN_ONCE(is_rsvd_spte(&vcpu->arch.mmu->shadow_zero_check, spte, level),
		  "spte = 0x%llx, level = %d, rsvd bits = 0x%llx", spte, level,
		  get_rsvd_bits(&vcpu->arch.mmu->shadow_zero_check, spte, level));

	/*
	 * Mark the memslot dirty *after* modifying it for access tracking.
	 * Unlike folios, memslots can be safely marked dirty out of mmu_lock,
	 * i.e. in the fast page fault handler.
	 */
	if ((spte & PT_WRITABLE_MASK) && kvm_slot_dirty_track_enabled(slot)) {
		/* Enforced by kvm_mmu_hugepage_adjust. */
		WARN_ON_ONCE(level > PG_LEVEL_4K);
		mark_page_dirty_in_slot(vcpu->kvm, slot, gfn);
	}

	*new_spte = spte;
	return wrprot;
}
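
/*
 * Sketch of the expected calling pattern (the install step below is
 * illustrative only, it is not an API defined in this file):
 *
 *	wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, old_spte,
 *			   prefetch, synchronizing, host_writable, &new_spte);
 *	if (new_spte != old_spte)
 *		<write new_spte into the page tables via the MMU's SPTE setter>
 *	if (wrprot)
 *		<the gfn stays write-protected; a write fault was not fixed>
 */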

static u64 modify_spte_protections(u64 spte, u64 set, u64 clear)
{
	bool is_access_track = is_access_track_spte(spte);

	if (is_access_track)
		spte = restore_acc_track_spte(spte);

	KVM_MMU_WARN_ON(set & clear);
	spte = (spte | set) & ~clear;

	if (is_access_track)
		spte = mark_spte_for_access_track(spte);

	return spte;
}

static u64 make_spte_executable(u64 spte)
{
	return modify_spte_protections(spte, shadow_x_mask, shadow_nx_mask);
}

static u64 make_spte_nonexecutable(u64 spte)
{
	return modify_spte_protections(spte, shadow_nx_mask, shadow_x_mask);
}

/*
 * Construct an SPTE that maps a sub-page of the given huge page SPTE where
 * `index` identifies which sub-page.
 *
 * This is used during huge page splitting to build the SPTEs that make up the
 * new page table.
 */
u64 make_small_spte(struct kvm *kvm, u64 huge_spte,
		    union kvm_mmu_page_role role, int index)
{
	u64 child_spte = huge_spte;

	KVM_BUG_ON(!is_shadow_present_pte(huge_spte) || !is_large_pte(huge_spte), kvm);

	/*
	 * The child_spte already has the base address of the huge page being
	 * split.  So we just have to OR in the offset to the page at the next
	 * lower level for the given index.
	 */
	child_spte |= (index * KVM_PAGES_PER_HPAGE(role.level)) << PAGE_SHIFT;

	if (role.level == PG_LEVEL_4K) {
		child_spte &= ~PT_PAGE_SIZE_MASK;

		/*
		 * When splitting to a 4K page where execution is allowed, mark
		 * the page executable as the NX hugepage mitigation no longer
		 * applies.
		 */
		if ((role.access & ACC_EXEC_MASK) && is_nx_huge_page_enabled(kvm))
			child_spte = make_spte_executable(child_spte);
	}

	return child_spte;
}
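
/*
 * Worked example for make_small_spte(): when a 1GiB SPTE is split into 2MiB
 * SPTEs, role.level is PG_LEVEL_2M and KVM_PAGES_PER_HPAGE(PG_LEVEL_2M) is
 * 512, so child 'index' maps the huge page's base address plus index * 2MiB.
 * When splitting down to 4KiB, the per-index stride is a single page (and the
 * huge page bit is cleared above).
 */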

u64 make_huge_spte(struct kvm *kvm, u64 small_spte, int level)
{
	u64 huge_spte;

	KVM_BUG_ON(!is_shadow_present_pte(small_spte) || level == PG_LEVEL_4K, kvm);

	huge_spte = small_spte | PT_PAGE_SIZE_MASK;

	/*
	 * huge_spte already has the address of the sub-page being collapsed
	 * from small_spte, so just clear the lower address bits to create the
	 * huge page address.
	 */
	huge_spte &= KVM_HPAGE_MASK(level) | ~PAGE_MASK;

	if (is_nx_huge_page_enabled(kvm))
		huge_spte = make_spte_nonexecutable(huge_spte);

	return huge_spte;
}

u64 make_nonleaf_spte(u64 *child_pt, bool ad_disabled)
{
	u64 spte = SPTE_MMU_PRESENT_MASK;

	spte |= __pa(child_pt) | shadow_present_mask | PT_WRITABLE_MASK |
		shadow_user_mask | shadow_x_mask | shadow_me_value;

	if (ad_disabled)
		spte |= SPTE_TDP_AD_DISABLED;
	else
		spte |= shadow_accessed_mask;

	return spte;
}

u64 mark_spte_for_access_track(u64 spte)
{
	if (spte_ad_enabled(spte))
		return spte & ~shadow_accessed_mask;

	if (is_access_track_spte(spte))
		return spte;

	check_spte_writable_invariants(spte);

	WARN_ONCE(spte & (SHADOW_ACC_TRACK_SAVED_BITS_MASK <<
			  SHADOW_ACC_TRACK_SAVED_BITS_SHIFT),
		  "Access Tracking saved bit locations are not zero\n");

	spte |= (spte & SHADOW_ACC_TRACK_SAVED_BITS_MASK) <<
		SHADOW_ACC_TRACK_SAVED_BITS_SHIFT;
	spte &= ~(shadow_acc_track_mask | shadow_accessed_mask);

	return spte;
}
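
/*
 * In short: for A/D-disabled SPTEs, the protection bits covered by
 * SHADOW_ACC_TRACK_SAVED_BITS_MASK are stashed in the high "saved bits"
 * field and shadow_acc_track_mask is cleared, so the SPTE no longer grants
 * access and the next access faults; restore_acc_track_spte() in spte.h
 * undoes the transformation.
 */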

void kvm_mmu_set_mmio_spte_mask(u64 mmio_value, u64 mmio_mask, u64 access_mask)
{
	BUG_ON((u64)(unsigned)access_mask != access_mask);
	WARN_ON(mmio_value & shadow_nonpresent_or_rsvd_lower_gfn_mask);

	/*
	 * Reset to the original module param value to honor userspace's desire
	 * to (dis)allow MMIO caching.  Update the param itself so that
	 * userspace can see whether or not KVM is actually using MMIO caching.
	 */
	enable_mmio_caching = allow_mmio_caching;
	if (!enable_mmio_caching)
		mmio_value = 0;

	/*
	 * The mask must contain only bits that are carved out specifically for
	 * the MMIO SPTE mask, e.g. to ensure there's no overlap with the MMIO
	 * generation.
	 */
	if (WARN_ON(mmio_mask & ~SPTE_MMIO_ALLOWED_MASK))
		mmio_value = 0;

	/*
	 * Disable MMIO caching if the MMIO value collides with the bits that
	 * are used to hold the relocated GFN when the L1TF mitigation is
	 * enabled.  This should never fire as there is no known hardware that
	 * can trigger this condition, e.g. SME/SEV CPUs that require a custom
	 * MMIO value are not susceptible to L1TF.
	 */
	if (WARN_ON(mmio_value & (shadow_nonpresent_or_rsvd_mask <<
				  SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)))
		mmio_value = 0;

	/*
	 * The masked MMIO value must obviously match itself and a frozen SPTE
	 * must not get a false positive.  Frozen SPTEs and MMIO SPTEs should
	 * never collide as MMIO must set some RWX bits, and frozen SPTEs must
	 * not set any RWX bits.
	 */
	if (WARN_ON((mmio_value & mmio_mask) != mmio_value) ||
	    WARN_ON(mmio_value && (FROZEN_SPTE & mmio_mask) == mmio_value))
		mmio_value = 0;

	if (!mmio_value)
		enable_mmio_caching = false;

	shadow_mmio_value = mmio_value;
	shadow_mmio_mask = mmio_mask;
	shadow_mmio_access_mask = access_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_mmio_spte_mask);

void kvm_mmu_set_me_spte_mask(u64 me_value, u64 me_mask)
{
	/* shadow_me_value must be a subset of shadow_me_mask */
	if (WARN_ON(me_value & ~me_mask))
		me_value = me_mask = 0;

	shadow_me_value = me_value;
	shadow_me_mask = me_mask;
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_me_spte_mask);

void kvm_mmu_set_ept_masks(bool has_ad_bits, bool has_exec_only)
{
	kvm_ad_enabled = has_ad_bits;

	shadow_user_mask = VMX_EPT_READABLE_MASK;
	shadow_accessed_mask = VMX_EPT_ACCESS_BIT;
	shadow_dirty_mask = VMX_EPT_DIRTY_BIT;
	shadow_nx_mask = 0ull;
	shadow_x_mask = VMX_EPT_EXECUTABLE_MASK;
	/* VMX_EPT_SUPPRESS_VE_BIT is needed for W or X violation. */
	shadow_present_mask =
		(has_exec_only ? 0ull : VMX_EPT_READABLE_MASK) | VMX_EPT_SUPPRESS_VE_BIT;
	/*
	 * EPT overrides the host MTRRs, and so KVM must program the desired
	 * memtype directly into the SPTEs.  Note, this mask is just the mask
	 * of all bits that factor into the memtype, the actual memtype must be
	 * dynamically calculated, e.g. to ensure host MMIO is mapped UC.
	 */
	shadow_memtype_mask = VMX_EPT_MT_MASK | VMX_EPT_IPAT_BIT;
	shadow_acc_track_mask = VMX_EPT_RWX_MASK;
	shadow_host_writable_mask = EPT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = EPT_SPTE_MMU_WRITABLE;

	/*
	 * EPT Misconfigurations are generated if the value of bits 2:0
	 * of an EPT paging-structure entry is 110b (write/execute).
	 */
	kvm_mmu_set_mmio_spte_mask(VMX_EPT_MISCONFIG_WX_VALUE,
				   VMX_EPT_RWX_MASK | VMX_EPT_SUPPRESS_VE_BIT, 0);
}
EXPORT_SYMBOL_GPL(kvm_mmu_set_ept_masks);

void kvm_mmu_reset_all_pte_masks(void)
{
	u8 low_phys_bits;
	u64 mask;

	kvm_ad_enabled = true;

	/*
	 * If the CPU has 46 or fewer physical address bits, then set an
	 * appropriate mask to guard against L1TF attacks.  Otherwise, it is
	 * assumed that the CPU is not vulnerable to L1TF.
	 *
	 * Some Intel CPUs address the L1 cache using more PA bits than are
	 * reported by CPUID.  Use the PA width of the L1 cache when possible
	 * to achieve more effective mitigation, e.g. if system RAM overlaps
	 * the most significant bits of legal physical address space.
	 */
	shadow_nonpresent_or_rsvd_mask = 0;
	low_phys_bits = boot_cpu_data.x86_phys_bits;
	if (boot_cpu_has_bug(X86_BUG_L1TF) &&
	    !WARN_ON_ONCE(boot_cpu_data.x86_cache_bits >=
			  52 - SHADOW_NONPRESENT_OR_RSVD_MASK_LEN)) {
		low_phys_bits = boot_cpu_data.x86_cache_bits
			- SHADOW_NONPRESENT_OR_RSVD_MASK_LEN;
		shadow_nonpresent_or_rsvd_mask =
			rsvd_bits(low_phys_bits, boot_cpu_data.x86_cache_bits - 1);
	}

	shadow_nonpresent_or_rsvd_lower_gfn_mask =
		GENMASK_ULL(low_phys_bits - 1, PAGE_SHIFT);

	shadow_user_mask = PT_USER_MASK;
	shadow_accessed_mask = PT_ACCESSED_MASK;
	shadow_dirty_mask = PT_DIRTY_MASK;
	shadow_nx_mask = PT64_NX_MASK;
	shadow_x_mask = 0;
	shadow_present_mask = PT_PRESENT_MASK;

	/*
	 * For shadow paging and NPT, KVM uses PAT entry '0' to encode WB
	 * memtype in the SPTEs, i.e. relies on host MTRRs to provide the
	 * correct memtype (WB is the "weakest" memtype).
	 */
	shadow_memtype_mask = 0;
	shadow_acc_track_mask = 0;
	shadow_me_mask = 0;
	shadow_me_value = 0;

	shadow_host_writable_mask = DEFAULT_SPTE_HOST_WRITABLE;
	shadow_mmu_writable_mask = DEFAULT_SPTE_MMU_WRITABLE;

	/*
	 * Set a reserved PA bit in MMIO SPTEs to generate page faults with
	 * PFEC.RSVD=1 on MMIO accesses.  64-bit PTEs (PAE, x86-64, and EPT
	 * paging) support a maximum of 52 bits of PA, i.e. if the CPU supports
	 * 52-bit physical addresses then there are no reserved PA bits in the
	 * PTEs and so the reserved PA approach must be disabled.
	 */
	if (kvm_host.maxphyaddr < 52)
		mask = BIT_ULL(51) | PT_PRESENT_MASK;
	else
		mask = 0;

	kvm_mmu_set_mmio_spte_mask(mask, mask, ACC_WRITE_MASK | ACC_USER_MASK);
}
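
/*
 * Rough setup flow, for reference (the call sites live outside this file, so
 * this summary is descriptive rather than authoritative):
 * kvm_mmu_spte_module_init() snapshots the mmio_caching module param and the
 * host MAXPHYADDR when kvm.ko is loaded, kvm_mmu_reset_all_pte_masks()
 * installs the shadow-paging/NPT defaults above, and a vendor module may then
 * override the masks, e.g. via kvm_mmu_set_ept_masks() when EPT is enabled,
 * or kvm_mmu_set_me_spte_mask() and kvm_mmu_set_mmio_spte_mask() on
 * SME/SEV-capable hardware.
 */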